From 9be1c8afb4926aee043d48493c0474c0eea56540 Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Thu, 2 Mar 2017 04:13:39 +0800 Subject: clk: sunxi-ng: add Allwinner H5 CCU support for H3 CCU driver Allwinner H5 is a SoC that features a design which keeps the peripherals compatible with the H3, so it also has a CCU like the one on the H3 -- only one bus gate/reset is added, and the MMC sample/output phases are removed because of the MMC controller update. Add support for it in our existing H3 CCU driver. Signed-off-by: Icenowy Zheng Signed-off-by: Maxime Ripard --- include/dt-bindings/clock/sun8i-h3-ccu.h | 5 ++++- include/dt-bindings/reset/sun8i-h3-ccu.h | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h index efb7ba2bd515..c2afc41d6964 100644 --- a/include/dt-bindings/clock/sun8i-h3-ccu.h +++ b/include/dt-bindings/clock/sun8i-h3-ccu.h @@ -91,7 +91,7 @@ #define CLK_BUS_UART1 63 #define CLK_BUS_UART2 64 #define CLK_BUS_UART3 65 -#define CLK_BUS_SCR 66 +#define CLK_BUS_SCR0 66 #define CLK_BUS_EPHY 67 #define CLK_BUS_DBG 68 @@ -142,4 +142,7 @@ #define CLK_GPU 114 +/* New clocks imported in H5 */ +#define CLK_BUS_SCR1 115 + #endif /* _DT_BINDINGS_CLK_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/reset/sun8i-h3-ccu.h b/include/dt-bindings/reset/sun8i-h3-ccu.h index 6b7af80c26ec..484c2a22919d 100644 --- a/include/dt-bindings/reset/sun8i-h3-ccu.h +++ b/include/dt-bindings/reset/sun8i-h3-ccu.h @@ -98,6 +98,9 @@ #define RST_BUS_UART1 50 #define RST_BUS_UART2 51 #define RST_BUS_UART3 52 -#define RST_BUS_SCR 53 +#define RST_BUS_SCR0 53 + +/* New resets imported in H5 */ +#define RST_BUS_SCR1 54 #endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ -- cgit v1.2.3 From 923386f761f5ff35da2ac778839876fe4a2f165f Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Fri, 18 Dec 2015 15:36:57 +0200 Subject: clk: ti: remove unused definitions from public clk_hw_omap struct Clksel support was deprecated a while back, so remove these fields from the struct as well. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren --- include/linux/clk/ti.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include') diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 6110fe09ed18..07308db5a15d 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -129,8 +129,6 @@ struct clk_hw_omap_ops { * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg) * @flags: see "struct clk.flags possibilities" above * @clksel_reg: for clksel clks, register va containing src/divisor select - * @clksel_mask: bitmask in @clksel_reg for the src/divisor selector - * @clksel: for clksel clks, pointer to struct clksel for this clock * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock * @clkdm_name: clockdomain name that this clock is contained in * @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime @@ -145,8 +143,6 @@ struct clk_hw_omap { u8 enable_bit; u8 flags; void __iomem *clksel_reg; - u32 clksel_mask; - const struct clksel *clksel; struct dpll_data *dpll_data; const char *clkdm_name; struct clockdomain *clkdm; -- cgit v1.2.3 From b6f27b2db2df395d65b02a758861c7fc54edbec1 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Fri, 30 Sep 2016 14:10:11 +0300 Subject: clk: ti: add clkdm_lookup to the exported functions This will be needed to move some additional clockdomain functionality under the clock driver.
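Purely as an illustration (a rough sketch, not part of the diff below): platform code fills in the new op, and clock driver code can then resolve a clockdomain by name through the low-level ops instead of calling clkdm_lookup() directly:

	/* mach-omap2 plugs in its implementation (other ops omitted): */
	static struct ti_clk_ll_ops omap_clk_ll_ops = {
		.clkdm_clk_enable = clkdm_clk_enable,
		.clkdm_clk_disable = clkdm_clk_disable,
		.clkdm_lookup = clkdm_lookup,
	};

	/* clock driver side, e.g. when associating a clock with its domain: */
	struct clockdomain *clkdm = ti_clk_ll_ops->clkdm_lookup(clk->clkdm_name);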
Signed-off-by: Tero Kristo Acked-by: Tony Lindgren --- arch/arm/mach-omap2/clock.c | 1 + include/linux/clk/ti.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index 1270afdcacdf..6fac82609c96 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c @@ -57,6 +57,7 @@ u16 cpu_mask; static struct ti_clk_ll_ops omap_clk_ll_ops = { .clkdm_clk_enable = clkdm_clk_enable, .clkdm_clk_disable = clkdm_clk_disable, + .clkdm_lookup = clkdm_lookup, .cm_wait_module_ready = omap_cm_wait_module_ready, .cm_split_idlest_reg = cm_split_idlest_reg, }; diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 07308db5a15d..bc7fd8f0fb5d 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -213,6 +213,7 @@ struct clk_omap_reg { * @clk_writel: pointer to register write function * @clkdm_clk_enable: pointer to clockdomain enable function * @clkdm_clk_disable: pointer to clockdomain disable function + * @clkdm_lookup: pointer to clockdomain lookup function * @cm_wait_module_ready: pointer to CM module wait ready function * @cm_split_idlest_reg: pointer to CM module function to split idlest reg * @@ -228,6 +229,7 @@ struct ti_clk_ll_ops { int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk); int (*clkdm_clk_disable)(struct clockdomain *clkdm, struct clk *clk); + struct clockdomain * (*clkdm_lookup)(const char *name); int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg, u8 idlest_shift); int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst, -- cgit v1.2.3 From 2e1a294c0f2273a6d3537c91965ca46a6483bd8c Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Fri, 30 Sep 2016 14:13:38 +0300 Subject: clk: ti: move omap2_init_clk_clkdm under TI clock driver This is not needed outside the driver, so move it inside it and remove the prototype from the public header also. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren --- arch/arm/mach-omap2/clock.c | 32 -------------------------------- drivers/clk/ti/clock.h | 1 + drivers/clk/ti/clockdomain.c | 30 ++++++++++++++++++++++++++++++ include/linux/clk/ti.h | 1 - 4 files changed, 31 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index 6fac82609c96..ae5b23c19b83 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c @@ -79,38 +79,6 @@ int __init omap2_clk_setup_ll_ops(void) * OMAP2+ specific clock functions */ -/* Public functions */ - -/** - * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk - * @clk: OMAP clock struct ptr to use - * - * Convert a clockdomain name stored in a struct clk 'clk' into a - * clockdomain pointer, and save it into the struct clk. Intended to be - * called during clk_register(). No return value. 
- */ -void omap2_init_clk_clkdm(struct clk_hw *hw) -{ - struct clk_hw_omap *clk = to_clk_hw_omap(hw); - struct clockdomain *clkdm; - const char *clk_name; - - if (!clk->clkdm_name) - return; - - clk_name = __clk_get_name(hw->clk); - - clkdm = clkdm_lookup(clk->clkdm_name); - if (clkdm) { - pr_debug("clock: associated clk %s to clkdm %s\n", - clk_name, clk->clkdm_name); - clk->clkdm = clkdm; - } else { - pr_debug("clock: could not associate clk %s to clkdm %s\n", - clk_name, clk->clkdm_name); - } -} - /** * ti_clk_init_features - init clock features struct for the SoC * diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index ee6d22507a3d..ecf82d86118c 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -228,6 +228,7 @@ extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait; extern const struct clk_ops ti_clk_divider_ops; extern const struct clk_ops ti_clk_mux_ops; +void omap2_init_clk_clkdm(struct clk_hw *hw); int omap2_clkops_enable_clkdm(struct clk_hw *hw); void omap2_clkops_disable_clkdm(struct clk_hw *hw); diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c index 6cf9dd189a92..704157d8c0b7 100644 --- a/drivers/clk/ti/clockdomain.c +++ b/drivers/clk/ti/clockdomain.c @@ -103,6 +103,36 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw) ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); } +/** + * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk + * @clk: OMAP clock struct ptr to use + * + * Convert a clockdomain name stored in a struct clk 'clk' into a + * clockdomain pointer, and save it into the struct clk. Intended to be + * called during clk_register(). No return value. + */ +void omap2_init_clk_clkdm(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct clockdomain *clkdm; + const char *clk_name; + + if (!clk->clkdm_name) + return; + + clk_name = __clk_get_name(hw->clk); + + clkdm = ti_clk_ll_ops->clkdm_lookup(clk->clkdm_name); + if (clkdm) { + pr_debug("clock: associated clk %s to clkdm %s\n", + clk_name, clk->clkdm_name); + clk->clkdm = clkdm; + } else { + pr_debug("clock: could not associate clk %s to clkdm %s\n", + clk_name, clk->clkdm_name); + } +} + static void __init of_ti_clockdomain_setup(struct device_node *node) { struct clk *clk; diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index bc7fd8f0fb5d..626ae94b7444 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -238,7 +238,6 @@ struct ti_clk_ll_ops { #define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw) -void omap2_init_clk_clkdm(struct clk_hw *clk); int omap2_clk_disable_autoidle_all(void); int omap2_clk_enable_autoidle_all(void); int omap2_clk_allow_idle(struct clk *clk); -- cgit v1.2.3 From c91f07801f144920f8467486a1e36e42ed9d9ff2 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 30 Jan 2017 16:01:36 +0200 Subject: clk: ti: drop unnecessary MEMMAP_ADDRESSING flag This has been superseded by the usage of ti_clk_ll_ops for now.
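As an illustration of why the per-clock flag is no longer needed (a rough sketch, not part of the diff below): the common gate enable path already funnels every register access through the low-level ops, so the addressing mode is handled there rather than per clock:

	/* sketch following omap2_dflt_clk_enable()'s register access */
	v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	ti_clk_ll_ops->clk_writel(v, clk->enable_reg);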
Signed-off-by: Tero Kristo Acked-by: Tony Lindgren --- drivers/clk/ti/apll.c | 1 - drivers/clk/ti/dpll.c | 2 -- drivers/clk/ti/gate.c | 4 +--- drivers/clk/ti/interface.c | 1 - include/linux/clk/ti.h | 2 -- 5 files changed, 1 insertion(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 62b5db7b8fe9..5cba28c6ab35 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -194,7 +194,6 @@ static void __init of_dra7_apll_setup(struct device_node *node) clk_hw->dpll_data = ad; clk_hw->hw.init = init; - clk_hw->flags = MEMMAP_ADDRESSING; init->name = node->name; init->ops = &apll_ck_ops; diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 37624e16cf04..c149bd169f43 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -248,7 +248,6 @@ struct clk *ti_clk_register_dpll(struct ti_clk *setup) clk_hw->dpll_data = dd; clk_hw->ops = &clkhwops_omap3_dpll; clk_hw->hw.init = &init; - clk_hw->flags = MEMMAP_ADDRESSING; init.name = setup->name; init.ops = ops; @@ -380,7 +379,6 @@ static void __init of_ti_dpll_setup(struct device_node *node, clk_hw->dpll_data = dd; clk_hw->ops = &clkhwops_omap3_dpll; clk_hw->hw.init = init; - clk_hw->flags = MEMMAP_ADDRESSING; init->name = node->name; init->ops = ops; diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index b3291dbca5ed..5ff62e2c63d0 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -113,7 +113,7 @@ static struct clk *_register_gate(struct device *dev, const char *name, clk_hw->enable_bit = bit_idx; clk_hw->ops = hw_ops; - clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags; + clk_hw->flags = clk_gate_flags; init.parent_names = &parent_name; init.num_parents = 1; @@ -203,7 +203,6 @@ struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup) ops = &clkhwops_iclk_wait; gate->ops = ops; - gate->flags = MEMMAP_ADDRESSING; return &gate->hw; } @@ -269,7 +268,6 @@ _of_ti_composite_gate_clk_setup(struct device_node *node, gate->enable_bit = val; gate->ops = hw_ops; - gate->flags = MEMMAP_ADDRESSING; if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE)) return; diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index 7927e1a2ba02..42d9fd4f5f6a 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c @@ -47,7 +47,6 @@ static struct clk *_register_interface(struct device *dev, const char *name, clk_hw->hw.init = &init; clk_hw->ops = ops; - clk_hw->flags = MEMMAP_ADDRESSING; clk_hw->enable_reg = reg; clk_hw->enable_bit = bit_idx; diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 626ae94b7444..affdabd0b6a1 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -168,7 +168,6 @@ struct clk_hw_omap { * should be used. This is a temporary solution - a better approach * would be to associate clock type-specific data with the clock, * similar to the struct dpll_data approach. - * MEMMAP_ADDRESSING: Use memmap addressing to access clock registers. 
*/ #define ENABLE_REG_32BIT (1 << 0) /* Use 32-bit access */ #define CLOCK_IDLE_CONTROL (1 << 1) @@ -176,7 +175,6 @@ struct clk_hw_omap { #define ENABLE_ON_INIT (1 << 3) /* Enable upon framework init */ #define INVERT_ENABLE (1 << 4) /* 0 enables, 1 disables */ #define CLOCK_CLKOUTX2 (1 << 5) -#define MEMMAP_ADDRESSING (1 << 6) /* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */ #define DPLL_LOW_POWER_STOP 0x1 -- cgit v1.2.3 From 6c0afb503937a12a8d20a805fcf263e31afa9871 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 9 Feb 2017 11:24:37 +0200 Subject: clk: ti: convert to use proper register definition for all accesses Currently, TI clock driver uses an encapsulated struct that is cast into a void pointer to store all register addresses. This can be considered as rather nasty hackery, and prevents from expanding the register address field also. Instead, replace all the code to use proper struct in place for this, which contains all the previously used data. This patch is rather large as it is touching multiple files, but this can't be split up as we need to avoid any boot breakage. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren --- arch/arm/mach-omap2/clkt2xxx_dpllcore.c | 3 +- arch/arm/mach-omap2/clock.c | 2 +- arch/arm/mach-omap2/clock.h | 2 ++ arch/arm/mach-omap2/cm.h | 5 +-- arch/arm/mach-omap2/cm2xxx.c | 9 ++--- arch/arm/mach-omap2/cm3xxx.c | 10 ++---- arch/arm/mach-omap2/cm_common.c | 2 +- drivers/clk/ti/apll.c | 47 ++++++++++++------------- drivers/clk/ti/autoidle.c | 18 +++++----- drivers/clk/ti/clk-3xxx.c | 55 +++++++++++++++-------------- drivers/clk/ti/clk.c | 47 ++++++++++++------------- drivers/clk/ti/clkt_dflt.c | 61 ++++++++++++--------------------- drivers/clk/ti/clkt_dpll.c | 6 ++-- drivers/clk/ti/clkt_iclk.c | 29 ++++++++-------- drivers/clk/ti/clock.h | 11 +++--- drivers/clk/ti/clockdomain.c | 8 ----- drivers/clk/ti/divider.c | 24 +++++++------ drivers/clk/ti/dpll.c | 48 ++++++++++---------------- drivers/clk/ti/dpll3xxx.c | 38 ++++++++++---------- drivers/clk/ti/dpll44xx.c | 14 ++++---- drivers/clk/ti/gate.c | 32 ++++++++--------- drivers/clk/ti/interface.c | 22 ++++++------ drivers/clk/ti/mux.c | 41 +++++++++------------- include/linux/clk/ti.h | 46 +++++++++++++------------ 24 files changed, 264 insertions(+), 316 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c index 59cf310bc1e9..e8d417309f33 100644 --- a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c +++ b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c @@ -138,7 +138,8 @@ int omap2_reprogram_dpllcore(struct clk_hw *hw, unsigned long rate, if (!dd) return -EINVAL; - tmpset.cm_clksel1_pll = readl_relaxed(dd->mult_div1_reg); + tmpset.cm_clksel1_pll = + omap_clk_ll_ops.clk_readl(&dd->mult_div1_reg); tmpset.cm_clksel1_pll &= ~(dd->mult_mask | dd->div1_mask); div = ((curr_prcm_set->xtal_speed / 1000000) - 1); diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index ae5b23c19b83..42881f21cede 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c @@ -54,7 +54,7 @@ u16 cpu_mask; #define OMAP3PLUS_DPLL_FINT_MIN 32000 #define OMAP3PLUS_DPLL_FINT_MAX 52000000 -static struct ti_clk_ll_ops omap_clk_ll_ops = { +struct ti_clk_ll_ops omap_clk_ll_ops = { .clkdm_clk_enable = clkdm_clk_enable, .clkdm_clk_disable = clkdm_clk_disable, .clkdm_lookup = clkdm_lookup, diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index 4e66295dca25..cf45550197e6 100644 --- 
a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -64,6 +64,8 @@ #define OMAP4XXX_EN_DPLL_FRBYPASS 0x6 #define OMAP4XXX_EN_DPLL_LOCKED 0x7 +extern struct ti_clk_ll_ops omap_clk_ll_ops; + extern u16 cpu_mask; extern const struct clkops clkops_omap2_dflt_wait; diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h index 1fe3e6b833d2..de75cbcdc9d1 100644 --- a/arch/arm/mach-omap2/cm.h +++ b/arch/arm/mach-omap2/cm.h @@ -23,6 +23,7 @@ #define MAX_MODULE_READY_TIME 2000 # ifndef __ASSEMBLER__ +#include extern void __iomem *cm_base; extern void __iomem *cm2_base; extern void omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2); @@ -50,7 +51,7 @@ extern void omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2); * @module_disable: ptr to the SoC CM-specific module_disable impl */ struct cm_ll_data { - int (*split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst, + int (*split_idlest_reg)(struct clk_omap_reg *idlest_reg, s16 *prcm_inst, u8 *idlest_reg_id); int (*wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg, u8 idlest_shift); @@ -60,7 +61,7 @@ struct cm_ll_data { void (*module_disable)(u8 part, u16 inst, u16 clkctrl_offs); }; -extern int cm_split_idlest_reg(void __iomem *idlest_reg, s16 *prcm_inst, +extern int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst, u8 *idlest_reg_id); int omap_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_reg, u8 idlest_shift); diff --git a/arch/arm/mach-omap2/cm2xxx.c b/arch/arm/mach-omap2/cm2xxx.c index 3e5fd3587eb1..cd90b4c6a06b 100644 --- a/arch/arm/mach-omap2/cm2xxx.c +++ b/arch/arm/mach-omap2/cm2xxx.c @@ -204,7 +204,7 @@ void omap2xxx_cm_apll96_disable(void) * XXX This function is only needed until absolute register addresses are * removed from the OMAP struct clk records. */ -static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg, +static int omap2xxx_cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst, u8 *idlest_reg_id) { @@ -212,10 +212,7 @@ static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg, u8 idlest_offs; int i; - if (idlest_reg < cm_base || idlest_reg > (cm_base + 0x0fff)) - return -EINVAL; - - idlest_offs = (unsigned long)idlest_reg & 0xff; + idlest_offs = idlest_reg->offset & 0xff; for (i = 0; i < ARRAY_SIZE(omap2xxx_cm_idlest_offs); i++) { if (idlest_offs == omap2xxx_cm_idlest_offs[i]) { *idlest_reg_id = i + 1; @@ -226,7 +223,7 @@ static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg, if (i == ARRAY_SIZE(omap2xxx_cm_idlest_offs)) return -EINVAL; - offs = idlest_reg - cm_base; + offs = idlest_reg->offset; offs &= 0xff00; *prcm_inst = offs; diff --git a/arch/arm/mach-omap2/cm3xxx.c b/arch/arm/mach-omap2/cm3xxx.c index d91ae8206d1e..55b046a719dc 100644 --- a/arch/arm/mach-omap2/cm3xxx.c +++ b/arch/arm/mach-omap2/cm3xxx.c @@ -118,7 +118,7 @@ static int omap3xxx_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_id, * XXX This function is only needed until absolute register addresses are * removed from the OMAP struct clk records. 
*/ -static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg, +static int omap3xxx_cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst, u8 *idlest_reg_id) { @@ -126,11 +126,7 @@ static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg, u8 idlest_offs; int i; - if (idlest_reg < (cm_base + OMAP3430_IVA2_MOD) || - idlest_reg > (cm_base + 0x1ffff)) - return -EINVAL; - - idlest_offs = (unsigned long)idlest_reg & 0xff; + idlest_offs = idlest_reg->offset & 0xff; for (i = 0; i < ARRAY_SIZE(omap3xxx_cm_idlest_offs); i++) { if (idlest_offs == omap3xxx_cm_idlest_offs[i]) { *idlest_reg_id = i + 1; @@ -141,7 +137,7 @@ static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg, if (i == ARRAY_SIZE(omap3xxx_cm_idlest_offs)) return -EINVAL; - offs = idlest_reg - cm_base; + offs = idlest_reg->offset; offs &= 0xff00; *prcm_inst = offs; diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c index 23e8bcec34e3..bbe41f4c9dc8 100644 --- a/arch/arm/mach-omap2/cm_common.c +++ b/arch/arm/mach-omap2/cm_common.c @@ -65,7 +65,7 @@ void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2) * or 0 upon success. XXX This function is only needed until absolute * register addresses are removed from the OMAP struct clk records. */ -int cm_split_idlest_reg(void __iomem *idlest_reg, s16 *prcm_inst, +int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst, u8 *idlest_reg_id) { if (!cm_ll_data->split_idlest_reg) { diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 5cba28c6ab35..06f486b3488c 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -55,20 +55,20 @@ static int dra7_apll_enable(struct clk_hw *hw) state <<= __ffs(ad->idlest_mask); /* Check is already locked */ - v = ti_clk_ll_ops->clk_readl(ad->idlest_reg); + v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg); if ((v & ad->idlest_mask) == state) return r; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ~ad->enable_mask; v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); state <<= __ffs(ad->idlest_mask); while (1) { - v = ti_clk_ll_ops->clk_readl(ad->idlest_reg); + v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg); if ((v & ad->idlest_mask) == state) break; if (i > MAX_APLL_WAIT_TRIES) @@ -99,10 +99,10 @@ static void dra7_apll_disable(struct clk_hw *hw) state <<= __ffs(ad->idlest_mask); - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ~ad->enable_mask; v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); } static int dra7_apll_is_enabled(struct clk_hw *hw) @@ -113,7 +113,7 @@ static int dra7_apll_is_enabled(struct clk_hw *hw) ad = clk->dpll_data; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ad->enable_mask; v >>= __ffs(ad->enable_mask); @@ -185,6 +185,7 @@ static void __init of_dra7_apll_setup(struct device_node *node) struct clk_hw_omap *clk_hw = NULL; struct clk_init_data *init = NULL; const char **parent_names = NULL; + int ret; ad = kzalloc(sizeof(*ad), GFP_KERNEL); clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); @@ -212,10 +213,10 @@ static void __init of_dra7_apll_setup(struct device_node *node) init->parent_names = parent_names; - ad->control_reg = ti_clk_get_reg_addr(node, 0); - ad->idlest_reg = 
ti_clk_get_reg_addr(node, 1); + ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg); + ret |= ti_clk_get_reg_addr(node, 1, &ad->idlest_reg); - if (IS_ERR(ad->control_reg) || IS_ERR(ad->idlest_reg)) + if (ret) goto cleanup; ad->idlest_mask = 0x1; @@ -241,7 +242,7 @@ static int omap2_apll_is_enabled(struct clk_hw *hw) struct dpll_data *ad = clk->dpll_data; u32 v; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ad->enable_mask; v >>= __ffs(ad->enable_mask); @@ -267,13 +268,13 @@ static int omap2_apll_enable(struct clk_hw *hw) u32 v; int i = 0; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ~ad->enable_mask; v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); while (1) { - v = ti_clk_ll_ops->clk_readl(ad->idlest_reg); + v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg); if (v & ad->idlest_mask) break; if (i > MAX_APLL_WAIT_TRIES) @@ -297,10 +298,10 @@ static void omap2_apll_disable(struct clk_hw *hw) struct dpll_data *ad = clk->dpll_data; u32 v; - v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v = ti_clk_ll_ops->clk_readl(&ad->control_reg); v &= ~ad->enable_mask; v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); } static struct clk_ops omap2_apll_ops = { @@ -315,10 +316,10 @@ static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val) struct dpll_data *ad = clk->dpll_data; u32 v; - v = ti_clk_ll_ops->clk_readl(ad->autoidle_reg); + v = ti_clk_ll_ops->clk_readl(&ad->autoidle_reg); v &= ~ad->autoidle_mask; v |= val << __ffs(ad->autoidle_mask); - ti_clk_ll_ops->clk_writel(v, ad->control_reg); + ti_clk_ll_ops->clk_writel(v, &ad->control_reg); } #define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3 @@ -347,6 +348,7 @@ static void __init of_omap2_apll_setup(struct device_node *node) struct clk *clk; const char *parent_name; u32 val; + int ret; ad = kzalloc(sizeof(*ad), GFP_KERNEL); clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); @@ -392,12 +394,11 @@ static void __init of_omap2_apll_setup(struct device_node *node) ad->idlest_mask = 1 << val; - ad->control_reg = ti_clk_get_reg_addr(node, 0); - ad->autoidle_reg = ti_clk_get_reg_addr(node, 1); - ad->idlest_reg = ti_clk_get_reg_addr(node, 2); + ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg); + ret |= ti_clk_get_reg_addr(node, 1, &ad->autoidle_reg); + ret |= ti_clk_get_reg_addr(node, 2, &ad->idlest_reg); - if (IS_ERR(ad->control_reg) || IS_ERR(ad->autoidle_reg) || - IS_ERR(ad->idlest_reg)) + if (ret) goto cleanup; clk = clk_register(NULL, &clk_hw->hw); diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c index 345af43465f0..7bb9afbe4058 100644 --- a/drivers/clk/ti/autoidle.c +++ b/drivers/clk/ti/autoidle.c @@ -25,7 +25,7 @@ #include "clock.h" struct clk_ti_autoidle { - void __iomem *reg; + struct clk_omap_reg reg; u8 shift; u8 flags; const char *name; @@ -73,28 +73,28 @@ static void _allow_autoidle(struct clk_ti_autoidle *clk) { u32 val; - val = ti_clk_ll_ops->clk_readl(clk->reg); + val = ti_clk_ll_ops->clk_readl(&clk->reg); if (clk->flags & AUTOIDLE_LOW) val &= ~(1 << clk->shift); else val |= (1 << clk->shift); - ti_clk_ll_ops->clk_writel(val, clk->reg); + ti_clk_ll_ops->clk_writel(val, &clk->reg); } static void _deny_autoidle(struct clk_ti_autoidle *clk) { u32 val; - val = ti_clk_ll_ops->clk_readl(clk->reg); + val = 
ti_clk_ll_ops->clk_readl(&clk->reg); if (clk->flags & AUTOIDLE_LOW) val |= (1 << clk->shift); else val &= ~(1 << clk->shift); - ti_clk_ll_ops->clk_writel(val, clk->reg); + ti_clk_ll_ops->clk_writel(val, &clk->reg); } /** @@ -140,6 +140,7 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node) { u32 shift; struct clk_ti_autoidle *clk; + int ret; /* Check if this clock has autoidle support or not */ if (of_property_read_u32(node, "ti,autoidle-shift", &shift)) @@ -152,11 +153,10 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node) clk->shift = shift; clk->name = node->name; - clk->reg = ti_clk_get_reg_addr(node, 0); - - if (IS_ERR(clk->reg)) { + ret = ti_clk_get_reg_addr(node, 0, &clk->reg); + if (ret) { kfree(clk); - return -EINVAL; + return ret; } if (of_property_read_bool(node, "ti,invert-autoidle-bit")) diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 11d8aa3ec186..b1251cae98b8 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c @@ -52,14 +52,13 @@ * @idlest_reg and @idlest_bit. No return value. */ static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } @@ -85,15 +84,15 @@ const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = { * default find_idlest code assumes that they are at the same * position.) No return value. */ -static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) +static void +omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk, + struct clk_omap_reg *idlest_reg, + u8 *idlest_bit, u8 *idlest_val) { - u32 r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; /* USBHOST_IDLE has same shift */ *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; @@ -122,15 +121,15 @@ const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = { * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via * @idlest_reg and @idlest_bit. No return value. */ -static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, - u8 *idlest_bit, - u8 *idlest_val) +static void +omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk, + struct clk_omap_reg *idlest_reg, + u8 *idlest_bit, + u8 *idlest_val) { - u32 r; - - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } @@ -154,11 +153,11 @@ const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = { * bit. A value of 1 indicates that clock is enabled. 
*/ static void am35xx_clk_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - *idlest_reg = (__force void __iomem *)(clk->enable_reg); + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); *idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET; *idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL; } @@ -178,10 +177,10 @@ static void am35xx_clk_find_idlest(struct clk_hw_omap *clk, * avoid this issue, and remove the casts. No return value. */ static void am35xx_clk_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, + struct clk_omap_reg *other_reg, u8 *other_bit) { - *other_reg = (__force void __iomem *)(clk->enable_reg); + memcpy(other_reg, &clk->enable_reg, sizeof(*other_reg)); if (clk->enable_bit & AM35XX_IPSS_ICK_MASK) *other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET; else @@ -205,14 +204,14 @@ const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = { * and @idlest_bit. No return value. */ static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - u32 r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; *idlest_bit = AM35XX_ST_IPSS_SHIFT; *idlest_val = OMAP34XX_CM_IDLEST_VAL; } diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 024123f421d6..ddbad7e8d7c9 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -43,27 +43,29 @@ struct clk_iomap { static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS]; -static void clk_memmap_writel(u32 val, void __iomem *reg) +static void clk_memmap_writel(u32 val, const struct clk_omap_reg *reg) { - struct clk_omap_reg *r = (struct clk_omap_reg *)® - struct clk_iomap *io = clk_memmaps[r->index]; + struct clk_iomap *io = clk_memmaps[reg->index]; - if (io->regmap) - regmap_write(io->regmap, r->offset, val); + if (reg->ptr) + writel_relaxed(val, reg->ptr); + else if (io->regmap) + regmap_write(io->regmap, reg->offset, val); else - writel_relaxed(val, io->mem + r->offset); + writel_relaxed(val, io->mem + reg->offset); } -static u32 clk_memmap_readl(void __iomem *reg) +static u32 clk_memmap_readl(const struct clk_omap_reg *reg) { u32 val; - struct clk_omap_reg *r = (struct clk_omap_reg *)® - struct clk_iomap *io = clk_memmaps[r->index]; + struct clk_iomap *io = clk_memmaps[reg->index]; - if (io->regmap) - regmap_read(io->regmap, r->offset, &val); + if (reg->ptr) + val = readl_relaxed(reg->ptr); + else if (io->regmap) + regmap_read(io->regmap, reg->offset, &val); else - val = readl_relaxed(io->mem + r->offset); + val = readl_relaxed(io->mem + reg->offset); return val; } @@ -162,20 +164,18 @@ int __init ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, * ti_clk_get_reg_addr - get register address for a clock register * @node: device node for the clock * @index: register index from the clock node + * @reg: pointer to target register struct * - * Builds clock register address from device tree information. This - * is a struct of type clk_omap_reg. Returns a pointer to the register - * address, or a pointer error value in failure. + * Builds clock register address from device tree information, and returns + * the data via the provided output pointer @reg. Returns 0 on success, + * negative error value on failure. 
*/ -void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index) +int ti_clk_get_reg_addr(struct device_node *node, int index, + struct clk_omap_reg *reg) { - struct clk_omap_reg *reg; u32 val; - u32 tmp; int i; - reg = (struct clk_omap_reg *)&tmp; - for (i = 0; i < CLK_MAX_MEMMAPS; i++) { if (clocks_node_ptr[i] == node->parent) break; @@ -183,19 +183,20 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index) if (i == CLK_MAX_MEMMAPS) { pr_err("clk-provider not found for %s!\n", node->name); - return IOMEM_ERR_PTR(-ENOENT); + return -ENOENT; } reg->index = i; if (of_property_read_u32_index(node, "reg", index, &val)) { pr_err("%s must have reg[%d]!\n", node->name, index); - return IOMEM_ERR_PTR(-EINVAL); + return -EINVAL; } reg->offset = val; + reg->ptr = NULL; - return (__force void __iomem *)tmp; + return 0; } /** diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c index c6ae563801d7..91751dd26b16 100644 --- a/drivers/clk/ti/clkt_dflt.c +++ b/drivers/clk/ti/clkt_dflt.c @@ -55,7 +55,8 @@ * elapsed. XXX Deprecated - should be moved into drivers for the * individual IP block that the IDLEST register exists in. */ -static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg, +static int _wait_idlest_generic(struct clk_hw_omap *clk, + struct clk_omap_reg *reg, u32 mask, u8 idlest, const char *name) { int i = 0, ena = 0; @@ -91,7 +92,7 @@ static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg, */ static void _omap2_module_wait_ready(struct clk_hw_omap *clk) { - void __iomem *companion_reg, *idlest_reg; + struct clk_omap_reg companion_reg, idlest_reg; u8 other_bit, idlest_bit, idlest_val, idlest_reg_id; s16 prcm_mod; int r; @@ -99,17 +100,17 @@ static void _omap2_module_wait_ready(struct clk_hw_omap *clk) /* Not all modules have multiple clocks that their IDLEST depends on */ if (clk->ops->find_companion) { clk->ops->find_companion(clk, &companion_reg, &other_bit); - if (!(ti_clk_ll_ops->clk_readl(companion_reg) & + if (!(ti_clk_ll_ops->clk_readl(&companion_reg) & (1 << other_bit))) return; } clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val); - r = ti_clk_ll_ops->cm_split_idlest_reg(idlest_reg, &prcm_mod, + r = ti_clk_ll_ops->cm_split_idlest_reg(&idlest_reg, &prcm_mod, &idlest_reg_id); if (r) { /* IDLEST register not in the CM module */ - _wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit), + _wait_idlest_generic(clk, &idlest_reg, (1 << idlest_bit), idlest_val, clk_hw_get_name(&clk->hw)); } else { ti_clk_ll_ops->cm_wait_module_ready(0, prcm_mod, idlest_reg_id, @@ -139,17 +140,17 @@ static void _omap2_module_wait_ready(struct clk_hw_omap *clk) * avoid this issue, and remove the casts. No return value. */ void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, u8 *other_bit) + struct clk_omap_reg *other_reg, + u8 *other_bit) { - u32 r; + memcpy(other_reg, &clk->enable_reg, sizeof(*other_reg)); /* * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes * it's just a matter of XORing the bits. */ - r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN)); + other_reg->offset ^= (CM_FCLKEN ^ CM_ICLKEN); - *other_reg = (__force void __iomem *)r; *other_bit = clk->enable_bit; } @@ -168,13 +169,14 @@ void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, * CM_IDLEST2). This is not true for all modules. No return value. 
*/ void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, u8 *idlest_bit, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - u32 r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); + + idlest_reg->offset &= ~0xf0; + idlest_reg->offset |= 0x20; - r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20); - *idlest_reg = (__force void __iomem *)r; *idlest_bit = clk->enable_bit; /* @@ -222,31 +224,19 @@ int omap2_dflt_clk_enable(struct clk_hw *hw) } } - if (IS_ERR(clk->enable_reg)) { - pr_err("%s: %s missing enable_reg\n", __func__, - clk_hw_get_name(hw)); - ret = -EINVAL; - goto err; - } - /* FIXME should not have INVERT_ENABLE bit here */ - v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); if (clk->flags & INVERT_ENABLE) v &= ~(1 << clk->enable_bit); else v |= (1 << clk->enable_bit); - ti_clk_ll_ops->clk_writel(v, clk->enable_reg); - v = ti_clk_ll_ops->clk_readl(clk->enable_reg); /* OCP barrier */ + ti_clk_ll_ops->clk_writel(v, &clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); /* OCP barrier */ if (clk->ops && clk->ops->find_idlest) _omap2_module_wait_ready(clk); return 0; - -err: - if (clkdm_control && clk->clkdm) - ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk); - return ret; } /** @@ -264,22 +254,13 @@ void omap2_dflt_clk_disable(struct clk_hw *hw) u32 v; clk = to_clk_hw_omap(hw); - if (IS_ERR(clk->enable_reg)) { - /* - * 'independent' here refers to a clock which is not - * controlled by its parent. - */ - pr_err("%s: independent clock %s has no enable_reg\n", - __func__, clk_hw_get_name(hw)); - return; - } - v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); if (clk->flags & INVERT_ENABLE) v |= (1 << clk->enable_bit); else v &= ~(1 << clk->enable_bit); - ti_clk_ll_ops->clk_writel(v, clk->enable_reg); + ti_clk_ll_ops->clk_writel(v, &clk->enable_reg); /* No OCP barrier needed here since it is a disable operation */ if (!(ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) && @@ -300,7 +281,7 @@ int omap2_dflt_clk_is_enabled(struct clk_hw *hw) struct clk_hw_omap *clk = to_clk_hw_omap(hw); u32 v; - v = ti_clk_ll_ops->clk_readl(clk->enable_reg); + v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); if (clk->flags & INVERT_ENABLE) v ^= BIT(clk->enable_bit); diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c index b919fdfe8256..ce98da2c10be 100644 --- a/drivers/clk/ti/clkt_dpll.c +++ b/drivers/clk/ti/clkt_dpll.c @@ -213,7 +213,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw) if (!dd) return -EINVAL; - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); v &= dd->enable_mask; v >>= __ffs(dd->enable_mask); @@ -249,14 +249,14 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) return 0; /* Return bypass rate if DPLL is bypassed */ - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); v &= dd->enable_mask; v >>= __ffs(dd->enable_mask); if (_omap2_dpll_is_in_bypass(v)) return clk_hw_get_rate(dd->clk_bypass); - v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg); + v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); dpll_mult = v & dd->mult_mask; dpll_mult >>= __ffs(dd->mult_mask); dpll_div = v & dd->div1_mask; diff --git a/drivers/clk/ti/clkt_iclk.c b/drivers/clk/ti/clkt_iclk.c index 38c36908cf88..60b583d7db33 100644 --- a/drivers/clk/ti/clkt_iclk.c +++ b/drivers/clk/ti/clkt_iclk.c @@ -31,28 +31,29 @@ void 
omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk) { u32 v; - void __iomem *r; + struct clk_omap_reg r; - r = (__force void __iomem *) - ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); + memcpy(&r, &clk->enable_reg, sizeof(r)); + r.offset ^= (CM_AUTOIDLE ^ CM_ICLKEN); - v = ti_clk_ll_ops->clk_readl(r); + v = ti_clk_ll_ops->clk_readl(&r); v |= (1 << clk->enable_bit); - ti_clk_ll_ops->clk_writel(v, r); + ti_clk_ll_ops->clk_writel(v, &r); } /* XXX */ void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk) { u32 v; - void __iomem *r; + struct clk_omap_reg r; - r = (__force void __iomem *) - ((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN)); + memcpy(&r, &clk->enable_reg, sizeof(r)); - v = ti_clk_ll_ops->clk_readl(r); + r.offset ^= (CM_AUTOIDLE ^ CM_ICLKEN); + + v = ti_clk_ll_ops->clk_readl(&r); v &= ~(1 << clk->enable_bit); - ti_clk_ll_ops->clk_writel(v, r); + ti_clk_ll_ops->clk_writel(v, &r); } /** @@ -68,14 +69,12 @@ void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk) * modules. No return value. */ static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val) { - u32 r; - - r = ((__force u32)clk->enable_reg ^ (OMAP24XX_CM_FCLKEN2 ^ CM_IDLEST)); - *idlest_reg = (__force void __iomem *)r; + memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg)); + idlest_reg->offset ^= (OMAP24XX_CM_FCLKEN2 ^ CM_IDLEST); *idlest_bit = clk->enable_bit; *idlest_val = OMAP24XX_CM_IDLEST_VAL; } diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index 437ea768f837..3f7b26540be8 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -18,7 +18,7 @@ struct clk_omap_divider { struct clk_hw hw; - void __iomem *reg; + struct clk_omap_reg reg; u8 shift; u8 width; u8 flags; @@ -29,7 +29,7 @@ struct clk_omap_divider { struct clk_omap_mux { struct clk_hw hw; - void __iomem *reg; + struct clk_omap_reg reg; u32 *table; u32 mask; u8 shift; @@ -228,7 +228,8 @@ void ti_clk_patch_legacy_clks(struct ti_clk **patch); struct clk *ti_clk_register_clk(struct ti_clk *setup); int ti_clk_register_legacy_clks(struct ti_clk_alias *clks); -void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); +int ti_clk_get_reg_addr(struct device_node *node, int index, + struct clk_omap_reg *reg); void ti_dt_clocks_register(struct ti_dt_clk *oclks); int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, ti_of_clk_init_cb_t func); @@ -263,10 +264,10 @@ int omap2_dflt_clk_enable(struct clk_hw *hw); void omap2_dflt_clk_disable(struct clk_hw *hw); int omap2_dflt_clk_is_enabled(struct clk_hw *hw); void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk, - void __iomem **other_reg, + struct clk_omap_reg *other_reg, u8 *other_bit); void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val); void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk); diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c index 704157d8c0b7..fbedc6a9fed0 100644 --- a/drivers/clk/ti/clockdomain.c +++ b/drivers/clk/ti/clockdomain.c @@ -52,10 +52,6 @@ int omap2_clkops_enable_clkdm(struct clk_hw *hw) return -EINVAL; } - if (unlikely(clk->enable_reg)) - pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__, - clk_hw_get_name(hw)); - if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) { pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", __func__, clk_hw_get_name(hw)); 
@@ -90,10 +86,6 @@ void omap2_clkops_disable_clkdm(struct clk_hw *hw) return; } - if (unlikely(clk->enable_reg)) - pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__, - clk_hw_get_name(hw)); - if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) { pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n", __func__, clk_hw_get_name(hw)); diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 1cc0242d823f..d6dcb283b72b 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -100,7 +100,7 @@ static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw, struct clk_omap_divider *divider = to_clk_omap_divider(hw); unsigned int div, val; - val = ti_clk_ll_ops->clk_readl(divider->reg) >> divider->shift; + val = ti_clk_ll_ops->clk_readl(÷r->reg) >> divider->shift; val &= div_mask(divider); div = _get_div(divider, val); @@ -257,11 +257,11 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { val = div_mask(divider) << (divider->shift + 16); } else { - val = ti_clk_ll_ops->clk_readl(divider->reg); + val = ti_clk_ll_ops->clk_readl(÷r->reg); val &= ~(div_mask(divider) << divider->shift); } val |= value << divider->shift; - ti_clk_ll_ops->clk_writel(val, divider->reg); + ti_clk_ll_ops->clk_writel(val, ÷r->reg); return 0; } @@ -274,7 +274,8 @@ const struct clk_ops ti_clk_divider_ops = { static struct clk *_register_divider(struct device *dev, const char *name, const char *parent_name, - unsigned long flags, void __iomem *reg, + unsigned long flags, + struct clk_omap_reg *reg, u8 shift, u8 width, u8 clk_divider_flags, const struct clk_div_table *table) { @@ -303,7 +304,7 @@ static struct clk *_register_divider(struct device *dev, const char *name, init.num_parents = (parent_name ? 
1 : 0); /* struct clk_divider assignments */ - div->reg = reg; + memcpy(&div->reg, reg, sizeof(*reg)); div->shift = shift; div->width = width; div->flags = clk_divider_flags; @@ -561,14 +562,15 @@ static int _get_divider_width(struct device_node *node, } static int __init ti_clk_divider_populate(struct device_node *node, - void __iomem **reg, const struct clk_div_table **table, + struct clk_omap_reg *reg, const struct clk_div_table **table, u32 *flags, u8 *div_flags, u8 *width, u8 *shift) { u32 val; + int ret; - *reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(*reg)) - return PTR_ERR(*reg); + ret = ti_clk_get_reg_addr(node, 0, reg); + if (ret) + return ret; if (!of_property_read_u32(node, "ti,bit-shift", &val)) *shift = val; @@ -607,7 +609,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node) { struct clk *clk; const char *parent_name; - void __iomem *reg; + struct clk_omap_reg reg; u8 clk_divider_flags = 0; u8 width = 0; u8 shift = 0; @@ -620,7 +622,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node) &clk_divider_flags, &width, &shift)) goto cleanup; - clk = _register_divider(NULL, node->name, parent_name, flags, reg, + clk = _register_divider(NULL, node->name, parent_name, flags, ®, shift, width, clk_divider_flags, table); if (!IS_ERR(clk)) { diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 778bc90955b9..96d84888c6c5 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -203,17 +203,10 @@ cleanup: } #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS) -static void __iomem *_get_reg(u8 module, u16 offset) +void _get_reg(u8 module, u16 offset, struct clk_omap_reg *reg) { - u32 reg; - struct clk_omap_reg *reg_setup; - - reg_setup = (struct clk_omap_reg *)® - - reg_setup->index = module; - reg_setup->offset = offset; - - return (void __iomem *)reg; + reg->index = module; + reg->offset = offset; } struct clk *ti_clk_register_dpll(struct ti_clk *setup) @@ -255,10 +248,10 @@ struct clk *ti_clk_register_dpll(struct ti_clk *setup) init.num_parents = dpll->num_parents; init.parent_names = dpll->parents; - dd->control_reg = _get_reg(dpll->module, dpll->control_reg); - dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg); - dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg); - dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg); + _get_reg(dpll->module, dpll->control_reg, &dd->control_reg); + _get_reg(dpll->module, dpll->idlest_reg, &dd->idlest_reg); + _get_reg(dpll->module, dpll->mult_div1_reg, &dd->mult_div1_reg); + _get_reg(dpll->module, dpll->autoidle_reg, &dd->autoidle_reg); dd->modes = dpll->modes; dd->div1_mask = dpll->div1_mask; @@ -344,12 +337,9 @@ static void _register_dpll_x2(struct device_node *node, ret = of_property_count_elems_of_size(node, "reg", 1); if (ret <= 0) { hw_ops = NULL; - } else { - clk_hw->clksel_reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(clk_hw->clksel_reg)) { - kfree(clk_hw); - return; - } + } else if (ti_clk_get_reg_addr(node, 0, &clk_hw->clksel_reg)) { + kfree(clk_hw); + return; } } @@ -412,7 +402,8 @@ static void __init of_ti_dpll_setup(struct device_node *node, init->parent_names = parent_names; - dd->control_reg = ti_clk_get_reg_addr(node, 0); + if (ti_clk_get_reg_addr(node, 0, &dd->control_reg)) + goto cleanup; /* * Special case for OMAP2 DPLL, register order is different due to @@ -420,25 +411,22 @@ static void __init of_ti_dpll_setup(struct device_node *node, * missing idlest_mask. 
*/ if (!dd->idlest_mask) { - dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1); + if (ti_clk_get_reg_addr(node, 1, &dd->mult_div1_reg)) + goto cleanup; #ifdef CONFIG_ARCH_OMAP2 clk_hw->ops = &clkhwops_omap2xxx_dpll; omap2xxx_clkt_dpllcore_init(&clk_hw->hw); #endif } else { - dd->idlest_reg = ti_clk_get_reg_addr(node, 1); - if (IS_ERR(dd->idlest_reg)) + if (ti_clk_get_reg_addr(node, 1, &dd->idlest_reg)) goto cleanup; - dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2); + if (ti_clk_get_reg_addr(node, 2, &dd->mult_div1_reg)) + goto cleanup; } - if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg)) - goto cleanup; - if (dd->autoidle_mask) { - dd->autoidle_reg = ti_clk_get_reg_addr(node, 3); - if (IS_ERR(dd->autoidle_reg)) + if (ti_clk_get_reg_addr(node, 3, &dd->autoidle_reg)) goto cleanup; } diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c index 4cdd28a25584..4534de2ef455 100644 --- a/drivers/clk/ti/dpll3xxx.c +++ b/drivers/clk/ti/dpll3xxx.c @@ -54,10 +54,10 @@ static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits) dd = clk->dpll_data; - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); v &= ~dd->enable_mask; v |= clken_bits << __ffs(dd->enable_mask); - ti_clk_ll_ops->clk_writel(v, dd->control_reg); + ti_clk_ll_ops->clk_writel(v, &dd->control_reg); } /* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */ @@ -73,7 +73,7 @@ static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state) state <<= __ffs(dd->idlest_mask); - while (((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) + while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) != state) && i < MAX_DPLL_WAIT_TRIES) { i++; udelay(1); @@ -151,7 +151,7 @@ static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk) state <<= __ffs(dd->idlest_mask); /* Check if already locked */ - if ((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) == + if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) == state) goto done; @@ -317,14 +317,14 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) * only since freqsel field is no longer present on other devices. 
*/ if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) { - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); v &= ~dd->freqsel_mask; v |= freqsel << __ffs(dd->freqsel_mask); - ti_clk_ll_ops->clk_writel(v, dd->control_reg); + ti_clk_ll_ops->clk_writel(v, &dd->control_reg); } /* Set DPLL multiplier, divider */ - v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg); + v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg); /* Handle Duty Cycle Correction */ if (dd->dcc_mask) { @@ -370,11 +370,11 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) } } - ti_clk_ll_ops->clk_writel(v, dd->mult_div1_reg); + ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg); /* Set 4X multiplier and low-power mode */ if (dd->m4xen_mask || dd->lpmode_mask) { - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); if (dd->m4xen_mask) { if (dd->last_rounded_m4xen) @@ -390,7 +390,7 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) v &= ~dd->lpmode_mask; } - ti_clk_ll_ops->clk_writel(v, dd->control_reg); + ti_clk_ll_ops->clk_writel(v, &dd->control_reg); } /* We let the clock framework set the other output dividers later */ @@ -652,10 +652,10 @@ static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk) dd = clk->dpll_data; - if (!dd->autoidle_reg) + if (!dd->autoidle_mask) return -EINVAL; - v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); v &= dd->autoidle_mask; v >>= __ffs(dd->autoidle_mask); @@ -681,7 +681,7 @@ static void omap3_dpll_allow_idle(struct clk_hw_omap *clk) dd = clk->dpll_data; - if (!dd->autoidle_reg) + if (!dd->autoidle_mask) return; /* @@ -689,10 +689,10 @@ static void omap3_dpll_allow_idle(struct clk_hw_omap *clk) * by writing 0x5 instead of 0x1. Add some mechanism to * optionally enter this mode. */ - v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); v &= ~dd->autoidle_mask; v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask); - ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg); + ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg); } /** @@ -711,13 +711,13 @@ static void omap3_dpll_deny_idle(struct clk_hw_omap *clk) dd = clk->dpll_data; - if (!dd->autoidle_reg) + if (!dd->autoidle_mask) return; - v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg); + v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg); v &= ~dd->autoidle_mask; v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask); - ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg); + ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg); } /* Clock control for DPLL outputs */ @@ -773,7 +773,7 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, WARN_ON(!dd->enable_mask); - v = ti_clk_ll_ops->clk_readl(dd->control_reg) & dd->enable_mask; + v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask; v >>= __ffs(dd->enable_mask); if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE)) rate = parent_rate; diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c index 82c05b55a7be..d7a3f7ec8d77 100644 --- a/drivers/clk/ti/dpll44xx.c +++ b/drivers/clk/ti/dpll44xx.c @@ -42,17 +42,17 @@ static void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk) u32 v; u32 mask; - if (!clk || !clk->clksel_reg) + if (!clk) return; mask = clk->flags & CLOCK_CLKOUTX2 ? 
OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; - v = ti_clk_ll_ops->clk_readl(clk->clksel_reg); + v = ti_clk_ll_ops->clk_readl(&clk->clksel_reg); /* Clear the bit to allow gatectrl */ v &= ~mask; - ti_clk_ll_ops->clk_writel(v, clk->clksel_reg); + ti_clk_ll_ops->clk_writel(v, &clk->clksel_reg); } static void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk) @@ -60,17 +60,17 @@ static void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk) u32 v; u32 mask; - if (!clk || !clk->clksel_reg) + if (!clk) return; mask = clk->flags & CLOCK_CLKOUTX2 ? OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK : OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK; - v = ti_clk_ll_ops->clk_readl(clk->clksel_reg); + v = ti_clk_ll_ops->clk_readl(&clk->clksel_reg); /* Set the bit to deny gatectrl */ v |= mask; - ti_clk_ll_ops->clk_writel(v, clk->clksel_reg); + ti_clk_ll_ops->clk_writel(v, &clk->clksel_reg); } const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = { @@ -128,7 +128,7 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, rate = omap2_get_dpll_rate(clk); /* regm4xen adds a multiplier of 4 to DPLL calculations */ - v = ti_clk_ll_ops->clk_readl(dd->control_reg); + v = ti_clk_ll_ops->clk_readl(&dd->control_reg); if (v & OMAP4430_DPLL_REGM4XEN_MASK) rate *= OMAP4430_REGM4XEN_MULT; diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index 77f0920e12f1..7151ec3a1b07 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -76,15 +76,15 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw) /* Restore the dividers */ if (!ret) { - orig_v = ti_clk_ll_ops->clk_readl(parent->reg); + orig_v = ti_clk_ll_ops->clk_readl(&parent->reg); dummy_v = orig_v; /* Write any other value different from the Read value */ dummy_v ^= (1 << parent->shift); - ti_clk_ll_ops->clk_writel(dummy_v, parent->reg); + ti_clk_ll_ops->clk_writel(dummy_v, &parent->reg); /* Write the original divider */ - ti_clk_ll_ops->clk_writel(orig_v, parent->reg); + ti_clk_ll_ops->clk_writel(orig_v, &parent->reg); } return ret; @@ -92,7 +92,7 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw) static struct clk *_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, - void __iomem *reg, u8 bit_idx, + struct clk_omap_reg *reg, u8 bit_idx, u8 clk_gate_flags, const struct clk_ops *ops, const struct clk_hw_omap_ops *hw_ops) { @@ -109,7 +109,7 @@ static struct clk *_register_gate(struct device *dev, const char *name, init.name = name; init.ops = ops; - clk_hw->enable_reg = reg; + memcpy(&clk_hw->enable_reg, reg, sizeof(*reg)); clk_hw->enable_bit = bit_idx; clk_hw->ops = hw_ops; @@ -133,8 +133,7 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup) { const struct clk_ops *ops = &omap_gate_clk_ops; const struct clk_hw_omap_ops *hw_ops = NULL; - u32 reg; - struct clk_omap_reg *reg_setup; + struct clk_omap_reg reg; u32 flags = 0; u8 clk_gate_flags = 0; struct ti_clk_gate *gate; @@ -144,8 +143,6 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup) if (gate->flags & CLKF_INTERFACE) return ti_clk_register_interface(setup); - reg_setup = (struct clk_omap_reg *)® - if (gate->flags & CLKF_SET_RATE_PARENT) flags |= CLK_SET_RATE_PARENT; @@ -169,11 +166,12 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup) if (gate->flags & CLKF_AM35XX) hw_ops = &clkhwops_am35xx_ipss_module_wait; - reg_setup->index = gate->module; - reg_setup->offset = gate->reg; + reg.index = gate->module; + reg.offset = gate->reg; + reg.ptr = NULL; return 
_register_gate(NULL, setup->name, gate->parent, flags, - (void __iomem *)reg, gate->bit_shift, + ®, gate->bit_shift, clk_gate_flags, ops, hw_ops); } @@ -214,15 +212,14 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node, { struct clk *clk; const char *parent_name; - void __iomem *reg = NULL; + struct clk_omap_reg reg; u8 enable_bit = 0; u32 val; u32 flags = 0; u8 clk_gate_flags = 0; if (ops != &omap_gate_clkdm_clk_ops) { - reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(reg)) + if (ti_clk_get_reg_addr(node, 0, ®)) return; if (!of_property_read_u32(node, "ti,bit-shift", &val)) @@ -242,7 +239,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node, if (of_property_read_bool(node, "ti,set-bit-to-disable")) clk_gate_flags |= INVERT_ENABLE; - clk = _register_gate(NULL, node->name, parent_name, flags, reg, + clk = _register_gate(NULL, node->name, parent_name, flags, ®, enable_bit, clk_gate_flags, ops, hw_ops); if (!IS_ERR(clk)) @@ -260,8 +257,7 @@ _of_ti_composite_gate_clk_setup(struct device_node *node, if (!gate) return; - gate->enable_reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(gate->enable_reg)) + if (ti_clk_get_reg_addr(node, 0, &gate->enable_reg)) goto cleanup; of_property_read_u32(node, "ti,bit-shift", &val); diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index 42d9fd4f5f6a..62cf50c1e1e3 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c @@ -34,7 +34,7 @@ static const struct clk_ops ti_interface_clk_ops = { static struct clk *_register_interface(struct device *dev, const char *name, const char *parent_name, - void __iomem *reg, u8 bit_idx, + struct clk_omap_reg *reg, u8 bit_idx, const struct clk_hw_omap_ops *ops) { struct clk_init_data init = { NULL }; @@ -47,7 +47,7 @@ static struct clk *_register_interface(struct device *dev, const char *name, clk_hw->hw.init = &init; clk_hw->ops = ops; - clk_hw->enable_reg = reg; + memcpy(&clk_hw->enable_reg, reg, sizeof(*reg)); clk_hw->enable_bit = bit_idx; init.name = name; @@ -71,14 +71,13 @@ static struct clk *_register_interface(struct device *dev, const char *name, struct clk *ti_clk_register_interface(struct ti_clk *setup) { const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait; - u32 reg; - struct clk_omap_reg *reg_setup; + struct clk_omap_reg reg; struct ti_clk_gate *gate; gate = setup->data; - reg_setup = (struct clk_omap_reg *)® - reg_setup->index = gate->module; - reg_setup->offset = gate->reg; + reg.index = gate->module; + reg.offset = gate->reg; + reg.ptr = NULL; if (gate->flags & CLKF_NO_WAIT) ops = &clkhwops_iclk; @@ -96,7 +95,7 @@ struct clk *ti_clk_register_interface(struct ti_clk *setup) ops = &clkhwops_am35xx_ipss_wait; return _register_interface(NULL, setup->name, gate->parent, - (void __iomem *)reg, gate->bit_shift, ops); + ®, gate->bit_shift, ops); } #endif @@ -105,12 +104,11 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node, { struct clk *clk; const char *parent_name; - void __iomem *reg; + struct clk_omap_reg reg; u8 enable_bit = 0; u32 val; - reg = ti_clk_get_reg_addr(node, 0); - if (IS_ERR(reg)) + if (ti_clk_get_reg_addr(node, 0, ®)) return; if (!of_property_read_u32(node, "ti,bit-shift", &val)) @@ -122,7 +120,7 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node, return; } - clk = _register_interface(NULL, node->name, parent_name, reg, + clk = _register_interface(NULL, node->name, parent_name, ®, enable_bit, ops); if (!IS_ERR(clk)) diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c index 
daa2dee6bafe..18c267b38461 100644 --- a/drivers/clk/ti/mux.c +++ b/drivers/clk/ti/mux.c @@ -39,7 +39,7 @@ static u8 ti_clk_mux_get_parent(struct clk_hw *hw) * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so * val = 0x4 really means "bit 2, index starts at bit 0" */ - val = ti_clk_ll_ops->clk_readl(mux->reg) >> mux->shift; + val = ti_clk_ll_ops->clk_readl(&mux->reg) >> mux->shift; val &= mux->mask; if (mux->table) { @@ -81,11 +81,11 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index) if (mux->flags & CLK_MUX_HIWORD_MASK) { val = mux->mask << (mux->shift + 16); } else { - val = ti_clk_ll_ops->clk_readl(mux->reg); + val = ti_clk_ll_ops->clk_readl(&mux->reg); val &= ~(mux->mask << mux->shift); } val |= index << mux->shift; - ti_clk_ll_ops->clk_writel(val, mux->reg); + ti_clk_ll_ops->clk_writel(val, &mux->reg); return 0; } @@ -99,7 +99,7 @@ const struct clk_ops ti_clk_mux_ops = { static struct clk *_register_mux(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, - void __iomem *reg, u8 shift, u32 mask, + struct clk_omap_reg *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table) { struct clk_omap_mux *mux; @@ -120,7 +120,7 @@ static struct clk *_register_mux(struct device *dev, const char *name, init.num_parents = num_parents; /* struct clk_mux assignments */ - mux->reg = reg; + memcpy(&mux->reg, reg, sizeof(*reg)); mux->shift = shift; mux->mask = mask; mux->flags = clk_mux_flags; @@ -140,12 +140,9 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup) struct ti_clk_mux *mux; u32 flags; u8 mux_flags = 0; - struct clk_omap_reg *reg_setup; - u32 reg; + struct clk_omap_reg reg; u32 mask; - reg_setup = (struct clk_omap_reg *)® - mux = setup->data; flags = CLK_SET_RATE_NO_REPARENT; @@ -154,8 +151,9 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup) mask--; mask = (1 << fls(mask)) - 1; - reg_setup->index = mux->module; - reg_setup->offset = mux->reg; + reg.index = mux->module; + reg.offset = mux->reg; + reg.ptr = NULL; if (mux->flags & CLKF_INDEX_STARTS_AT_ONE) mux_flags |= CLK_MUX_INDEX_ONE; @@ -164,7 +162,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup) flags |= CLK_SET_RATE_PARENT; return _register_mux(NULL, setup->name, mux->parents, mux->num_parents, - flags, (void __iomem *)reg, mux->bit_shift, mask, + flags, ®, mux->bit_shift, mask, mux_flags, NULL); } @@ -177,7 +175,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup) static void of_mux_clk_setup(struct device_node *node) { struct clk *clk; - void __iomem *reg; + struct clk_omap_reg reg; unsigned int num_parents; const char **parent_names; u8 clk_mux_flags = 0; @@ -196,9 +194,7 @@ static void of_mux_clk_setup(struct device_node *node) of_clk_parent_fill(node, parent_names, num_parents); - reg = ti_clk_get_reg_addr(node, 0); - - if (IS_ERR(reg)) + if (ti_clk_get_reg_addr(node, 0, ®)) goto cleanup; of_property_read_u32(node, "ti,bit-shift", &shift); @@ -217,7 +213,7 @@ static void of_mux_clk_setup(struct device_node *node) mask = (1 << fls(mask)) - 1; clk = _register_mux(NULL, node->name, parent_names, num_parents, - flags, reg, shift, mask, clk_mux_flags, NULL); + flags, ®, shift, mask, clk_mux_flags, NULL); if (!IS_ERR(clk)) of_clk_add_provider(node, of_clk_src_simple_get, clk); @@ -230,7 +226,6 @@ CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup); struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup) { struct clk_omap_mux *mux; - struct clk_omap_reg *reg; int num_parents; if (!setup) @@ -240,12 +235,10 @@ 
struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup) if (!mux) return ERR_PTR(-ENOMEM); - reg = (struct clk_omap_reg *)&mux->reg; - mux->shift = setup->bit_shift; - reg->index = setup->module; - reg->offset = setup->reg; + mux->reg.index = setup->module; + mux->reg.offset = setup->reg; if (setup->flags & CLKF_INDEX_STARTS_AT_ONE) mux->flags |= CLK_MUX_INDEX_ONE; @@ -268,9 +261,7 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node) if (!mux) return; - mux->reg = ti_clk_get_reg_addr(node, 0); - - if (IS_ERR(mux->reg)) + if (ti_clk_get_reg_addr(node, 0, &mux->reg)) goto cleanup; if (!of_property_read_u32(node, "ti,bit-shift", &val)) diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index affdabd0b6a1..d18da839b810 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -18,6 +18,18 @@ #include #include +/** + * struct clk_omap_reg - OMAP register declaration + * @offset: offset from the master IP module base address + * @index: index of the master IP module + */ +struct clk_omap_reg { + void __iomem *ptr; + u16 offset; + u8 index; + u8 flags; +}; + /** * struct dpll_data - DPLL registers and integration data * @mult_div1_reg: register containing the DPLL M and N bitfields @@ -67,12 +79,12 @@ * can be placed into read-only space. */ struct dpll_data { - void __iomem *mult_div1_reg; + struct clk_omap_reg mult_div1_reg; u32 mult_mask; u32 div1_mask; struct clk_hw *clk_bypass; struct clk_hw *clk_ref; - void __iomem *control_reg; + struct clk_omap_reg control_reg; u32 enable_mask; unsigned long last_rounded_rate; u16 last_rounded_m; @@ -84,8 +96,8 @@ struct dpll_data { u16 max_divider; unsigned long max_rate; u8 modes; - void __iomem *autoidle_reg; - void __iomem *idlest_reg; + struct clk_omap_reg autoidle_reg; + struct clk_omap_reg idlest_reg; u32 autoidle_mask; u32 freqsel_mask; u32 idlest_mask; @@ -113,10 +125,10 @@ struct clk_hw_omap; */ struct clk_hw_omap_ops { void (*find_idlest)(struct clk_hw_omap *oclk, - void __iomem **idlest_reg, + struct clk_omap_reg *idlest_reg, u8 *idlest_bit, u8 *idlest_val); void (*find_companion)(struct clk_hw_omap *oclk, - void __iomem **other_reg, + struct clk_omap_reg *other_reg, u8 *other_bit); void (*allow_idle)(struct clk_hw_omap *oclk); void (*deny_idle)(struct clk_hw_omap *oclk); @@ -139,10 +151,10 @@ struct clk_hw_omap { struct list_head node; unsigned long fixed_rate; u8 fixed_div; - void __iomem *enable_reg; + struct clk_omap_reg enable_reg; u8 enable_bit; u8 flags; - void __iomem *clksel_reg; + struct clk_omap_reg clksel_reg; struct dpll_data *dpll_data; const char *clkdm_name; struct clockdomain *clkdm; @@ -195,16 +207,6 @@ enum { CLK_MAX_MEMMAPS }; -/** - * struct clk_omap_reg - OMAP register declaration - * @offset: offset from the master IP module base address - * @index: index of the master IP module - */ -struct clk_omap_reg { - u16 offset; - u16 index; -}; - /** * struct ti_clk_ll_ops - low-level ops for clocks * @clk_readl: pointer to register read function @@ -222,16 +224,16 @@ struct clk_omap_reg { * operations not provided directly by clock drivers. 
*/ struct ti_clk_ll_ops { - u32 (*clk_readl)(void __iomem *reg); - void (*clk_writel)(u32 val, void __iomem *reg); + u32 (*clk_readl)(const struct clk_omap_reg *reg); + void (*clk_writel)(u32 val, const struct clk_omap_reg *reg); int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk); int (*clkdm_clk_disable)(struct clockdomain *clkdm, struct clk *clk); struct clockdomain * (*clkdm_lookup)(const char *name); int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg, u8 idlest_shift); - int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst, - u8 *idlest_reg_id); + int (*cm_split_idlest_reg)(struct clk_omap_reg *idlest_reg, + s16 *prcm_inst, u8 *idlest_reg_id); }; #define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw) -- cgit v1.2.3 From 7f501f0a72036dc29ad9a53811474c393634b401 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Tue, 24 May 2016 19:20:05 +0200 Subject: mtd: nand: Store nand ID in struct nand_chip Store the NAND ID in struct nand_chip to avoid passing id_data and id_len as function parameters. Signed-off-by: Boris Brezillon Acked-by: Richard Weinberger Reviewed-by: Marek Vasut --- drivers/mtd/nand/nand_base.c | 55 ++++++++++++++++++++++++-------------------- include/linux/mtd/nand.h | 13 +++++++++++ 2 files changed, 43 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index f7969e0d59fc..6f3ae626cabc 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3821,18 +3821,16 @@ static int nand_get_bits_per_cell(u8 cellinfo) * chip. The rest of the parameters must be decoded according to generic or * manufacturer-specific "extended ID" decoding patterns. */ -static void nand_decode_ext_id(struct nand_chip *chip, u8 id_data[8], - int *busw) +static void nand_decode_ext_id(struct nand_chip *chip, int *busw) { struct mtd_info *mtd = nand_to_mtd(chip); - int extid, id_len; + int extid, id_len = chip->id.len; + u8 *id_data = chip->id.data; /* The 3rd id byte holds MLC / multichip data */ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]); /* The 4th id byte is the important one */ extid = id_data[3]; - id_len = nand_id_len(id_data, 8); - /* * Field definitions are in the following datasheets: * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) @@ -3956,9 +3954,10 @@ static void nand_decode_ext_id(struct nand_chip *chip, u8 id_data[8], * the chip. */ static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type, - u8 id_data[8], int *busw) + int *busw) { struct mtd_info *mtd = nand_to_mtd(chip); + u8 *id_data = chip->id.data; int maf_id = id_data[0]; mtd->erasesize = type->erasesize; @@ -3988,9 +3987,10 @@ static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type, * heuristic patterns using various detected parameters (e.g., manufacturer, * page size, cell-type information). 
*/ -static void nand_decode_bbm_options(struct nand_chip *chip, u8 id_data[8]) +static void nand_decode_bbm_options(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); + u8 *id_data = chip->id.data; int maf_id = id_data[0]; /* Set the bad block position */ @@ -4026,10 +4026,10 @@ static inline bool is_full_id_nand(struct nand_flash_dev *type) } static bool find_full_id_nand(struct nand_chip *chip, - struct nand_flash_dev *type, u8 *id_data, - int *busw) + struct nand_flash_dev *type, int *busw) { struct mtd_info *mtd = nand_to_mtd(chip); + u8 *id_data = chip->id.data; if (!strncmp(type->id, id_data, type->id_len)) { mtd->writesize = type->pagesize; @@ -4058,13 +4058,13 @@ static bool find_full_id_nand(struct nand_chip *chip, * Get the flash and manufacturer id and lookup if the type is supported. */ static int nand_get_flash_type(struct nand_chip *chip, - int *maf_id, int *dev_id, struct nand_flash_dev *type) { struct mtd_info *mtd = nand_to_mtd(chip); int busw; int i, maf_idx; - u8 id_data[8]; + u8 *id_data = chip->id.data; + u8 maf_id, dev_id; /* * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) @@ -4079,8 +4079,8 @@ static int nand_get_flash_type(struct nand_chip *chip, chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); /* Read manufacturer and device IDs */ - *maf_id = chip->read_byte(mtd); - *dev_id = chip->read_byte(mtd); + maf_id = chip->read_byte(mtd); + dev_id = chip->read_byte(mtd); /* * Try again to make sure, as some systems the bus-hold or other @@ -4095,20 +4095,22 @@ static int nand_get_flash_type(struct nand_chip *chip, for (i = 0; i < 8; i++) id_data[i] = chip->read_byte(mtd); - if (id_data[0] != *maf_id || id_data[1] != *dev_id) { + if (id_data[0] != maf_id || id_data[1] != dev_id) { pr_info("second ID read did not match %02x,%02x against %02x,%02x\n", - *maf_id, *dev_id, id_data[0], id_data[1]); + maf_id, dev_id, id_data[0], id_data[1]); return -ENODEV; } + chip->id.len = nand_id_len(id_data, 8); + if (!type) type = nand_flash_ids; for (; type->name != NULL; type++) { if (is_full_id_nand(type)) { - if (find_full_id_nand(chip, type, id_data, &busw)) + if (find_full_id_nand(chip, type, &busw)) goto ident_done; - } else if (*dev_id == type->dev_id) { + } else if (dev_id == type->dev_id) { break; } } @@ -4134,9 +4136,9 @@ static int nand_get_flash_type(struct nand_chip *chip, if (!type->pagesize) { /* Decode parameters from extended ID */ - nand_decode_ext_id(chip, id_data, &busw); + nand_decode_ext_id(chip, &busw); } else { - nand_decode_id(chip, type, id_data, &busw); + nand_decode_id(chip, type, &busw); } /* Get chip options */ chip->options |= type->options; @@ -4145,13 +4147,13 @@ static int nand_get_flash_type(struct nand_chip *chip, * Check if chip is not a Samsung device. Do not clear the * options for chips which do not have an extended id. */ - if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) + if (maf_id != NAND_MFR_SAMSUNG && !type->pagesize) chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; ident_done: /* Try to identify manufacturer */ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { - if (nand_manuf_ids[maf_idx].id == *maf_id) + if (nand_manuf_ids[maf_idx].id == maf_id) break; } @@ -4165,7 +4167,7 @@ ident_done: * chip correct! */ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", - *maf_id, *dev_id); + maf_id, dev_id); pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name); pr_warn("bus width %d instead %d bit\n", (chip->options & NAND_BUSWIDTH_16) ? 
16 : 8, @@ -4173,7 +4175,7 @@ ident_done: return -EINVAL; } - nand_decode_bbm_options(chip, id_data); + nand_decode_bbm_options(chip); /* Calculate the address shift from the page size */ chip->page_shift = ffs(mtd->writesize) - 1; @@ -4197,7 +4199,7 @@ ident_done: chip->cmdfunc = nand_command_lp; pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", - *maf_id, *dev_id); + maf_id, dev_id); if (chip->onfi_version) pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, @@ -4400,7 +4402,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16); /* Read the flash type */ - ret = nand_get_flash_type(chip, &nand_maf_id, &nand_dev_id, table); + ret = nand_get_flash_type(chip, table); if (ret) { if (!(chip->options & NAND_SCAN_SILENT_NODEV)) pr_warn("No NAND device found\n"); @@ -4425,6 +4427,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, if (ret) return ret; + nand_maf_id = chip->id.data[0]; + nand_dev_id = chip->id.data[1]; + chip->select_chip(mtd, -1); /* Check for a chip array */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 9591e0fbe5bd..e2c11351b1bd 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -464,6 +464,17 @@ struct nand_jedec_params { __le16 crc; } __packed; +/** + * struct nand_id - NAND id structure + * @data: buffer containing the id bytes. Currently 8 bytes large, but can + * be extended if required. + * @len: ID length. + */ +struct nand_id { + u8 data[8]; + int len; +}; + /** * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices * @lock: protection lock @@ -793,6 +804,7 @@ nand_get_sdr_timings(const struct nand_data_interface *conf) * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is * currently in data_buf. * @subpagesize: [INTERN] holds the subpagesize + * @id: [INTERN] holds NAND ID * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded), * non 0 if ONFI supported. * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded), @@ -881,6 +893,7 @@ struct nand_chip { int badblockpos; int badblockbits; + struct nand_id id; int onfi_version; int jedec_version; union { -- cgit v1.2.3 From 8cfb9ab68f90703d419870fce7ac21ac401399f2 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sat, 7 Jan 2017 15:15:57 +0100 Subject: mtd: nand: Rename the nand_manufacturers struct Drop the 's' at the end of nand_manufacturers since the struct is actually describing a single manufacturer, not a manufacturer table. 
Signed-off-by: Boris Brezillon --- drivers/mtd/nand/nand_ids.c | 2 +- include/linux/mtd/nand.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 4a2f75b0c200..3f80cfcb5e37 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -169,7 +169,7 @@ struct nand_flash_dev nand_flash_ids[] = { }; /* Manufacturer IDs */ -struct nand_manufacturers nand_manuf_ids[] = { +struct nand_manufacturer nand_manuf_ids[] = { {NAND_MFR_TOSHIBA, "Toshiba"}, {NAND_MFR_ESMT, "ESMT"}, {NAND_MFR_SAMSUNG, "Samsung"}, diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index e2c11351b1bd..9c679e8bde42 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1062,17 +1062,17 @@ struct nand_flash_dev { }; /** - * struct nand_manufacturers - NAND Flash Manufacturer ID Structure + * struct nand_manufacturer - NAND Flash Manufacturer structure * @name: Manufacturer name * @id: manufacturer ID code of device. */ -struct nand_manufacturers { +struct nand_manufacturer { int id; char *name; }; extern struct nand_flash_dev nand_flash_ids[]; -extern struct nand_manufacturers nand_manuf_ids[]; +extern struct nand_manufacturer nand_manuf_ids[]; int nand_default_bbt(struct mtd_info *mtd); int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); -- cgit v1.2.3 From bcc678c2d7a0e0af14cb3d858ebd367be378c172 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Sat, 7 Jan 2017 15:48:25 +0100 Subject: mtd: nand: Do not expose the NAND manufacturer table directly There is no reason to expose the NAND manufacturer table. Provide an helper function to find manufacturers by their id. We also turn the nand_manufacturers table into a const array, since its members are not modified after the initial assignment. Finally, we remove the sentinel manufacturer entry from the manufacturers table (we already have the array size information given by ARRAY_SIZE()), and add the nand_manufacturer_name() helper to handle the "Unknown" case properly. 
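For reference only, a hedged usage sketch that is not part of the patch series: with the helpers this patch adds (nand_get_manufacturer() and nand_manufacturer_name(), see the hunks below), a caller that has already read the first ID byte off the chip (assumed here to sit in a local "maf_id" variable) can resolve and report the vendor without touching the table directly, and a NULL lookup result is covered by the "Unknown" fallback:

        /* Illustrative only; maf_id is assumed to hold the first READID byte. */
        const struct nand_manufacturer *manufacturer = nand_get_manufacturer(maf_id);

        pr_info("NAND vendor: %s (ID 0x%02x)\n",
                nand_manufacturer_name(manufacturer), maf_id);
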
Signed-off-by: Boris Brezillon --- drivers/mtd/nand/nand_base.c | 23 +++++++++++------------ drivers/mtd/nand/nand_ids.c | 22 ++++++++++++++++++++-- include/linux/mtd/nand.h | 9 ++++++++- 3 files changed, 39 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index b1eb99e84044..0120252e0710 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -4052,9 +4052,10 @@ static bool find_full_id_nand(struct nand_chip *chip, */ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) { + const struct nand_manufacturer *manufacturer; struct mtd_info *mtd = nand_to_mtd(chip); int busw; - int i, maf_idx; + int i; u8 *id_data = chip->id.data; u8 maf_id, dev_id; @@ -4159,10 +4160,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) ident_done: /* Try to identify manufacturer */ - for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { - if (nand_manuf_ids[maf_idx].id == maf_id) - break; - } + manufacturer = nand_get_manufacturer(maf_id); if (chip->options & NAND_BUSWIDTH_AUTO) { WARN_ON(busw & NAND_BUSWIDTH_16); @@ -4174,7 +4172,8 @@ ident_done: */ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name); + pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + mtd->name); pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8, (chip->options & NAND_BUSWIDTH_16) ? 16 : 8); return -EINVAL; @@ -4207,14 +4206,14 @@ ident_done: maf_id, dev_id); if (chip->onfi_version) - pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, - chip->onfi_params.model); + pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + chip->onfi_params.model); else if (chip->jedec_version) - pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, - chip->jedec_params.model); + pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + chip->jedec_params.model); else - pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, - type->name); + pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + type->name); pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC", diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index bd267ade0742..06f59a6b74ff 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -169,7 +169,7 @@ struct nand_flash_dev nand_flash_ids[] = { }; /* Manufacturer IDs */ -struct nand_manufacturer nand_manuf_ids[] = { +static const struct nand_manufacturer nand_manufacturers[] = { {NAND_MFR_TOSHIBA, "Toshiba"}, {NAND_MFR_ESMT, "ESMT"}, {NAND_MFR_SAMSUNG, "Samsung"}, @@ -186,5 +186,23 @@ struct nand_manufacturer nand_manuf_ids[] = { {NAND_MFR_INTEL, "Intel"}, {NAND_MFR_ATO, "ATO"}, {NAND_MFR_WINBOND, "Winbond"}, - {0x0, "Unknown"} }; + +/** + * nand_get_manufacturer - Get manufacturer information from the manufacturer + * ID + * @id: manufacturer ID + * + * Returns a pointer a nand_manufacturer object if the manufacturer is defined + * in the NAND manufacturers database, NULL otherwise. 
+ */ +const struct nand_manufacturer *nand_get_manufacturer(u8 id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++) + if (nand_manufacturers[i].id == id) + return &nand_manufacturers[i]; + + return NULL; +} diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 9c679e8bde42..6415aa16043c 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1071,8 +1071,15 @@ struct nand_manufacturer { char *name; }; +const struct nand_manufacturer *nand_get_manufacturer(u8 id); + +static inline const char * +nand_manufacturer_name(const struct nand_manufacturer *manufacturer) +{ + return manufacturer ? manufacturer->name : "Unknown"; +} + extern struct nand_flash_dev nand_flash_ids[]; -extern struct nand_manufacturer nand_manuf_ids[]; int nand_default_bbt(struct mtd_info *mtd); int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); -- cgit v1.2.3 From abbe26d144ec22bb067fa414d717b9f7ca2e12bd Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 8 Jun 2016 09:32:55 +0200 Subject: mtd: nand: Add manufacturer specific initialization/detection steps A lot of NANDs are implementing generic features in a non-generic way, or are providing advanced auto-detection logic where the NAND ID bytes meaning changes with the NAND generation. Providing this vendor specific initialization step will allow us to get rid of full-id entries in the nand_ids table or all the vendor specific cases added over the time in the generic NAND ID decoding logic. Signed-off-by: Boris Brezillon --- drivers/mtd/nand/nand_base.c | 75 ++++++++++++++++++++++++++++++++++++++------ include/linux/mtd/nand.h | 35 +++++++++++++++++++++ 2 files changed, 100 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 0120252e0710..92ff6adbd7c9 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3819,7 +3819,7 @@ static int nand_get_bits_per_cell(u8 cellinfo) * chip. The rest of the parameters must be decoded according to generic or * manufacturer-specific "extended ID" decoding patterns. */ -static void nand_decode_ext_id(struct nand_chip *chip) +void nand_decode_ext_id(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); int extid, id_len = chip->id.len; @@ -3944,6 +3944,7 @@ static void nand_decode_ext_id(struct nand_chip *chip) } } +EXPORT_SYMBOL_GPL(nand_decode_ext_id); /* * Old devices have chip data hardcoded in the device ID table. nand_decode_id @@ -4047,6 +4048,53 @@ static bool find_full_id_nand(struct nand_chip *chip, return false; } +/* + * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC + * compliant and does not have a full-id or legacy-id entry in the nand_ids + * table. + */ +static void nand_manufacturer_detect(struct nand_chip *chip) +{ + /* + * Try manufacturer detection if available and use + * nand_decode_ext_id() otherwise. + */ + if (chip->manufacturer.desc && chip->manufacturer.desc->ops && + chip->manufacturer.desc->ops->detect) + chip->manufacturer.desc->ops->detect(chip); + else + nand_decode_ext_id(chip); +} + +/* + * Manufacturer initialization. This function is called for all NANDs including + * ONFI and JEDEC compliant ones. + * Manufacturer drivers should put all their specific initialization code in + * their ->init() hook. 
+ */ +static int nand_manufacturer_init(struct nand_chip *chip) +{ + if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops || + !chip->manufacturer.desc->ops->init) + return 0; + + return chip->manufacturer.desc->ops->init(chip); +} + +/* + * Manufacturer cleanup. This function is called for all NANDs including + * ONFI and JEDEC compliant ones. + * Manufacturer drivers should put all their specific cleanup code in their + * ->cleanup() hook. + */ +static void nand_manufacturer_cleanup(struct nand_chip *chip) +{ + /* Release manufacturer private data */ + if (chip->manufacturer.desc && chip->manufacturer.desc->ops && + chip->manufacturer.desc->ops->cleanup) + chip->manufacturer.desc->ops->cleanup(chip); +} + /* * Get the flash and manufacturer id and lookup if the type is supported. */ @@ -4055,7 +4103,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) const struct nand_manufacturer *manufacturer; struct mtd_info *mtd = nand_to_mtd(chip); int busw; - int i; + int i, ret; u8 *id_data = chip->id.data; u8 maf_id, dev_id; @@ -4096,6 +4144,10 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) chip->id.len = nand_id_len(id_data, 8); + /* Try to identify manufacturer */ + manufacturer = nand_get_manufacturer(maf_id); + chip->manufacturer.desc = manufacturer; + if (!type) type = nand_flash_ids; @@ -4142,12 +4194,11 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) chip->chipsize = (uint64_t)type->chipsize << 20; - if (!type->pagesize) { - /* Decode parameters from extended ID */ - nand_decode_ext_id(chip); - } else { + if (!type->pagesize) + nand_manufacturer_detect(chip); + else nand_decode_id(chip, type); - } + /* Get chip options */ chip->options |= type->options; @@ -4159,9 +4210,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; ident_done: - /* Try to identify manufacturer */ - manufacturer = nand_get_manufacturer(maf_id); - if (chip->options & NAND_BUSWIDTH_AUTO) { WARN_ON(busw & NAND_BUSWIDTH_16); nand_set_defaults(chip); @@ -4202,6 +4250,10 @@ ident_done: if (mtd->writesize > 512 && chip->cmdfunc == nand_command) chip->cmdfunc = nand_command_lp; + ret = nand_manufacturer_init(chip); + if (ret) + return ret; + pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); @@ -4947,6 +4999,9 @@ void nand_cleanup(struct nand_chip *chip) if (chip->badblock_pattern && chip->badblock_pattern->options & NAND_BBT_DYNAMICSTRUCT) kfree(chip->badblock_pattern); + + /* Free manufacturer priv data. */ + nand_manufacturer_cleanup(chip); } EXPORT_SYMBOL_GPL(nand_cleanup); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 6415aa16043c..ee9a19f42293 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -731,6 +731,20 @@ nand_get_sdr_timings(const struct nand_data_interface *conf) return &conf->timings.sdr; } +/** + * struct nand_manufacturer_ops - NAND Manufacturer operations + * @detect: detect the NAND memory organization and capabilities + * @init: initialize all vendor specific fields (like the ->read_retry() + * implementation) if any. + * @cleanup: the ->init() function may have allocated resources, ->cleanup() + * is here to let vendor specific code release those resources. 
+ */ +struct nand_manufacturer_ops { + void (*detect)(struct nand_chip *chip); + int (*init)(struct nand_chip *chip); + void (*cleanup)(struct nand_chip *chip); +}; + /** * struct nand_chip - NAND Private Flash Chip Data * @mtd: MTD device registered to the MTD framework @@ -835,6 +849,7 @@ nand_get_sdr_timings(const struct nand_data_interface *conf) * additional error status checks (determine if errors are * correctable). * @write_page: [REPLACEABLE] High-level page write function + * @manufacturer: [INTERN] Contains manufacturer information */ struct nand_chip { @@ -923,6 +938,11 @@ struct nand_chip { struct nand_bbt_descr *badblock_pattern; void *priv; + + struct { + const struct nand_manufacturer *desc; + void *priv; + } manufacturer; }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; @@ -959,6 +979,17 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) chip->priv = priv; } +static inline void nand_set_manufacturer_data(struct nand_chip *chip, + void *priv) +{ + chip->manufacturer.priv = priv; +} + +static inline void *nand_get_manufacturer_data(struct nand_chip *chip) +{ + return chip->manufacturer.priv; +} + /* * NAND Flash Manufacturer ID Codes */ @@ -1065,10 +1096,12 @@ struct nand_flash_dev { * struct nand_manufacturer - NAND Flash Manufacturer structure * @name: Manufacturer name * @id: manufacturer ID code of device. + * @ops: manufacturer operations */ struct nand_manufacturer { int id; char *name; + const struct nand_manufacturer_ops *ops; }; const struct nand_manufacturer *nand_get_manufacturer(u8 id); @@ -1246,4 +1279,6 @@ int nand_reset(struct nand_chip *chip, int chipnr); /* Free resources held by the NAND device */ void nand_cleanup(struct nand_chip *chip); +/* Default extended ID decoding function */ +void nand_decode_ext_id(struct nand_chip *chip); #endif /* __LINUX_MTD_NAND_H */ -- cgit v1.2.3 From c51d0ac59f24200dfdccc897ff7c3c9446c7599a Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 8 Jun 2016 10:22:19 +0200 Subject: mtd: nand: Move Samsung specific init/detection logic in nand_samsung.c Move Samsung specific initialization and detection logic into nand_samsung.c. This is part of the "separate vendor specific code from core" cleanup process. 
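The Samsung driver added below keeps no per-chip state, but the ->init()/->cleanup() hooks and the nand_set_manufacturer_data()/nand_get_manufacturer_data() accessors introduced two patches earlier are also meant for vendors that do. Purely as a hedged sketch (the "foo" vendor, its private struct and its ops table are invented for illustration and are not in the kernel tree), such a driver would typically pair the two hooks like this:

        /* Hypothetical vendor code, for illustration only. */
        #include <linux/slab.h>
        #include <linux/mtd/nand.h>

        struct foo_nand_priv {
                int read_retry_mode;    /* example per-chip state */
        };

        static int foo_nand_init(struct nand_chip *chip)
        {
                struct foo_nand_priv *priv;

                /* ->init() allocates the private data... */
                priv = kzalloc(sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                nand_set_manufacturer_data(chip, priv);

                return 0;
        }

        static void foo_nand_cleanup(struct nand_chip *chip)
        {
                /* ...and ->cleanup() releases it when the chip goes away. */
                kfree(nand_get_manufacturer_data(chip));
        }

        const struct nand_manufacturer_ops foo_nand_manuf_ops = {
                .init = foo_nand_init,
                .cleanup = foo_nand_cleanup,
        };
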
Signed-off-by: Boris Brezillon Acked-by: Richard Weinberger --- drivers/mtd/nand/Makefile | 1 + drivers/mtd/nand/nand_base.c | 52 ++--------------------- drivers/mtd/nand/nand_ids.c | 4 +- drivers/mtd/nand/nand_samsung.c | 92 +++++++++++++++++++++++++++++++++++++++++ include/linux/mtd/nand.h | 2 + 5 files changed, 101 insertions(+), 50 deletions(-) create mode 100644 drivers/mtd/nand/nand_samsung.c (limited to 'include') diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index bfd5d12b9ade..d4b90b0f879e 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -61,3 +61,4 @@ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o +nand-objs += nand_samsung.o diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 92ff6adbd7c9..fd38d59d33a6 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3832,48 +3832,13 @@ void nand_decode_ext_id(struct nand_chip *chip) /* * Field definitions are in the following datasheets: * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) - * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) * * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung * ID to decide what to do. */ - if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && - !nand_is_slc(chip) && id_data[5] != 0x00) { - /* Calc pagesize */ - mtd->writesize = 2048 << (extid & 0x03); - extid >>= 2; - /* Calc oobsize */ - switch (((extid >> 2) & 0x04) | (extid & 0x03)) { - case 1: - mtd->oobsize = 128; - break; - case 2: - mtd->oobsize = 218; - break; - case 3: - mtd->oobsize = 400; - break; - case 4: - mtd->oobsize = 436; - break; - case 5: - mtd->oobsize = 512; - break; - case 6: - mtd->oobsize = 640; - break; - case 7: - default: /* Other cases are "reserved" (unknown) */ - mtd->oobsize = 1024; - break; - } - extid >>= 2; - /* Calc blocksize */ - mtd->erasesize = (128 * 1024) << - (((extid >> 1) & 0x04) | (extid & 0x03)); - } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX && - !nand_is_slc(chip)) { + if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX && + !nand_is_slc(chip)) { unsigned int tmp; /* Calc pagesize */ @@ -4001,13 +3966,10 @@ static void nand_decode_bbm_options(struct nand_chip *chip) * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba, * AMD/Spansion, and Macronix. All others scan only the first page. */ - if (!nand_is_slc(chip) && - (maf_id == NAND_MFR_SAMSUNG || - maf_id == NAND_MFR_HYNIX)) + if (!nand_is_slc(chip) && maf_id == NAND_MFR_HYNIX) chip->bbt_options |= NAND_BBT_SCANLASTPAGE; else if ((nand_is_slc(chip) && - (maf_id == NAND_MFR_SAMSUNG || - maf_id == NAND_MFR_HYNIX || + (maf_id == NAND_MFR_HYNIX || maf_id == NAND_MFR_TOSHIBA || maf_id == NAND_MFR_AMD || maf_id == NAND_MFR_MACRONIX)) || @@ -4202,12 +4164,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) /* Get chip options */ chip->options |= type->options; - /* - * Check if chip is not a Samsung device. Do not clear the - * options for chips which do not have an extended id. 
- */ - if (maf_id != NAND_MFR_SAMSUNG && !type->pagesize) - chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; ident_done: if (chip->options & NAND_BUSWIDTH_AUTO) { diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 06f59a6b74ff..8eba7dfe7c1f 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -10,7 +10,7 @@ #include #include -#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS +#define LP_OPTIONS 0 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) #define SP_OPTIONS NAND_NEED_READRDY @@ -172,7 +172,7 @@ struct nand_flash_dev nand_flash_ids[] = { static const struct nand_manufacturer nand_manufacturers[] = { {NAND_MFR_TOSHIBA, "Toshiba"}, {NAND_MFR_ESMT, "ESMT"}, - {NAND_MFR_SAMSUNG, "Samsung"}, + {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops}, {NAND_MFR_FUJITSU, "Fujitsu"}, {NAND_MFR_NATIONAL, "National"}, {NAND_MFR_RENESAS, "Renesas"}, diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c new file mode 100644 index 000000000000..5c259dd5b652 --- /dev/null +++ b/drivers/mtd/nand/nand_samsung.c @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +static void samsung_nand_decode_id(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + /* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */ + if (chip->id.len == 6 && !nand_is_slc(chip) && + chip->id.data[5] != 0x00) { + u8 extid = chip->id.data[3]; + + /* Get pagesize */ + mtd->writesize = 2048 << (extid & 0x03); + + extid >>= 2; + + /* Get oobsize */ + switch (((extid >> 2) & 0x4) | (extid & 0x3)) { + case 1: + mtd->oobsize = 128; + break; + case 2: + mtd->oobsize = 218; + break; + case 3: + mtd->oobsize = 400; + break; + case 4: + mtd->oobsize = 436; + break; + case 5: + mtd->oobsize = 512; + break; + case 6: + mtd->oobsize = 640; + break; + default: + /* + * We should never reach this case, but if that + * happens, this probably means Samsung decided to use + * a different extended ID format, and we should find + * a way to support it. 
+ */ + WARN(1, "Invalid OOB size value"); + break; + } + + /* Get blocksize */ + extid >>= 2; + mtd->erasesize = (128 * 1024) << + (((extid >> 1) & 0x04) | (extid & 0x03)); + } else { + nand_decode_ext_id(chip); + } +} + +static int samsung_nand_init(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + if (mtd->writesize > 512) + chip->options |= NAND_SAMSUNG_LP_OPTIONS; + + if (!nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCANLASTPAGE; + else + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops samsung_nand_manuf_ops = { + .detect = samsung_nand_decode_id, + .init = samsung_nand_init, +}; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index ee9a19f42293..2f83cb55392f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1114,6 +1114,8 @@ nand_manufacturer_name(const struct nand_manufacturer *manufacturer) extern struct nand_flash_dev nand_flash_ids[]; +extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; + int nand_default_bbt(struct mtd_info *mtd); int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); -- cgit v1.2.3 From 01389b6bd2f4f7649cdbb4a99a15d9e0c05d6f8c Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 8 Jun 2016 10:30:18 +0200 Subject: mtd: nand: Move Hynix specific init/detection logic in nand_hynix.c Move Hynix specific initialization and detection logic into nand_hynix.c. This is part of the "separate vendor specific code from core" cleanup process. Signed-off-by: Boris Brezillon Acked-by: Richard Weinberger --- drivers/mtd/nand/Makefile | 1 + drivers/mtd/nand/nand_base.c | 114 +++++++++++------------------------------- drivers/mtd/nand/nand_hynix.c | 84 +++++++++++++++++++++++++++++++ drivers/mtd/nand/nand_ids.c | 2 +- include/linux/mtd/nand.h | 1 + 5 files changed, 115 insertions(+), 87 deletions(-) create mode 100644 drivers/mtd/nand/nand_hynix.c (limited to 'include') diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index d4b90b0f879e..ddb2670d9767 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -61,4 +61,5 @@ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o +nand-objs += nand_hynix.o nand-objs += nand_samsung.o diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index fd38d59d33a6..d65db5921a0f 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3829,85 +3829,32 @@ void nand_decode_ext_id(struct nand_chip *chip) /* The 4th id byte is the important one */ extid = id_data[3]; + /* Calc pagesize */ + mtd->writesize = 1024 << (extid & 0x03); + extid >>= 2; + /* Calc oobsize */ + mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9); + extid >>= 2; + /* Calc blocksize. Blocksize is multiples of 64KiB */ + mtd->erasesize = (64 * 1024) << (extid & 0x03); + extid >>= 2; + /* Get buswidth information */ + if (extid & 0x1) + chip->options |= NAND_BUSWIDTH_16; + /* - * Field definitions are in the following datasheets: - * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) - * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) - * - * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung - * ID to decide what to do. + * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per + * 512B page. 
For Toshiba SLC, we decode the 5th/6th byte as + * follows: + * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm, + * 110b -> 24nm + * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC */ - if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX && - !nand_is_slc(chip)) { - unsigned int tmp; - - /* Calc pagesize */ - mtd->writesize = 2048 << (extid & 0x03); - extid >>= 2; - /* Calc oobsize */ - switch (((extid >> 2) & 0x04) | (extid & 0x03)) { - case 0: - mtd->oobsize = 128; - break; - case 1: - mtd->oobsize = 224; - break; - case 2: - mtd->oobsize = 448; - break; - case 3: - mtd->oobsize = 64; - break; - case 4: - mtd->oobsize = 32; - break; - case 5: - mtd->oobsize = 16; - break; - default: - mtd->oobsize = 640; - break; - } - extid >>= 2; - /* Calc blocksize */ - tmp = ((extid >> 1) & 0x04) | (extid & 0x03); - if (tmp < 0x03) - mtd->erasesize = (128 * 1024) << tmp; - else if (tmp == 0x03) - mtd->erasesize = 768 * 1024; - else - mtd->erasesize = (64 * 1024) << tmp; - } else { - /* Calc pagesize */ - mtd->writesize = 1024 << (extid & 0x03); - extid >>= 2; - /* Calc oobsize */ - mtd->oobsize = (8 << (extid & 0x01)) * - (mtd->writesize >> 9); - extid >>= 2; - /* Calc blocksize. Blocksize is multiples of 64KiB */ - mtd->erasesize = (64 * 1024) << (extid & 0x03); - extid >>= 2; - /* Get buswidth information */ - if (extid & 0x1) - chip->options |= NAND_BUSWIDTH_16; - - /* - * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per - * 512B page. For Toshiba SLC, we decode the 5th/6th byte as - * follows: - * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm, - * 110b -> 24nm - * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC - */ - if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA && - nand_is_slc(chip) && - (id_data[5] & 0x7) == 0x6 /* 24nm */ && - !(id_data[4] & 0x80) /* !BENAND */) { - mtd->oobsize = 32 * mtd->writesize >> 9; - } - - } + if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA && + nand_is_slc(chip) && + (id_data[5] & 0x7) == 0x6 /* 24nm */ && + !(id_data[4] & 0x80) /* !BENAND */) + mtd->oobsize = 32 * mtd->writesize >> 9; } EXPORT_SYMBOL_GPL(nand_decode_ext_id); @@ -3966,15 +3913,10 @@ static void nand_decode_bbm_options(struct nand_chip *chip) * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba, * AMD/Spansion, and Macronix. All others scan only the first page. */ - if (!nand_is_slc(chip) && maf_id == NAND_MFR_HYNIX) - chip->bbt_options |= NAND_BBT_SCANLASTPAGE; - else if ((nand_is_slc(chip) && - (maf_id == NAND_MFR_HYNIX || - maf_id == NAND_MFR_TOSHIBA || - maf_id == NAND_MFR_AMD || - maf_id == NAND_MFR_MACRONIX)) || - (mtd->writesize == 2048 && - maf_id == NAND_MFR_MICRON)) + if ((nand_is_slc(chip) && + (maf_id == NAND_MFR_TOSHIBA || maf_id == NAND_MFR_AMD || + maf_id == NAND_MFR_MACRONIX)) || + (mtd->writesize == 2048 && maf_id == NAND_MFR_MICRON)) chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; } diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c new file mode 100644 index 000000000000..fbd661688158 --- /dev/null +++ b/drivers/mtd/nand/nand_hynix.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +static void hynix_nand_decode_id(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + /* Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) */ + if (chip->id.len == 6 && !nand_is_slc(chip)) { + u8 tmp, extid = chip->id.data[3]; + + /* Extract pagesize */ + mtd->writesize = 2048 << (extid & 0x03); + extid >>= 2; + + /* Extract oobsize */ + switch (((extid >> 2) & 0x4) | (extid & 0x3)) { + case 0: + mtd->oobsize = 128; + break; + case 1: + mtd->oobsize = 224; + break; + case 2: + mtd->oobsize = 448; + break; + case 3: + mtd->oobsize = 64; + break; + case 4: + mtd->oobsize = 32; + break; + case 5: + mtd->oobsize = 16; + break; + default: + mtd->oobsize = 640; + break; + } + + /* Extract blocksize */ + extid >>= 2; + tmp = ((extid >> 1) & 0x04) | (extid & 0x03); + if (tmp < 0x03) + mtd->erasesize = (128 * 1024) << tmp; + else if (tmp == 0x03) + mtd->erasesize = 768 * 1024; + else + mtd->erasesize = (64 * 1024) << tmp; + } else { + nand_decode_ext_id(chip); + } +} + +static int hynix_nand_init(struct nand_chip *chip) +{ + if (!nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCANLASTPAGE; + else + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops hynix_nand_manuf_ops = { + .detect = hynix_nand_decode_id, + .init = hynix_nand_init, +}; diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 8eba7dfe7c1f..7dc06a807d00 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -177,7 +177,7 @@ static const struct nand_manufacturer nand_manufacturers[] = { {NAND_MFR_NATIONAL, "National"}, {NAND_MFR_RENESAS, "Renesas"}, {NAND_MFR_STMICRO, "ST Micro"}, - {NAND_MFR_HYNIX, "Hynix"}, + {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops}, {NAND_MFR_MICRON, "Micron"}, {NAND_MFR_AMD, "AMD/Spansion"}, {NAND_MFR_MACRONIX, "Macronix"}, diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 2f83cb55392f..74e3a231cb56 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1115,6 +1115,7 @@ nand_manufacturer_name(const struct nand_manufacturer *manufacturer) extern struct nand_flash_dev nand_flash_ids[]; extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; +extern const struct nand_manufacturer_ops hynix_nand_manuf_ops; int nand_default_bbt(struct mtd_info *mtd); int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); -- cgit v1.2.3 From 9b2d61f80b060ce3ea5af2a99e148b0b214932b2 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 8 Jun 2016 10:34:57 +0200 Subject: mtd: nand: Move Toshiba specific init/detection logic in nand_toshiba.c Move Toshiba specific initialization and detection logic into nand_toshiba.c. This is part of the "separate vendor specific code from core" cleanup process. 
Signed-off-by: Boris Brezillon Acked-by: Richard Weinberger --- drivers/mtd/nand/Makefile | 1 + drivers/mtd/nand/nand_base.c | 19 ++------------- drivers/mtd/nand/nand_ids.c | 2 +- drivers/mtd/nand/nand_toshiba.c | 51 +++++++++++++++++++++++++++++++++++++++++ include/linux/mtd/nand.h | 1 + 5 files changed, 56 insertions(+), 18 deletions(-) create mode 100644 drivers/mtd/nand/nand_toshiba.c (limited to 'include') diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index ddb2670d9767..7c059822f479 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -63,3 +63,4 @@ obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_hynix.o nand-objs += nand_samsung.o +nand-objs += nand_toshiba.o diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index d65db5921a0f..36bca97900af 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3822,7 +3822,7 @@ static int nand_get_bits_per_cell(u8 cellinfo) void nand_decode_ext_id(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); - int extid, id_len = chip->id.len; + int extid; u8 *id_data = chip->id.data; /* The 3rd id byte holds MLC / multichip data */ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]); @@ -3841,20 +3841,6 @@ void nand_decode_ext_id(struct nand_chip *chip) /* Get buswidth information */ if (extid & 0x1) chip->options |= NAND_BUSWIDTH_16; - - /* - * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per - * 512B page. For Toshiba SLC, we decode the 5th/6th byte as - * follows: - * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm, - * 110b -> 24nm - * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC - */ - if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA && - nand_is_slc(chip) && - (id_data[5] & 0x7) == 0x6 /* 24nm */ && - !(id_data[4] & 0x80) /* !BENAND */) - mtd->oobsize = 32 * mtd->writesize >> 9; } EXPORT_SYMBOL_GPL(nand_decode_ext_id); @@ -3914,8 +3900,7 @@ static void nand_decode_bbm_options(struct nand_chip *chip) * AMD/Spansion, and Macronix. All others scan only the first page. */ if ((nand_is_slc(chip) && - (maf_id == NAND_MFR_TOSHIBA || maf_id == NAND_MFR_AMD || - maf_id == NAND_MFR_MACRONIX)) || + (maf_id == NAND_MFR_AMD || maf_id == NAND_MFR_MACRONIX)) || (mtd->writesize == 2048 && maf_id == NAND_MFR_MICRON)) chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; } diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 7dc06a807d00..22ff89e9c478 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -170,7 +170,7 @@ struct nand_flash_dev nand_flash_ids[] = { /* Manufacturer IDs */ static const struct nand_manufacturer nand_manufacturers[] = { - {NAND_MFR_TOSHIBA, "Toshiba"}, + {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops}, {NAND_MFR_ESMT, "ESMT"}, {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops}, {NAND_MFR_FUJITSU, "Fujitsu"}, diff --git a/drivers/mtd/nand/nand_toshiba.c b/drivers/mtd/nand/nand_toshiba.c new file mode 100644 index 000000000000..fa787ba38dcd --- /dev/null +++ b/drivers/mtd/nand/nand_toshiba.c @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +static void toshiba_nand_decode_id(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + nand_decode_ext_id(chip); + + /* + * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per + * 512B page. For Toshiba SLC, we decode the 5th/6th byte as + * follows: + * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm, + * 110b -> 24nm + * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC + */ + if (chip->id.len >= 6 && nand_is_slc(chip) && + (chip->id.data[5] & 0x7) == 0x6 /* 24nm */ && + !(chip->id.data[4] & 0x80) /* !BENAND */) + mtd->oobsize = 32 * mtd->writesize >> 9; +} + +static int toshiba_nand_init(struct nand_chip *chip) +{ + if (nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops toshiba_nand_manuf_ops = { + .detect = toshiba_nand_decode_id, + .init = toshiba_nand_init, +}; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 74e3a231cb56..dd9e3b5ddd4f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1114,6 +1114,7 @@ nand_manufacturer_name(const struct nand_manufacturer *manufacturer) extern struct nand_flash_dev nand_flash_ids[]; +extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; extern const struct nand_manufacturer_ops hynix_nand_manuf_ops; -- cgit v1.2.3 From 10d4e75c36f6c16311dde1461f318210da357219 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 8 Jun 2016 10:38:57 +0200 Subject: mtd: nand: Move Micron specific init logic in nand_micron.c Move Micron specific initialization logic into nand_micron.c. This is part of the "separate vendor specific code from core" cleanup process. 
Signed-off-by: Boris Brezillon Acked-by: Richard Weinberger --- drivers/mtd/nand/Makefile | 1 + drivers/mtd/nand/nand_base.c | 32 +--------------- drivers/mtd/nand/nand_ids.c | 2 +- drivers/mtd/nand/nand_micron.c | 86 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mtd/nand.h | 21 +---------- 5 files changed, 91 insertions(+), 51 deletions(-) create mode 100644 drivers/mtd/nand/nand_micron.c (limited to 'include') diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 7c059822f479..11d743155810 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -62,5 +62,6 @@ obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_hynix.o +nand-objs += nand_micron.o nand-objs += nand_samsung.o nand-objs += nand_toshiba.o diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 36bca97900af..f45e3bbe5f85 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3537,30 +3537,6 @@ ext_out: return ret; } -static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode}; - - return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY, - feature); -} - -/* - * Configure chip properties from Micron vendor-specific ONFI table - */ -static void nand_onfi_detect_micron(struct nand_chip *chip, - struct nand_onfi_params *p) -{ - struct nand_onfi_vendor_micron *micron = (void *)p->vendor; - - if (le16_to_cpu(p->vendor_revision) < 1) - return; - - chip->read_retries = micron->read_retry_options; - chip->setup_read_retry = nand_setup_read_retry_micron; -} - /* * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise. */ @@ -3660,9 +3636,6 @@ static int nand_flash_detect_onfi(struct nand_chip *chip) pr_warn("Could not retrieve ONFI ECC requirements\n"); } - if (p->jedec_id == NAND_MFR_MICRON) - nand_onfi_detect_micron(chip, p); - return 1; } @@ -3899,9 +3872,8 @@ static void nand_decode_bbm_options(struct nand_chip *chip) * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba, * AMD/Spansion, and Macronix. All others scan only the first page. 
*/ - if ((nand_is_slc(chip) && - (maf_id == NAND_MFR_AMD || maf_id == NAND_MFR_MACRONIX)) || - (mtd->writesize == 2048 && maf_id == NAND_MFR_MICRON)) + if (nand_is_slc(chip) && + (maf_id == NAND_MFR_AMD || maf_id == NAND_MFR_MACRONIX)) chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; } diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 22ff89e9c478..42c6743401c5 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -178,7 +178,7 @@ static const struct nand_manufacturer nand_manufacturers[] = { {NAND_MFR_RENESAS, "Renesas"}, {NAND_MFR_STMICRO, "ST Micro"}, {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops}, - {NAND_MFR_MICRON, "Micron"}, + {NAND_MFR_MICRON, "Micron", µn_nand_manuf_ops}, {NAND_MFR_AMD, "AMD/Spansion"}, {NAND_MFR_MACRONIX, "Macronix"}, {NAND_MFR_EON, "Eon"}, diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c new file mode 100644 index 000000000000..877011069251 --- /dev/null +++ b/drivers/mtd/nand/nand_micron.c @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +struct nand_onfi_vendor_micron { + u8 two_plane_read; + u8 read_cache; + u8 read_unique_id; + u8 dq_imped; + u8 dq_imped_num_settings; + u8 dq_imped_feat_addr; + u8 rb_pulldown_strength; + u8 rb_pulldown_strength_feat_addr; + u8 rb_pulldown_strength_num_settings; + u8 otp_mode; + u8 otp_page_start; + u8 otp_data_prot_addr; + u8 otp_num_pages; + u8 otp_feat_addr; + u8 read_retry_options; + u8 reserved[72]; + u8 param_revision; +} __packed; + +static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode}; + + return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY, + feature); +} + +/* + * Configure chip properties from Micron vendor-specific ONFI table + */ +static int micron_nand_onfi_init(struct nand_chip *chip) +{ + struct nand_onfi_params *p = &chip->onfi_params; + struct nand_onfi_vendor_micron *micron = (void *)p->vendor; + + if (!chip->onfi_version) + return 0; + + if (le16_to_cpu(p->vendor_revision) < 1) + return 0; + + chip->read_retries = micron->read_retry_options; + chip->setup_read_retry = micron_nand_setup_read_retry; + + return 0; +} + +static int micron_nand_init(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + ret = micron_nand_onfi_init(chip); + if (ret) + return ret; + + if (mtd->writesize == 2048) + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops micron_nand_manuf_ops = { + .init = micron_nand_init, +}; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index dd9e3b5ddd4f..7d0f18ecbf57 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -366,26 +366,6 @@ struct onfi_ext_param_page { */ } __packed; -struct nand_onfi_vendor_micron { - u8 two_plane_read; - u8 read_cache; - u8 
read_unique_id; - u8 dq_imped; - u8 dq_imped_num_settings; - u8 dq_imped_feat_addr; - u8 rb_pulldown_strength; - u8 rb_pulldown_strength_feat_addr; - u8 rb_pulldown_strength_num_settings; - u8 otp_mode; - u8 otp_page_start; - u8 otp_data_prot_addr; - u8 otp_num_pages; - u8 otp_feat_addr; - u8 read_retry_options; - u8 reserved[72]; - u8 param_revision; -} __packed; - struct jedec_ecc_info { u8 ecc_bits; u8 codeword_size; @@ -1117,6 +1097,7 @@ extern struct nand_flash_dev nand_flash_ids[]; extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; extern const struct nand_manufacturer_ops hynix_nand_manuf_ops; +extern const struct nand_manufacturer_ops micron_nand_manuf_ops; int nand_default_bbt(struct mtd_info *mtd); int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); -- cgit v1.2.3 From 229204da53b31d576fcc1c93a33626943ea8202c Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 8 Jun 2016 10:42:23 +0200 Subject: mtd: nand: Move AMD/Spansion specific init/detection logic in nand_amd.c Move AMD/Spansion specific initialization/detection logic into nand_amd.c. This is part of the "separate vendor specific code from core" cleanup process. Signed-off-by: Boris Brezillon Acked-by: Richard Weinberger --- drivers/mtd/nand/Makefile | 1 + drivers/mtd/nand/nand_amd.c | 51 ++++++++++++++++++++++++++++++++++++++++++++ drivers/mtd/nand/nand_base.c | 18 +--------------- drivers/mtd/nand/nand_ids.c | 2 +- include/linux/mtd/nand.h | 1 + 5 files changed, 55 insertions(+), 18 deletions(-) create mode 100644 drivers/mtd/nand/nand_amd.c (limited to 'include') diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 11d743155810..f48ddcc132bc 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o +nand-objs += nand_amd.o nand-objs += nand_hynix.o nand-objs += nand_micron.o nand-objs += nand_samsung.o diff --git a/drivers/mtd/nand/nand_amd.c b/drivers/mtd/nand/nand_amd.c new file mode 100644 index 000000000000..170403a3bfa8 --- /dev/null +++ b/drivers/mtd/nand/nand_amd.c @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +static void amd_nand_decode_id(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + nand_decode_ext_id(chip); + + /* + * Check for Spansion/AMD ID + repeating 5th, 6th byte since + * some Spansion chips have erasesize that conflicts with size + * listed in nand_ids table. 
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) + */ + if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 && + chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 && + mtd->writesize == 512) { + mtd->erasesize = 128 * 1024; + mtd->erasesize <<= ((chip->id.data[3] & 0x03) << 1); + } +} + +static int amd_nand_init(struct nand_chip *chip) +{ + if (nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops amd_nand_manuf_ops = { + .detect = amd_nand_decode_id, + .init = amd_nand_init, +}; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index f45e3bbe5f85..a9060bcb9eb5 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3825,8 +3825,6 @@ EXPORT_SYMBOL_GPL(nand_decode_ext_id); static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type) { struct mtd_info *mtd = nand_to_mtd(chip); - u8 *id_data = chip->id.data; - int maf_id = id_data[0]; mtd->erasesize = type->erasesize; mtd->writesize = type->pagesize; @@ -3834,19 +3832,6 @@ static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type) /* All legacy ID NAND are small-page, SLC */ chip->bits_per_cell = 1; - - /* - * Check for Spansion/AMD ID + repeating 5th, 6th byte since - * some Spansion chips have erasesize that conflicts with size - * listed in nand_ids table. - * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) - */ - if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00 - && id_data[6] == 0x00 && id_data[7] == 0x00 - && mtd->writesize == 512) { - mtd->erasesize = 128 * 1024; - mtd->erasesize <<= ((id_data[3] & 0x03) << 1); - } } /* @@ -3872,8 +3857,7 @@ static void nand_decode_bbm_options(struct nand_chip *chip) * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba, * AMD/Spansion, and Macronix. All others scan only the first page. 
*/ - if (nand_is_slc(chip) && - (maf_id == NAND_MFR_AMD || maf_id == NAND_MFR_MACRONIX)) + if (nand_is_slc(chip) && maf_id == NAND_MFR_MACRONIX) chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; } diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 42c6743401c5..af7b92e08fbd 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -179,7 +179,7 @@ static const struct nand_manufacturer nand_manufacturers[] = { {NAND_MFR_STMICRO, "ST Micro"}, {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops}, {NAND_MFR_MICRON, "Micron", µn_nand_manuf_ops}, - {NAND_MFR_AMD, "AMD/Spansion"}, + {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops}, {NAND_MFR_MACRONIX, "Macronix"}, {NAND_MFR_EON, "Eon"}, {NAND_MFR_SANDISK, "SanDisk"}, diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 7d0f18ecbf57..97dce42778e9 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1098,6 +1098,7 @@ extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; extern const struct nand_manufacturer_ops hynix_nand_manuf_ops; extern const struct nand_manufacturer_ops micron_nand_manuf_ops; +extern const struct nand_manufacturer_ops amd_nand_manuf_ops; int nand_default_bbt(struct mtd_info *mtd); int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); -- cgit v1.2.3 From 3b5206f4be9b65d2f0f85b3239cf117a1d0de7ce Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 8 Jun 2016 10:43:26 +0200 Subject: mtd: nand: Move Macronix specific initialization in nand_macronix.c Move Macronix specific initialization logic into nand_macronix.c. This is part of the "separate vendor specific code from core" cleanup process. Signed-off-by: Boris Brezillon --- drivers/mtd/nand/Makefile | 1 + drivers/mtd/nand/nand_base.c | 11 ----------- drivers/mtd/nand/nand_ids.c | 2 +- drivers/mtd/nand/nand_macronix.c | 30 ++++++++++++++++++++++++++++++ include/linux/mtd/nand.h | 1 + 5 files changed, 33 insertions(+), 12 deletions(-) create mode 100644 drivers/mtd/nand/nand_macronix.c (limited to 'include') diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index f48ddcc132bc..098b8791f10a 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_amd.o nand-objs += nand_hynix.o +nand-objs += nand_macronix.o nand-objs += nand_micron.o nand-objs += nand_samsung.o nand-objs += nand_toshiba.o diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index a9060bcb9eb5..685376d8ceab 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3842,23 +3842,12 @@ static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type) static void nand_decode_bbm_options(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); - u8 *id_data = chip->id.data; - int maf_id = id_data[0]; /* Set the bad block position */ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16)) chip->badblockpos = NAND_LARGE_BADBLOCK_POS; else chip->badblockpos = NAND_SMALL_BADBLOCK_POS; - - /* - * Bad block marker is stored in the last page of each block on Samsung - * and Hynix MLC devices; stored in first two pages of each block on - * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba, - * AMD/Spansion, and Macronix. All others scan only the first page. 
- */ - if (nand_is_slc(chip) && maf_id == NAND_MFR_MACRONIX) - chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; } static inline bool is_full_id_nand(struct nand_flash_dev *type) diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index af7b92e08fbd..9d5ca0e540b5 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -180,7 +180,7 @@ static const struct nand_manufacturer nand_manufacturers[] = { {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops}, {NAND_MFR_MICRON, "Micron", µn_nand_manuf_ops}, {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops}, - {NAND_MFR_MACRONIX, "Macronix"}, + {NAND_MFR_MACRONIX, "Macronix", ¯onix_nand_manuf_ops}, {NAND_MFR_EON, "Eon"}, {NAND_MFR_SANDISK, "SanDisk"}, {NAND_MFR_INTEL, "Intel"}, diff --git a/drivers/mtd/nand/nand_macronix.c b/drivers/mtd/nand/nand_macronix.c new file mode 100644 index 000000000000..84855c3e1a02 --- /dev/null +++ b/drivers/mtd/nand/nand_macronix.c @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2017 Free Electrons + * Copyright (C) 2017 NextThing Co + * + * Author: Boris Brezillon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +static int macronix_nand_init(struct nand_chip *chip) +{ + if (nand_is_slc(chip)) + chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + + return 0; +} + +const struct nand_manufacturer_ops macronix_nand_manuf_ops = { + .init = macronix_nand_init, +}; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 97dce42778e9..c7de017c7f4c 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -1099,6 +1099,7 @@ extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; extern const struct nand_manufacturer_ops hynix_nand_manuf_ops; extern const struct nand_manufacturer_ops micron_nand_manuf_ops; extern const struct nand_manufacturer_ops amd_nand_manuf_ops; +extern const struct nand_manufacturer_ops macronix_nand_manuf_ops; int nand_default_bbt(struct mtd_info *mtd); int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); -- cgit v1.2.3 From bdc7dd67e7d9a50af992fd6c5e21b080475321e3 Mon Sep 17 00:00:00 2001 From: Elaine Zhang Date: Mon, 6 Feb 2017 10:50:34 +0800 Subject: clk: rockchip: add rk3328 clk_mac2io_ext ID Signed-off-by: Elaine Zhang Signed-off-by: Heiko Stuebner --- include/dt-bindings/clock/rk3328-cru.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h index ee702c8e4c09..d2b26a4b43eb 100644 --- a/include/dt-bindings/clock/rk3328-cru.h +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -97,6 +97,7 @@ #define SCLK_MAC2IO_SRC 99 #define SCLK_MAC2IO 100 #define SCLK_MAC2PHY 101 +#define SCLK_MAC2IO_EXT 102 /* dclk gates */ #define DCLK_LCDC 120 -- cgit v1.2.3 From a4fa90d24b4bb7d171fbcb2ceffd0315a6e29050 Mon Sep 17 00:00:00 2001 From: Elaine Zhang Date: Tue, 7 Mar 2017 19:05:15 +0800 Subject: clk: rockchip: fix up rk3368 timer-ids The timer-ids are wrong compared to the manual, probably due a simple copy-paste mistake from the otherwise very similar rk3288. 
And there are even more timers in the system than the ones wrongly listed here. Timer-Ids were unused both in clock-driver as well as devicetree till now, so fixing them won't break anything. Signed-off-by: Elaine Zhang Signed-off-by: Heiko Stuebner --- include/dt-bindings/clock/rk3368-cru.h | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h index 9c5dd9ba2f6c..2bf23eda8938 100644 --- a/include/dt-bindings/clock/rk3368-cru.h +++ b/include/dt-bindings/clock/rk3368-cru.h @@ -44,13 +44,12 @@ #define SCLK_I2S_8CH 82 #define SCLK_SPDIF_8CH 83 #define SCLK_I2S_2CH 84 -#define SCLK_TIMER0 85 -#define SCLK_TIMER1 86 -#define SCLK_TIMER2 87 -#define SCLK_TIMER3 88 -#define SCLK_TIMER4 89 -#define SCLK_TIMER5 90 -#define SCLK_TIMER6 91 +#define SCLK_TIMER00 85 +#define SCLK_TIMER01 86 +#define SCLK_TIMER02 87 +#define SCLK_TIMER03 88 +#define SCLK_TIMER04 89 +#define SCLK_TIMER05 90 #define SCLK_OTGPHY0 93 #define SCLK_OTG_ADP 96 #define SCLK_HSICPHY480M 97 -- cgit v1.2.3 From 710fbd769c9c9ba80f5b2727dd6bf4e06bb69b9e Mon Sep 17 00:00:00 2001 From: Elaine Zhang Date: Tue, 7 Mar 2017 19:05:17 +0800 Subject: clk: rockchip: add clock ids for timer10-15 of RK3368 SoCs Signed-off-by: Elaine Zhang Signed-off-by: Heiko Stuebner --- include/dt-bindings/clock/rk3368-cru.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h index 2bf23eda8938..aeb83e581a11 100644 --- a/include/dt-bindings/clock/rk3368-cru.h +++ b/include/dt-bindings/clock/rk3368-cru.h @@ -81,6 +81,12 @@ #define SCLK_SFC 126 #define SCLK_MAC 127 #define SCLK_MACREF_OUT 128 +#define SCLK_TIMER10 133 +#define SCLK_TIMER11 134 +#define SCLK_TIMER12 135 +#define SCLK_TIMER13 136 +#define SCLK_TIMER14 137 +#define SCLK_TIMER15 138 #define DCLK_VOP 190 #define MCLK_CRYPTO 191 -- cgit v1.2.3 From 34ac2c278b306cc3006dd5cbfaff4ec52065bf6f Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Thu, 23 Feb 2017 12:44:39 +0200 Subject: clk: tegra: Fix ISP clock modelling The 2 ISP clocks (ispa and ispb) share a mux/divider control. So model this as 1 mux/divider clock and child gate clocks. 
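With this layout a consumer only has to enable its own gate while the rate is owned by the shared parent; a minimal sketch of that usage (the clock-names entry and the driver context are assumptions, not part of this patch):

  #include <linux/clk.h>
  #include <linux/err.h>

  /* Sketch only: enable one ISP instance; the shared "isp" mux/divider
   * carries the rate, the per-instance clock is just a gate. */
  static int ispa_enable_sketch(struct device *dev, unsigned long rate)
  {
          struct clk *ispa = devm_clk_get(dev, "ispa");

          if (IS_ERR(ispa))
                  return PTR_ERR(ispa);

          clk_set_rate(clk_get_parent(ispa), rate);

          return clk_prepare_enable(ispa);
  }
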
Signed-off-by: Peter De Schrijver Reviewed-by: Mikko Perttunen Tested-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-id.h | 1 + drivers/clk/tegra/clk-tegra-periph.c | 11 +++++++++-- drivers/clk/tegra/clk-tegra210.c | 1 + include/dt-bindings/clock/tegra210-car.h | 4 ++-- 4 files changed, 13 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index 5738635c5274..1019eb8eff4d 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -307,6 +307,7 @@ enum clk_id { tegra_clk_xusb_ssp_src, tegra_clk_sclk_mux, tegra_clk_sor_safe, + tegra_clk_ispa, tegra_clk_max, }; diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 4ce4e7fb1124..19b00b77ecbe 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -168,6 +168,12 @@ 0, TEGRA_PERIPH_NO_GATE, _clk_id,\ _parents##_idx, 0, _lock) +#define MUX8_NOGATE(_name, _parents, _offset, _clk_id) \ + TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset, \ + 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\ + 0, TEGRA_PERIPH_NO_GATE, _clk_id,\ + _parents##_idx, 0, NULL) + #define INT(_name, _parents, _offset, \ _clk_num, _gate_flags, _clk_id) \ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\ @@ -739,7 +745,7 @@ static struct tegra_periph_init_data periph_clks[] = { MUX8("soc_therm", mux_clkm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, TEGRA_PERIPH_ON_APB, tegra_clk_soc_therm_8), MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 164, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor_8), MUX8("isp", mux_pllm_pllc_pllp_plla_clkm_pllc4, CLK_SOURCE_ISP, 23, TEGRA_PERIPH_ON_APB, tegra_clk_isp_8), - MUX8("isp", mux_pllc_pllp_plla1_pllc2_c3_clkm_pllc4, CLK_SOURCE_ISP, 23, TEGRA_PERIPH_ON_APB, tegra_clk_isp_9), + MUX8_NOGATE("isp", mux_pllc_pllp_plla1_pllc2_c3_clkm_pllc4, CLK_SOURCE_ISP, tegra_clk_isp_9), MUX8("entropy", mux_pllp_clkm1, CLK_SOURCE_ENTROPY, 149, 0, tegra_clk_entropy), MUX8("entropy", mux_pllp_clkm_clk32_plle, CLK_SOURCE_ENTROPY, 149, 0, tegra_clk_entropy_8), MUX8("hdmi_audio", mux_pllp3_pllc_clkm, CLK_SOURCE_HDMI_AUDIO, 176, TEGRA_PERIPH_NO_RESET, tegra_clk_hdmi_audio), @@ -819,7 +825,8 @@ static struct tegra_periph_init_data gate_clks[] = { GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0), GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED), GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0), - GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0), + GATE("ispa", "isp", 23, 0, tegra_clk_ispa, 0), + GATE("ispb", "isp", 3, 0, tegra_clk_ispb, 0), GATE("vim2_clk", "clk_m", 11, 0, tegra_clk_vim2_clk, 0), GATE("pcie", "clk_m", 70, 0, tegra_clk_pcie, 0), GATE("gpu", "pll_ref", 184, 0, tegra_clk_gpu, 0), diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 2ef8d49537c2..7bda8ba449a8 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2210,6 +2210,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true }, [tegra_clk_pll_a1] = { .dt_id = TEGRA210_CLK_PLL_A1, .present = true }, + [tegra_clk_ispa] = { .dt_id = TEGRA210_CLK_ISPA, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { diff --git a/include/dt-bindings/clock/tegra210-car.h 
b/include/dt-bindings/clock/tegra210-car.h index 35288b20f2c9..f5c6563ab2d6 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -39,7 +39,7 @@ /* 20 (register bit affects vi and vi_sensor) */ /* 21 */ #define TEGRA210_CLK_USBD 22 -#define TEGRA210_CLK_ISP 23 +#define TEGRA210_CLK_ISPA 23 /* 24 */ /* 25 */ #define TEGRA210_CLK_DISP2 26 @@ -349,7 +349,7 @@ #define TEGRA210_CLK_PLL_RE_OUT1 319 /* 320 */ /* 321 */ -/* 322 */ +#define TEGRA210_CLK_ISP 322 /* 323 */ /* 324 */ /* 325 */ -- cgit v1.2.3 From bfa34832df1fffb79c1719d4016e9cacf0f83b22 Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Tue, 28 Feb 2017 16:37:17 +0200 Subject: clk: tegra: Add CEC clock This clock is used to clock the HDMI CEC interface. Signed-off-by: Peter De Schrijver Reviewed-by: Mikko Perttunen Tested-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-id.h | 1 + drivers/clk/tegra/clk-tegra-periph.c | 1 + drivers/clk/tegra/clk-tegra114.c | 1 + drivers/clk/tegra/clk-tegra124.c | 1 + drivers/clk/tegra/clk-tegra210.c | 1 + drivers/clk/tegra/clk-tegra30.c | 1 + include/dt-bindings/clock/tegra114-car.h | 2 +- include/dt-bindings/clock/tegra124-car-common.h | 2 +- include/dt-bindings/clock/tegra210-car.h | 2 +- include/dt-bindings/clock/tegra30-car.h | 2 +- 10 files changed, 10 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index 1019eb8eff4d..fc978b2bd1ce 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -308,6 +308,7 @@ enum clk_id { tegra_clk_sclk_mux, tegra_clk_sor_safe, tegra_clk_ispa, + tegra_clk_cec, tegra_clk_max, }; diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index c9e795b190f2..a2aed27b3265 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -837,6 +837,7 @@ static struct tegra_periph_init_data gate_clks[] = { GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0), GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0), + GATE("cec", "pclk", 136, 0, tegra_clk_cec, 0), }; static struct tegra_periph_init_data div_clks[] = { diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 933b5dd698b8..fd1a99c05c2d 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -819,6 +819,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_3_MUX, .present = true }, [tegra_clk_dsia_mux] = { .dt_id = TEGRA114_CLK_DSIA_MUX, .present = true }, [tegra_clk_dsib_mux] = { .dt_id = TEGRA114_CLK_DSIB_MUX, .present = true }, + [tegra_clk_cec] = { .dt_id = TEGRA114_CLK_CEC, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index a112d3d2bff1..e81ea5b11577 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -928,6 +928,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true }, [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true }, [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true }, + [tegra_clk_cec] = { .dt_id = TEGRA124_CLK_CEC, .present = true }, }; 
static struct tegra_devclk devclks[] __initdata = { diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 58d7f9ce9197..bdb296ad1151 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2222,6 +2222,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true }, [tegra_clk_pll_a1] = { .dt_id = TEGRA210_CLK_PLL_A1, .present = true }, [tegra_clk_ispa] = { .dt_id = TEGRA210_CLK_ISPA, .present = true }, + [tegra_clk_cec] = { .dt_id = TEGRA210_CLK_CEC, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 8e2db5ead8da..a2d163f759b4 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -817,6 +817,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = { [tegra_clk_pll_p_out4] = { .dt_id = TEGRA30_CLK_PLL_P_OUT4, .present = true }, [tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true }, [tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true }, + [tegra_clk_cec] = { .dt_id = TEGRA30_CLK_CEC, .present = true }, }; static const char *pll_e_parents[] = { "pll_ref", "pll_p" }; diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h index 534c03f8ad72..ed5ca218c857 100644 --- a/include/dt-bindings/clock/tegra114-car.h +++ b/include/dt-bindings/clock/tegra114-car.h @@ -156,7 +156,7 @@ /* 133 */ /* 134 */ /* 135 */ -/* 136 */ +#define TEGRA114_CLK_CEC 136 /* 137 */ /* 138 */ /* 139 */ diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h index a2156090563f..9352c7e2ce0b 100644 --- a/include/dt-bindings/clock/tegra124-car-common.h +++ b/include/dt-bindings/clock/tegra124-car-common.h @@ -156,7 +156,7 @@ /* 133 */ /* 134 */ /* 135 */ -/* 136 */ +#define TEGRA124_CLK_CEC 136 /* 137 */ /* 138 */ /* 139 */ diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index f5c6563ab2d6..e7a2578831f7 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -156,7 +156,7 @@ /* 133 */ /* 134 */ /* 135 */ -/* 136 */ +#define TEGRA210_CLK_CEC 136 /* 137 */ /* 138 */ /* 139 */ diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h index 889e49ba0aa3..7213354b9652 100644 --- a/include/dt-bindings/clock/tegra30-car.h +++ b/include/dt-bindings/clock/tegra30-car.h @@ -156,7 +156,7 @@ /* 133 */ /* 134 */ /* 135 */ -/* 136 */ +#define TEGRA30_CLK_CEC 136 /* 137 */ /* 138 */ /* 139 */ -- cgit v1.2.3 From 319af7975c9ff500a30b2e6c4433c1f327283884 Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Tue, 28 Feb 2017 16:37:18 +0200 Subject: clk: tegra: Define Tegra210 DMIC sync clocks Tegra210 has 3 DMIC inputs which can be clocked from the recovered clock of several other audio inputs (eg. i2s0, i2s1, ...). To model this, we add a 3 new clocks similar to the audio* clocks which handle the same function for the I2S and SPDIF clocks. 
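Each *_SYNC_CLK_MUX selects which recovered audio clock feeds the DMIC and the corresponding *_SYNC_CLK gate sits on top of it; a hedged consumer-side sketch (how the clock handles are obtained is left out, the function itself is illustrative only):

  #include <linux/clk.h>

  /* Sketch only: route the i2s1 recovered clock to dmic1 and ungate it. */
  static int dmic1_sync_parent_sketch(struct clk *dmic1_sync_mux,
                                      struct clk *dmic1_sync_gate,
                                      struct clk *i2s1_sync)
  {
          int err = clk_set_parent(dmic1_sync_mux, i2s1_sync);

          if (err)
                  return err;

          return clk_prepare_enable(dmic1_sync_gate);
  }
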
Signed-off-by: Peter De Schrijver Reviewed-by: Mikko Perttunen Tested-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-id.h | 6 +++ drivers/clk/tegra/clk-tegra-audio.c | 85 +++++++++++++++++++++++--------- drivers/clk/tegra/clk-tegra210.c | 6 +++ include/dt-bindings/clock/tegra210-car.h | 9 +++- 4 files changed, 81 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index fc978b2bd1ce..b40293c19c19 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -309,6 +309,12 @@ enum clk_id { tegra_clk_sor_safe, tegra_clk_ispa, tegra_clk_cec, + tegra_clk_dmic1_sync_clk, + tegra_clk_dmic2_sync_clk, + tegra_clk_dmic3_sync_clk, + tegra_clk_dmic1_sync_clk_mux, + tegra_clk_dmic2_sync_clk_mux, + tegra_clk_dmic3_sync_clk_mux, tegra_clk_max, }; diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c index e2bfa9b368f6..b37cae7af26d 100644 --- a/drivers/clk/tegra/clk-tegra-audio.c +++ b/drivers/clk/tegra/clk-tegra-audio.c @@ -31,6 +31,9 @@ #define AUDIO_SYNC_CLK_I2S3 0x4ac #define AUDIO_SYNC_CLK_I2S4 0x4b0 #define AUDIO_SYNC_CLK_SPDIF 0x4b4 +#define AUDIO_SYNC_CLK_DMIC1 0x560 +#define AUDIO_SYNC_CLK_DMIC2 0x564 +#define AUDIO_SYNC_CLK_DMIC3 0x6b8 #define AUDIO_SYNC_DOUBLER 0x49c @@ -91,8 +94,14 @@ struct tegra_audio2x_clk_initdata { static DEFINE_SPINLOCK(clk_doubler_lock); -static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync", - "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync", +static const char * const mux_audio_sync_clk[] = { "spdif_in_sync", + "i2s0_sync", "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", + "pll_a_out0", "vimclk_sync", +}; + +static const char * const mux_dmic_sync_clk[] = { "unused", "i2s0_sync", + "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "pll_a_out0", + "vimclk_sync", }; static struct tegra_sync_source_initdata sync_source_clks[] __initdata = { @@ -114,6 +123,12 @@ static struct tegra_audio_clk_initdata audio_clks[] = { AUDIO(spdif, AUDIO_SYNC_CLK_SPDIF), }; +static struct tegra_audio_clk_initdata dmic_clks[] = { + AUDIO(dmic1_sync_clk, AUDIO_SYNC_CLK_DMIC1), + AUDIO(dmic2_sync_clk, AUDIO_SYNC_CLK_DMIC2), + AUDIO(dmic3_sync_clk, AUDIO_SYNC_CLK_DMIC3), +}; + static struct tegra_audio2x_clk_initdata audio2x_clks[] = { AUDIO2X(audio0, 113, 24), AUDIO2X(audio1, 114, 25), @@ -123,6 +138,41 @@ static struct tegra_audio2x_clk_initdata audio2x_clks[] = { AUDIO2X(spdif, 118, 29), }; +static void __init tegra_audio_sync_clk_init(void __iomem *clk_base, + struct tegra_clk *tegra_clks, + struct tegra_audio_clk_initdata *sync, + int num_sync_clks, + const char * const *mux_names, + int num_mux_inputs) +{ + struct clk *clk; + struct clk **dt_clk; + struct tegra_audio_clk_initdata *data; + int i; + + for (i = 0, data = sync; i < num_sync_clks; i++, data++) { + dt_clk = tegra_lookup_dt_id(data->mux_clk_id, tegra_clks); + if (!dt_clk) + continue; + + clk = clk_register_mux(NULL, data->mux_name, mux_names, + num_mux_inputs, + CLK_SET_RATE_NO_REPARENT, + clk_base + data->offset, 0, 3, 0, + NULL); + *dt_clk = clk; + + dt_clk = tegra_lookup_dt_id(data->gate_clk_id, tegra_clks); + if (!dt_clk) + continue; + + clk = clk_register_gate(NULL, data->gate_name, data->mux_name, + 0, clk_base + data->offset, 4, + CLK_GATE_SET_TO_DISABLE, NULL); + *dt_clk = clk; + } +} + void __init tegra_audio_clk_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk *tegra_clks, struct tegra_audio_clk_info 
*audio_info, @@ -176,30 +226,17 @@ void __init tegra_audio_clk_init(void __iomem *clk_base, *dt_clk = clk; } - for (i = 0; i < ARRAY_SIZE(audio_clks); i++) { - struct tegra_audio_clk_initdata *data; + tegra_audio_sync_clk_init(clk_base, tegra_clks, audio_clks, + ARRAY_SIZE(audio_clks), mux_audio_sync_clk, + ARRAY_SIZE(mux_audio_sync_clk)); - data = &audio_clks[i]; - dt_clk = tegra_lookup_dt_id(data->mux_clk_id, tegra_clks); + /* make sure the DMIC sync clocks have a valid parent */ + for (i = 0; i < ARRAY_SIZE(dmic_clks); i++) + writel_relaxed(1, clk_base + dmic_clks[i].offset); - if (!dt_clk) - continue; - clk = clk_register_mux(NULL, data->mux_name, mux_audio_sync_clk, - ARRAY_SIZE(mux_audio_sync_clk), - CLK_SET_RATE_NO_REPARENT, - clk_base + data->offset, 0, 3, 0, - NULL); - *dt_clk = clk; - - dt_clk = tegra_lookup_dt_id(data->gate_clk_id, tegra_clks); - if (!dt_clk) - continue; - - clk = clk_register_gate(NULL, data->gate_name, data->mux_name, - 0, clk_base + data->offset, 4, - CLK_GATE_SET_TO_DISABLE, NULL); - *dt_clk = clk; - } + tegra_audio_sync_clk_init(clk_base, tegra_clks, dmic_clks, + ARRAY_SIZE(dmic_clks), mux_dmic_sync_clk, + ARRAY_SIZE(mux_dmic_sync_clk)); for (i = 0; i < ARRAY_SIZE(audio2x_clks); i++) { struct tegra_audio2x_clk_initdata *data; diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index bdb296ad1151..ca63901a5416 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2223,6 +2223,12 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_pll_a1] = { .dt_id = TEGRA210_CLK_PLL_A1, .present = true }, [tegra_clk_ispa] = { .dt_id = TEGRA210_CLK_ISPA, .present = true }, [tegra_clk_cec] = { .dt_id = TEGRA210_CLK_CEC, .present = true }, + [tegra_clk_dmic1_sync_clk] = { .dt_id = TEGRA210_CLK_DMIC1_SYNC_CLK, .present = true }, + [tegra_clk_dmic2_sync_clk] = { .dt_id = TEGRA210_CLK_DMIC2_SYNC_CLK, .present = true }, + [tegra_clk_dmic3_sync_clk] = { .dt_id = TEGRA210_CLK_DMIC3_SYNC_CLK, .present = true }, + [tegra_clk_dmic1_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC1_SYNC_CLK_MUX, .present = true }, + [tegra_clk_dmic2_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC2_SYNC_CLK_MUX, .present = true }, + [tegra_clk_dmic3_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC3_SYNC_CLK_MUX, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index e7a2578831f7..5aa10278ea1b 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -396,6 +396,13 @@ #define TEGRA210_CLK_PLL_C_UD 364 #define TEGRA210_CLK_SCLK_MUX 365 -#define TEGRA210_CLK_CLK_MAX 366 +#define TEGRA210_CLK_DMIC1_SYNC_CLK 388 +#define TEGRA210_CLK_DMIC1_SYNC_CLK_MUX 389 +#define TEGRA210_CLK_DMIC2_SYNC_CLK 390 +#define TEGRA210_CLK_DMIC2_SYNC_CLK_MUX 391 +#define TEGRA210_CLK_DMIC3_SYNC_CLK 392 +#define TEGRA210_CLK_DMIC3_SYNC_CLK_MUX 393 + +#define TEGRA210_CLK_CLK_MAX 394 #endif /* _DT_BINDINGS_CLOCK_TEGRA210_CAR_H */ -- cgit v1.2.3 From 24c3ebef1ab6d5620eaaa51f69223118cac97db6 Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Tue, 28 Feb 2017 16:37:22 +0200 Subject: clk: tegra: Add aclk This clock clocks the ADSP Cortex-A9. 
Signed-off-by: Peter De Schrijver Reviewed-by: Mikko Perttunen Tested-by: Mikko Perttunen Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra210.c | 10 ++++++++++ include/dt-bindings/clock/tegra210-car.h | 2 ++ 2 files changed, 12 insertions(+) (limited to 'include') diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index cfe70781e2b4..9a2512a6b419 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2308,6 +2308,11 @@ static struct tegra_audio_clk_info tegra210_audio_plls[] = { static struct clk **clks; +static const char * const aclk_parents[] = { + "pll_a1", "pll_c", "pll_p", "pll_a_out0", "pll_c2", "pll_c3", + "clk_m" +}; + static __init void tegra210_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base) { @@ -2369,6 +2374,11 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base, clk_register_clkdev(clk, "cml1", NULL); clks[TEGRA210_CLK_CML1] = clk; + clk = tegra_clk_register_super_clk("aclk", aclk_parents, + ARRAY_SIZE(aclk_parents), 0, clk_base + 0x6e0, + 0, NULL); + clks[TEGRA210_CLK_ACLK] = clk; + tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params); } diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index 5aa10278ea1b..8744b19cca3e 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -396,6 +396,8 @@ #define TEGRA210_CLK_PLL_C_UD 364 #define TEGRA210_CLK_SCLK_MUX 365 +#define TEGRA210_CLK_ACLK 370 + #define TEGRA210_CLK_DMIC1_SYNC_CLK 388 #define TEGRA210_CLK_DMIC1_SYNC_CLK_MUX 389 #define TEGRA210_CLK_DMIC2_SYNC_CLK 390 -- cgit v1.2.3 From 3843832fc8cadc2d48ba4ea4cd350a696906ac42 Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Tue, 28 Feb 2017 17:19:24 +0200 Subject: clk: tegra: Handle UTMIPLL IDDQ Export UTMIPLL IDDQ functions. These will be needed when powergating the XUSB partition. 
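A hedged sketch of the intended call sites in such a powergate path (only the two exported helpers come from this patch, the surrounding sequence is an assumption):

  #include <linux/clk/tegra.h>

  /* Sketch only: UTMIPLL goes into IDDQ before the XUSB partition is
   * gated and comes back out once the partition is powered again. */
  static void xusb_powergate_sketch(void)
  {
          tegra210_put_utmipll_in_iddq();
          /* ... power down the XUSB partition ... */
  }

  static void xusb_unpowergate_sketch(void)
  {
          /* ... power up the XUSB partition ... */
          tegra210_put_utmipll_out_iddq();
  }
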
Signed-off-by: BH Hsieh Signed-off-by: Peter De Schrijver Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra210.c | 26 ++++++++++++++++++++++++++ include/linux/clk/tegra.h | 2 ++ 2 files changed, 28 insertions(+) (limited to 'include') diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 9a2512a6b419..f89d2a912273 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2313,6 +2313,32 @@ static const char * const aclk_parents[] = { "clk_m" }; +void tegra210_put_utmipll_in_iddq(void) +{ + u32 reg; + + reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); + + if (reg & UTMIPLL_HW_PWRDN_CFG0_UTMIPLL_LOCK) { + pr_err("trying to assert IDDQ while UTMIPLL is locked\n"); + return; + } + + reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; + writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); +} +EXPORT_SYMBOL_GPL(tegra210_put_utmipll_in_iddq); + +void tegra210_put_utmipll_out_iddq(void) +{ + u32 reg; + + reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0); + reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE; + writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0); +} +EXPORT_SYMBOL_GPL(tegra210_put_utmipll_out_iddq); + static __init void tegra210_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base) { diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 7007a5f48080..e17d32831e28 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -125,5 +125,7 @@ extern void tegra210_xusb_pll_hw_control_enable(void); extern void tegra210_xusb_pll_hw_sequence_start(void); extern void tegra210_sata_pll_hw_control_enable(void); extern void tegra210_sata_pll_hw_sequence_start(void); +extern void tegra210_put_utmipll_in_iddq(void); +extern void tegra210_put_utmipll_out_iddq(void); #endif /* __LINUX_CLK_TEGRA_H_ */ -- cgit v1.2.3 From 68d724cedcca8ab86eee824682f7da0af5e6e50d Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Wed, 15 Mar 2017 14:59:32 +0200 Subject: clk: tegra: Add Tegra210 special resets Tegra210 has 2 special resets which don't follow the normal pattern: DVCO and ADSP. Add them in this patch. Changelog: v2: add DT bindings file Signed-off-by: Peter De Schrijver Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra210.c | 85 ++++++++++++++++++++++++++++++++ include/dt-bindings/reset/tegra210-car.h | 13 +++++ 2 files changed, 98 insertions(+) create mode 100644 include/dt-bindings/reset/tegra210-car.h (limited to 'include') diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 1a8121abd427..6f29125ec439 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include "clk.h" @@ -218,6 +219,12 @@ #define CLK_M_DIVISOR_SHIFT 2 #define CLK_M_DIVISOR_MASK 0x3 +#define RST_DFLL_DVCO 0x2f4 +#define DVFS_DFLL_RESET_SHIFT 0 + +#define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8 +#define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac + /* * SDM fractional divisor is 16-bit 2's complement signed number within * (-2^12 ... 2^12-1) range. Represented in PLL data structure as unsigned @@ -2982,6 +2989,81 @@ static void __init tegra210_clock_apply_init_table(void) tegra_init_from_table(init_table, clks, TEGRA210_CLK_CLK_MAX); } +/** + * tegra210_car_barrier - wait for pending writes to the CAR to complete + * + * Wait for any outstanding writes to the CAR MMIO space from this CPU + * to complete before continuing execution. No return value. 
+ */ +static void tegra210_car_barrier(void) +{ + readl_relaxed(clk_base + RST_DFLL_DVCO); +} + +/** + * tegra210_clock_assert_dfll_dvco_reset - assert the DFLL's DVCO reset + * + * Assert the reset line of the DFLL's DVCO. No return value. + */ +static void tegra210_clock_assert_dfll_dvco_reset(void) +{ + u32 v; + + v = readl_relaxed(clk_base + RST_DFLL_DVCO); + v |= (1 << DVFS_DFLL_RESET_SHIFT); + writel_relaxed(v, clk_base + RST_DFLL_DVCO); + tegra210_car_barrier(); +} + +/** + * tegra210_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset + * + * Deassert the reset line of the DFLL's DVCO, allowing the DVCO to + * operate. No return value. + */ +static void tegra210_clock_deassert_dfll_dvco_reset(void) +{ + u32 v; + + v = readl_relaxed(clk_base + RST_DFLL_DVCO); + v &= ~(1 << DVFS_DFLL_RESET_SHIFT); + writel_relaxed(v, clk_base + RST_DFLL_DVCO); + tegra210_car_barrier(); +} + +static int tegra210_reset_assert(unsigned long id) +{ + if (id == TEGRA210_RST_DFLL_DVCO) + tegra210_clock_assert_dfll_dvco_reset(); + else if (id == TEGRA210_RST_ADSP) + writel(GENMASK(26, 21) | BIT(7), + clk_base + CLK_RST_CONTROLLER_RST_DEV_Y_SET); + else + return -EINVAL; + + return 0; +} + +static int tegra210_reset_deassert(unsigned long id) +{ + if (id == TEGRA210_RST_DFLL_DVCO) + tegra210_clock_deassert_dfll_dvco_reset(); + else if (id == TEGRA210_RST_ADSP) { + writel(BIT(21), clk_base + CLK_RST_CONTROLLER_RST_DEV_Y_CLR); + /* + * Considering adsp cpu clock (min: 12.5MHZ, max: 1GHz) + * a delay of 5us ensures that it's at least + * 6 * adsp_cpu_cycle_period long. + */ + udelay(5); + writel(GENMASK(26, 22) | BIT(7), + clk_base + CLK_RST_CONTROLLER_RST_DEV_Y_CLR); + } else + return -EINVAL; + + return 0; +} + /** * tegra210_clock_init - Tegra210-specific clock initialization * @np: struct device_node * of the DT node for the SoC CAR IP block @@ -3046,6 +3128,9 @@ static void __init tegra210_clock_init(struct device_node *np) tegra_super_clk_gen5_init(clk_base, pmc_base, tegra210_clks, &pll_x_params); + tegra_init_special_resets(2, tegra210_reset_assert, + tegra210_reset_deassert); + tegra_add_of_provider(np); tegra_register_devclks(devclks, ARRAY_SIZE(devclks)); diff --git a/include/dt-bindings/reset/tegra210-car.h b/include/dt-bindings/reset/tegra210-car.h new file mode 100644 index 000000000000..296ec6e3f8c0 --- /dev/null +++ b/include/dt-bindings/reset/tegra210-car.h @@ -0,0 +1,13 @@ +/* + * This header provides Tegra210-specific constants for binding + * nvidia,tegra210-car. + */ + +#ifndef _DT_BINDINGS_RESET_TEGRA210_CAR_H +#define _DT_BINDINGS_RESET_TEGRA210_CAR_H + +#define TEGRA210_RESET(x) (7 * 32 + (x)) +#define TEGRA210_RST_DFLL_DVCO TEGRA210_RESET(0) +#define TEGRA210_RST_ADSP TEGRA210_RESET(1) + +#endif /* _DT_BINDINGS_RESET_TEGRA210_CAR_H */ -- cgit v1.2.3 From 59af78d78db8bde6a63e09772aa44192f772fa96 Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Wed, 15 Mar 2017 17:42:05 +0200 Subject: clk: tegra: Add SATA seq input control This will be used by the powergating driver to ensure proper sequencer state when the SATA domain is powergated. 
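A hedged sketch of how the powergating driver is expected to use it (the surrounding sequence is an assumption, only tegra210_set_sata_pll_seq_sw() comes from this patch):

  #include <linux/clk/tegra.h>

  /* Sketch only: force the sequencer inputs from software while the SATA
   * partition is gated, and release them again afterwards. */
  static void sata_powergate_sketch(void)
  {
          tegra210_set_sata_pll_seq_sw(true);
          /* ... power down the SATA partition ... */
  }

  static void sata_unpowergate_sketch(void)
  {
          /* ... power up the SATA partition ... */
          tegra210_set_sata_pll_seq_sw(false);
  }
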
Signed-off-by: Peter De Schrijver Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-tegra210.c | 25 +++++++++++++++++++++++++ include/linux/clk/tegra.h | 1 + 2 files changed, 26 insertions(+) (limited to 'include') diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 6f29125ec439..f3e51e640d4d 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -181,6 +181,11 @@ #define SATA_PLL_CFG0 0x490 #define SATA_PLL_CFG0_PADPLL_RESET_SWCTL BIT(0) #define SATA_PLL_CFG0_PADPLL_USE_LOCKDET BIT(2) +#define SATA_PLL_CFG0_SATA_SEQ_IN_SWCTL BIT(4) +#define SATA_PLL_CFG0_SATA_SEQ_RESET_INPUT_VALUE BIT(5) +#define SATA_PLL_CFG0_SATA_SEQ_LANE_PD_INPUT_VALUE BIT(6) +#define SATA_PLL_CFG0_SATA_SEQ_PADPLL_PD_INPUT_VALUE BIT(7) + #define SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ BIT(13) #define SATA_PLL_CFG0_SEQ_ENABLE BIT(24) @@ -483,6 +488,26 @@ void tegra210_sata_pll_hw_sequence_start(void) } EXPORT_SYMBOL_GPL(tegra210_sata_pll_hw_sequence_start); +void tegra210_set_sata_pll_seq_sw(bool state) +{ + u32 val; + + val = readl_relaxed(clk_base + SATA_PLL_CFG0); + if (state) { + val |= SATA_PLL_CFG0_SATA_SEQ_IN_SWCTL; + val |= SATA_PLL_CFG0_SATA_SEQ_RESET_INPUT_VALUE; + val |= SATA_PLL_CFG0_SATA_SEQ_LANE_PD_INPUT_VALUE; + val |= SATA_PLL_CFG0_SATA_SEQ_PADPLL_PD_INPUT_VALUE; + } else { + val &= ~SATA_PLL_CFG0_SATA_SEQ_IN_SWCTL; + val &= ~SATA_PLL_CFG0_SATA_SEQ_RESET_INPUT_VALUE; + val &= ~SATA_PLL_CFG0_SATA_SEQ_LANE_PD_INPUT_VALUE; + val &= ~SATA_PLL_CFG0_SATA_SEQ_PADPLL_PD_INPUT_VALUE; + } + writel_relaxed(val, clk_base + SATA_PLL_CFG0); +} +EXPORT_SYMBOL_GPL(tegra210_set_sata_pll_seq_sw); + static inline void _pll_misc_chk_default(void __iomem *base, struct tegra_clk_pll_params *params, u8 misc_num, u32 default_val, u32 mask) diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index e17d32831e28..d23c9cf26993 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -125,6 +125,7 @@ extern void tegra210_xusb_pll_hw_control_enable(void); extern void tegra210_xusb_pll_hw_sequence_start(void); extern void tegra210_sata_pll_hw_control_enable(void); extern void tegra210_sata_pll_hw_sequence_start(void); +extern void tegra210_set_sata_pll_seq_sw(bool state); extern void tegra210_put_utmipll_in_iddq(void); extern void tegra210_put_utmipll_out_iddq(void); -- cgit v1.2.3 From 61012985eb132a2fa5e4a3eddbc33528334fa377 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 16 Mar 2017 16:23:55 +0200 Subject: iommu/vt-d: Use lo_hi_readq() / lo_hi_writeq() There is already helper functions to do 64-bit I/O on 32-bit machines or buses, thus we don't need to reinvent the wheel. 
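The io-64-nonatomic-lo-hi.h header provides lo_hi_readq()/lo_hi_writeq() (two 32-bit accesses, low word first, matching what the removed helpers open-coded) and also maps plain readq()/writeq() to them on configurations without native 64-bit MMIO; a minimal sketch (the register offset below is illustrative):

  #include <linux/io-64-nonatomic-lo-hi.h>

  /* Sketch only: a 64-bit register read that works on both 32-bit and
   * 64-bit builds; on 32-bit it turns into two readl()s, low word first. */
  static u64 dmar_cap_read_sketch(void __iomem *iommu_base)
  {
          return lo_hi_readq(iommu_base + 0x08);  /* e.g. DMAR_CAP_REG */
  }
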
Signed-off-by: Andy Shevchenko Signed-off-by: Joerg Roedel --- include/linux/intel-iommu.h | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index c573a52ae440..485a5b48f038 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -30,6 +30,8 @@ #include #include #include +#include + #include #include @@ -72,24 +74,8 @@ #define OFFSET_STRIDE (9) -#ifdef CONFIG_64BIT #define dmar_readq(a) readq(a) #define dmar_writeq(a,v) writeq(v,a) -#else -static inline u64 dmar_readq(void __iomem *addr) -{ - u32 lo, hi; - lo = readl(addr); - hi = readl(addr + 4); - return (((u64) hi) << 32) + lo; -} - -static inline void dmar_writeq(void __iomem *addr, u64 val) -{ - writel((u32)val, addr); - writel((u32)(val >> 32), addr + 4); -} -#endif #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) #define DMAR_VER_MINOR(v) ((v) & 0x0f) -- cgit v1.2.3 From 273df9635385b2156851c7ee49f40658d7bcb29d Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 16 Mar 2017 17:00:19 +0000 Subject: iommu/dma: Make PCI window reservation generic Now that we're applying the IOMMU API reserved regions to our IOVA domains, we shouldn't need to privately special-case PCI windows, or indeed anything else which isn't specific to our iommu-dma layer. However, since those aren't IOMMU-specific either, rather than start duplicating code into IOMMU drivers let's transform the existing function into an iommu_get_resv_regions() helper that they can share. Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel --- drivers/iommu/arm-smmu-v3.c | 2 ++ drivers/iommu/arm-smmu.c | 2 ++ drivers/iommu/dma-iommu.c | 38 ++++++++++++++++++++++++++++---------- include/linux/dma-iommu.h | 5 +++++ 4 files changed, 37 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 591bb96047c9..bbd46efbe075 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1893,6 +1893,8 @@ static void arm_smmu_get_resv_regions(struct device *dev, return; list_add_tail(®ion->list, head); + + iommu_dma_get_resv_regions(dev, head); } static void arm_smmu_put_resv_regions(struct device *dev, diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index b493c99e17f7..9b33700b7c69 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1613,6 +1613,8 @@ static void arm_smmu_get_resv_regions(struct device *dev, return; list_add_tail(®ion->list, head); + + iommu_dma_get_resv_regions(dev, head); } static void arm_smmu_put_resv_regions(struct device *dev, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 5787f919f4ec..85652110c8ff 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -167,22 +167,43 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) } EXPORT_SYMBOL(iommu_put_dma_cookie); -static void iova_reserve_pci_windows(struct pci_dev *dev, - struct iova_domain *iovad) +/** + * iommu_dma_get_resv_regions - Reserved region driver helper + * @dev: Device from iommu_get_resv_regions() + * @list: Reserved region list from iommu_get_resv_regions() + * + * IOMMU drivers can use this to implement their .get_resv_regions callback + * for general non-IOMMU-specific reservations. Currently, this covers host + * bridge windows for PCI devices. 
+ */ +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) { - struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); + struct pci_host_bridge *bridge; struct resource_entry *window; - unsigned long lo, hi; + if (!dev_is_pci(dev)) + return; + + bridge = pci_find_host_bridge(to_pci_dev(dev)->bus); resource_list_for_each_entry(window, &bridge->windows) { + struct iommu_resv_region *region; + phys_addr_t start; + size_t length; + if (resource_type(window->res) != IORESOURCE_MEM) continue; - lo = iova_pfn(iovad, window->res->start - window->offset); - hi = iova_pfn(iovad, window->res->end - window->offset); - reserve_iova(iovad, lo, hi); + start = window->res->start - window->offset; + length = window->res->end - window->res->start + 1; + region = iommu_alloc_resv_region(start, length, 0, + IOMMU_RESV_RESERVED); + if (!region) + return; + + list_add_tail(®ion->list, list); } } +EXPORT_SYMBOL(iommu_dma_get_resv_regions); static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, phys_addr_t start, phys_addr_t end) @@ -218,9 +239,6 @@ static int iova_reserve_iommu_regions(struct device *dev, LIST_HEAD(resv_regions); int ret = 0; - if (dev_is_pci(dev)) - iova_reserve_pci_windows(to_pci_dev(dev), iovad); - iommu_get_resv_regions(dev, &resv_regions); list_for_each_entry(region, &resv_regions, list) { unsigned long lo, hi; diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 5725c94b1f12..b6635a46fc7c 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -71,6 +71,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); /* The DMA API isn't _quite_ the whole story, though... */ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); #else @@ -100,6 +101,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) { } +static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) +{ +} + #endif /* CONFIG_IOMMU_DMA */ #endif /* __KERNEL__ */ #endif /* __DMA_IOMMU_H */ -- cgit v1.2.3 From 7e2a9035c1dbc4632f1897a8fe580fc90f33c013 Mon Sep 17 00:00:00 2001 From: Andy Yan Date: Fri, 17 Mar 2017 18:18:38 +0100 Subject: clk: rockchip: rename RK1108 to RV1108 Rockchip finally named the SOC as RV1108, so change it. 
Signed-off-by: Andy Yan [include rename in rk1108.dtsi to prevent compile errors] Signed-off-by: Heiko Stuebner --- arch/arm/boot/dts/rk1108.dtsi | 2 +- drivers/clk/rockchip/Makefile | 2 +- drivers/clk/rockchip/clk-rk1108.c | 531 --------------------------------- drivers/clk/rockchip/clk-rv1108.c | 531 +++++++++++++++++++++++++++++++++ drivers/clk/rockchip/clk.h | 28 +- include/dt-bindings/clock/rk1108-cru.h | 269 ----------------- include/dt-bindings/clock/rv1108-cru.h | 269 +++++++++++++++++ 7 files changed, 816 insertions(+), 816 deletions(-) delete mode 100644 drivers/clk/rockchip/clk-rk1108.c create mode 100644 drivers/clk/rockchip/clk-rv1108.c delete mode 100644 include/dt-bindings/clock/rk1108-cru.h create mode 100644 include/dt-bindings/clock/rv1108-cru.h (limited to 'include') diff --git a/arch/arm/boot/dts/rk1108.dtsi b/arch/arm/boot/dts/rk1108.dtsi index d6194bff7afe..4867342b88d4 100644 --- a/arch/arm/boot/dts/rk1108.dtsi +++ b/arch/arm/boot/dts/rk1108.dtsi @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include / { #address-cells = <1>; diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile index 141971488f40..26b220c988b2 100644 --- a/drivers/clk/rockchip/Makefile +++ b/drivers/clk/rockchip/Makefile @@ -12,7 +12,7 @@ obj-y += clk-muxgrf.o obj-y += clk-ddr.o obj-$(CONFIG_RESET_CONTROLLER) += softrst.o -obj-y += clk-rk1108.o +obj-y += clk-rv1108.o obj-y += clk-rk3036.o obj-y += clk-rk3188.o obj-y += clk-rk3228.o diff --git a/drivers/clk/rockchip/clk-rk1108.c b/drivers/clk/rockchip/clk-rk1108.c deleted file mode 100644 index 92750d798e5d..000000000000 --- a/drivers/clk/rockchip/clk-rk1108.c +++ /dev/null @@ -1,531 +0,0 @@ -/* - * Copyright (c) 2016 Rockchip Electronics Co. Ltd. - * Author: Shawn Lin - * Andy Yan - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include "clk.h" - -#define RK1108_GRF_SOC_STATUS0 0x480 - -enum rk1108_plls { - apll, dpll, gpll, -}; - -static struct rockchip_pll_rate_table rk1108_pll_rates[] = { - /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ - RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0), - RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0), - RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0), - RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0), - RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0), - RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0), - RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0), - RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0), - RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0), - RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0), - RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0), - RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0), - RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0), - RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0), - RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0), - RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0), - RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0), - RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0), - RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0), - RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0), - RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0), - RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0), - RK3036_PLL_RATE( 984000000, 1, 82, 2, 1, 1, 0), - RK3036_PLL_RATE( 960000000, 1, 80, 2, 1, 1, 0), - RK3036_PLL_RATE( 936000000, 1, 78, 2, 1, 1, 0), - RK3036_PLL_RATE( 912000000, 1, 76, 2, 1, 1, 0), - RK3036_PLL_RATE( 900000000, 4, 300, 2, 1, 1, 0), - RK3036_PLL_RATE( 888000000, 1, 74, 2, 1, 1, 0), - RK3036_PLL_RATE( 864000000, 1, 72, 2, 1, 1, 0), - RK3036_PLL_RATE( 840000000, 1, 70, 2, 1, 1, 0), - RK3036_PLL_RATE( 816000000, 1, 68, 2, 1, 1, 0), - RK3036_PLL_RATE( 800000000, 6, 400, 2, 1, 1, 0), - RK3036_PLL_RATE( 700000000, 6, 350, 2, 1, 1, 0), - RK3036_PLL_RATE( 696000000, 1, 58, 2, 1, 1, 0), - RK3036_PLL_RATE( 600000000, 1, 75, 3, 1, 1, 0), - RK3036_PLL_RATE( 594000000, 2, 99, 2, 1, 1, 0), - RK3036_PLL_RATE( 504000000, 1, 63, 3, 1, 1, 0), - RK3036_PLL_RATE( 500000000, 6, 250, 2, 1, 1, 0), - RK3036_PLL_RATE( 408000000, 1, 68, 2, 2, 1, 0), - RK3036_PLL_RATE( 312000000, 1, 52, 2, 2, 1, 0), - RK3036_PLL_RATE( 216000000, 1, 72, 4, 2, 1, 0), - RK3036_PLL_RATE( 96000000, 1, 64, 4, 4, 1, 0), - { /* sentinel */ }, -}; - -#define RK1108_DIV_CORE_MASK 0xf -#define RK1108_DIV_CORE_SHIFT 4 - -#define RK1108_CLKSEL0(_core_peri_div) \ - { \ - .reg = RK1108_CLKSEL_CON(1), \ - .val = HIWORD_UPDATE(_core_peri_div, RK1108_DIV_CORE_MASK,\ - RK1108_DIV_CORE_SHIFT) \ - } - -#define RK1108_CPUCLK_RATE(_prate, _core_peri_div) \ - { \ - .prate = _prate, \ - .divs = { \ - RK1108_CLKSEL0(_core_peri_div), \ - }, \ - } - -static struct rockchip_cpuclk_rate_table rk1108_cpuclk_rates[] __initdata = { - RK1108_CPUCLK_RATE(816000000, 4), - RK1108_CPUCLK_RATE(600000000, 4), - RK1108_CPUCLK_RATE(312000000, 4), -}; - -static const struct rockchip_cpuclk_reg_data rk1108_cpuclk_data = { - .core_reg = RK1108_CLKSEL_CON(0), - .div_core_shift = 0, - .div_core_mask = 0x1f, - .mux_core_alt = 1, - .mux_core_main = 0, - .mux_core_shift = 8, - .mux_core_mask = 0x1, -}; - -PNAME(mux_pll_p) = { "xin24m", "xin24m"}; -PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr", "apll_ddr" }; -PNAME(mux_armclk_p) = { "apll_core", "gpll_core", "dpll_core" }; -PNAME(mux_usb480m_pre_p) = { "usbphy", "xin24m" }; -PNAME(mux_hdmiphy_phy_p) = { "hdmiphy", "xin24m" }; 
-PNAME(mux_dclk_hdmiphy_pre_p) = { "dclk_hdmiphy_src_gpll", "dclk_hdmiphy_src_dpll" }; -PNAME(mux_pll_src_4plls_p) = { "dpll", "hdmiphy", "gpll", "usb480m" }; -PNAME(mux_pll_src_3plls_p) = { "apll", "gpll", "dpll" }; -PNAME(mux_pll_src_2plls_p) = { "dpll", "gpll" }; -PNAME(mux_pll_src_apll_gpll_p) = { "apll", "gpll" }; -PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_dpll", "aclk_peri_src_gpll" }; -PNAME(mux_aclk_bus_src_p) = { "aclk_bus_src_gpll", "aclk_bus_src_apll", "aclk_bus_src_dpll" }; -PNAME(mux_mmc_src_p) = { "dpll", "gpll", "xin24m", "usb480m" }; -PNAME(mux_pll_src_dpll_gpll_usb480m_p) = { "dpll", "gpll", "usb480m" }; -PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; -PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; -PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; -PNAME(mux_sclk_macphy_p) = { "sclk_macphy_pre", "ext_gmac" }; -PNAME(mux_i2s0_pre_p) = { "i2s0_src", "i2s0_frac", "ext_i2s", "xin12m" }; -PNAME(mux_i2s_out_p) = { "i2s0_pre", "xin12m" }; -PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "xin12m" }; -PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" }; - -static struct rockchip_pll_clock rk1108_pll_clks[] __initdata = { - [apll] = PLL(pll_rk3399, PLL_APLL, "apll", mux_pll_p, 0, RK1108_PLL_CON(0), - RK1108_PLL_CON(3), 8, 31, 0, rk1108_pll_rates), - [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RK1108_PLL_CON(8), - RK1108_PLL_CON(11), 8, 31, 0, NULL), - [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RK1108_PLL_CON(16), - RK1108_PLL_CON(19), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk1108_pll_rates), -}; - -#define MFLAGS CLK_MUX_HIWORD_MASK -#define DFLAGS CLK_DIVIDER_HIWORD_MASK -#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) -#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK - -static struct rockchip_clk_branch rk1108_uart0_fracmux __initdata = - MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(13), 8, 2, MFLAGS); - -static struct rockchip_clk_branch rk1108_uart1_fracmux __initdata = - MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(14), 8, 2, MFLAGS); - -static struct rockchip_clk_branch rk1108_uart2_fracmux __initdata = - MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(15), 8, 2, MFLAGS); - -static struct rockchip_clk_branch rk1108_i2s0_fracmux __initdata = - MUX(0, "i2s0_pre", mux_i2s0_pre_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(5), 12, 2, MFLAGS); - -static struct rockchip_clk_branch rk1108_i2s1_fracmux __initdata = - MUX(0, "i2s1_pre", mux_i2s1_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(6), 12, 2, MFLAGS); - -static struct rockchip_clk_branch rk1108_i2s2_fracmux __initdata = - MUX(0, "i2s2_pre", mux_i2s2_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(7), 12, 2, MFLAGS); - -static struct rockchip_clk_branch rk1108_clk_branches[] __initdata = { - MUX(0, "hdmi_phy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT, - RK1108_MISC_CON, 13, 2, MFLAGS), - MUX(0, "usb480m", mux_usb480m_pre_p, CLK_SET_RATE_PARENT, - RK1108_MISC_CON, 15, 2, MFLAGS), - /* - * Clock-Architecture Diagram 2 - */ - - /* PD_CORE */ - GATE(0, "dpll_core", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 1, GFLAGS), - GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 0, GFLAGS), - GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 2, GFLAGS), - COMPOSITE_NOMUX(0, "pclken_dbg", "armclk", CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(1), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, - RK1108_CLKGATE_CON(0), 
5, GFLAGS), - COMPOSITE_NOMUX(ACLK_ENMCORE, "aclkenm_core", "armclk", CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(1), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, - RK1108_CLKGATE_CON(0), 4, GFLAGS), - GATE(ACLK_CORE, "aclk_core", "aclkenm_core", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(11), 0, GFLAGS), - GATE(0, "pclk_dbg", "pclken_dbg", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(11), 1, GFLAGS), - - /* PD_RKVENC */ - - /* PD_RKVDEC */ - - /* PD_PMU_wrapper */ - COMPOSITE_NOMUX(0, "pmu_24m_ena", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(38), 0, 5, DFLAGS, - RK1108_CLKGATE_CON(8), 12, GFLAGS), - GATE(0, "pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 0, GFLAGS), - GATE(0, "intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 1, GFLAGS), - GATE(0, "gpio0_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 2, GFLAGS), - GATE(0, "pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 3, GFLAGS), - GATE(0, "pmu_noc", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 4, GFLAGS), - GATE(0, "i2c0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 5, GFLAGS), - GATE(0, "pwm0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(10), 6, GFLAGS), - COMPOSITE(0, "pwm0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(8), 15, GFLAGS), - COMPOSITE(0, "i2c0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(19), 7, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(8), 14, GFLAGS), - GATE(0, "pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(8), 13, GFLAGS), - - /* - * Clock-Architecture Diagram 4 - */ - COMPOSITE(0, "aclk_vio0_2wrap_occ", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS, - RK1108_CLKGATE_CON(6), 0, GFLAGS), - GATE(0, "aclk_vio0_pre", "aclk_vio0_2wrap_occ", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(17), 0, GFLAGS), - COMPOSITE_NOMUX(0, "hclk_vio_pre", "aclk_vio0_pre", 0, - RK1108_CLKSEL_CON(29), 0, 5, DFLAGS, - RK1108_CLKGATE_CON(7), 2, GFLAGS), - COMPOSITE_NOMUX(0, "pclk_vio_pre", "aclk_vio0_pre", 0, - RK1108_CLKSEL_CON(29), 8, 5, DFLAGS, - RK1108_CLKGATE_CON(7), 3, GFLAGS), - - INVERTER(0, "pclk_vip", "ext_vip", - RK1108_CLKSEL_CON(31), 8, IFLAGS), - GATE(0, "pclk_isp_pre", "pclk_vip", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(7), 6, GFLAGS), - GATE(0, "pclk_isp", "pclk_isp_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(18), 10, GFLAGS), - GATE(0, "dclk_hdmiphy_src_gpll", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(6), 5, GFLAGS), - GATE(0, "dclk_hdmiphy_src_dpll", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(6), 4, GFLAGS), - COMPOSITE_NOGATE(0, "dclk_hdmiphy", mux_dclk_hdmiphy_pre_p, 0, - RK1108_CLKSEL_CON(32), 6, 2, MFLAGS, 8, 6, DFLAGS), - - /* - * Clock-Architecture Diagram 5 - */ - - FACTOR(0, "xin12m", "xin24m", 0, 1, 2), - - COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(5), 8, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(2), 0, GFLAGS), - COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(8), 0, - RK1108_CLKGATE_CON(2), 1, GFLAGS, - &rk1108_i2s0_fracmux), - GATE(SCLK_I2S0, "sclk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT, - RK1108_CLKGATE_CON(2), 2, GFLAGS), - COMPOSITE_NODIV(0, "i2s_out", mux_i2s_out_p, 0, - RK1108_CLKSEL_CON(5), 15, 1, MFLAGS, - RK1108_CLKGATE_CON(2), 3, GFLAGS), - - COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(6), 8, 1, MFLAGS, 0, 7, DFLAGS, - 
RK1108_CLKGATE_CON(2), 4, GFLAGS), - COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, - RK2928_CLKSEL_CON(9), 0, - RK2928_CLKGATE_CON(2), 5, GFLAGS, - &rk1108_i2s1_fracmux), - GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT, - RK1108_CLKGATE_CON(2), 6, GFLAGS), - - COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(7), 8, 1, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 8, GFLAGS), - COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(10), 0, - RK1108_CLKGATE_CON(2), 9, GFLAGS, - &rk1108_i2s2_fracmux), - GATE(SCLK_I2S2, "sclk_i2s2", "i2s2_pre", CLK_SET_RATE_PARENT, - RK1108_CLKGATE_CON(2), 10, GFLAGS), - - /* PD_BUS */ - GATE(0, "aclk_bus_src_gpll", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 0, GFLAGS), - GATE(0, "aclk_bus_src_apll", "apll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 1, GFLAGS), - GATE(0, "aclk_bus_src_dpll", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 2, GFLAGS), - COMPOSITE_NOGATE(ACLK_PRE, "aclk_bus_pre", mux_aclk_bus_src_p, 0, - RK1108_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 5, DFLAGS), - COMPOSITE_NOMUX(0, "hclk_bus_pre", "aclk_bus_2wrap_occ", 0, - RK1108_CLKSEL_CON(3), 0, 5, DFLAGS, - RK1108_CLKGATE_CON(1), 4, GFLAGS), - COMPOSITE_NOMUX(0, "pclken_bus", "aclk_bus_2wrap_occ", 0, - RK1108_CLKSEL_CON(3), 8, 5, DFLAGS, - RK1108_CLKGATE_CON(1), 5, GFLAGS), - GATE(0, "pclk_bus_pre", "pclken_bus", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 6, GFLAGS), - GATE(0, "pclk_top_pre", "pclken_bus", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 7, GFLAGS), - GATE(0, "pclk_ddr_pre", "pclken_bus", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 8, GFLAGS), - GATE(0, "clk_timer0", "mux_pll_p", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 9, GFLAGS), - GATE(0, "clk_timer1", "mux_pll_p", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(1), 10, GFLAGS), - GATE(0, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 4, GFLAGS), - - COMPOSITE(0, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 1, GFLAGS), - COMPOSITE(0, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 3, GFLAGS), - COMPOSITE(0, "uart21_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 5, GFLAGS), - - COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(16), 0, - RK1108_CLKGATE_CON(3), 2, GFLAGS, - &rk1108_uart0_fracmux), - COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(17), 0, - RK1108_CLKGATE_CON(3), 4, GFLAGS, - &rk1108_uart1_fracmux), - COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(18), 0, - RK1108_CLKGATE_CON(3), 6, GFLAGS, - &rk1108_uart2_fracmux), - GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 10, GFLAGS), - GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 11, GFLAGS), - GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 12, GFLAGS), - - COMPOSITE(0, "clk_i2c1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(19), 15, 2, MFLAGS, 8, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 7, GFLAGS), - COMPOSITE(0, "clk_i2c2", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(20), 7, 2, MFLAGS, 0, 
7, DFLAGS, - RK1108_CLKGATE_CON(3), 8, GFLAGS), - COMPOSITE(0, "clk_i2c3", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(20), 15, 2, MFLAGS, 8, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 9, GFLAGS), - GATE(0, "pclk_i2c1", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 0, GFLAGS), - GATE(0, "pclk_i2c2", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 1, GFLAGS), - GATE(0, "pclk_i2c3", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 2, GFLAGS), - COMPOSITE(0, "clk_pwm1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(12), 15, 2, MFLAGS, 8, 7, DFLAGS, - RK1108_CLKGATE_CON(3), 10, GFLAGS), - GATE(0, "pclk_pwm1", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 6, GFLAGS), - GATE(0, "pclk_wdt", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 3, GFLAGS), - GATE(0, "pclk_gpio1", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 7, GFLAGS), - GATE(0, "pclk_gpio2", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 8, GFLAGS), - GATE(0, "pclk_gpio3", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(13), 9, GFLAGS), - - GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(14), 0, GFLAGS), - - GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_pre", 0, - RK1108_CLKGATE_CON(12), 2, GFLAGS), - GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 3, GFLAGS), - GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 1, GFLAGS), - - /* PD_DDR */ - GATE(0, "apll_ddr", "apll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 8, GFLAGS), - GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 9, GFLAGS), - GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 10, GFLAGS), - COMPOSITE(0, "ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 3, - DFLAGS | CLK_DIVIDER_POWER_OF_TWO, - RK1108_CLKGATE_CON(10), 9, GFLAGS), - GATE(0, "ddrupctl", "ddrphy_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 4, GFLAGS), - GATE(0, "ddrc", "ddrphy", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 5, GFLAGS), - GATE(0, "ddrmon", "ddrphy_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(12), 6, GFLAGS), - GATE(0, "timer_clk", "xin24m", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(0), 11, GFLAGS), - - /* - * Clock-Architecture Diagram 6 - */ - - /* PD_PERI */ - COMPOSITE_NOMUX(0, "pclk_periph_pre", "gpll", 0, - RK1108_CLKSEL_CON(23), 10, 5, DFLAGS, - RK1108_CLKGATE_CON(4), 5, GFLAGS), - GATE(0, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(15), 13, GFLAGS), - COMPOSITE_NOMUX(0, "hclk_periph_pre", "gpll", 0, - RK1108_CLKSEL_CON(23), 5, 5, DFLAGS, - RK1108_CLKGATE_CON(4), 4, GFLAGS), - GATE(0, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(15), 12, GFLAGS), - - GATE(0, "aclk_peri_src_dpll", "dpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(4), 1, GFLAGS), - GATE(0, "aclk_peri_src_gpll", "gpll", CLK_IGNORE_UNUSED, - RK1108_CLKGATE_CON(4), 2, GFLAGS), - COMPOSITE(0, "aclk_periph", mux_aclk_peri_src_p, CLK_IGNORE_UNUSED, - RK1108_CLKSEL_CON(23), 15, 2, MFLAGS, 0, 5, DFLAGS, - RK1108_CLKGATE_CON(15), 11, GFLAGS), - - COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, - RK1108_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 8, DFLAGS, - RK1108_CLKGATE_CON(5), 0, GFLAGS), - - COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, - RK1108_CLKSEL_CON(25), 10, 2, MFLAGS, - RK1108_CLKGATE_CON(5), 2, GFLAGS), - DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, - 
RK1108_CLKSEL_CON(26), 0, 8, DFLAGS), - - COMPOSITE_NODIV(0, "sclk_emmc_src", mux_mmc_src_p, 0, - RK1108_CLKSEL_CON(25), 12, 2, MFLAGS, - RK1108_CLKGATE_CON(5), 1, GFLAGS), - DIV(SCLK_EMMC, "sclk_emmc", "sclk_emmc_src", 0, - RK2928_CLKSEL_CON(26), 8, 8, DFLAGS), - GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 0, GFLAGS), - GATE(HCLK_SDIO, "hclk_sdio", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 1, GFLAGS), - GATE(HCLK_EMMC, "hclk_emmc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 2, GFLAGS), - - COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 5, DFLAGS, - RK1108_CLKGATE_CON(5), 3, GFLAGS), - GATE(HCLK_NANDC, "hclk_nandc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 3, GFLAGS), - - COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_2plls_p, 0, - RK1108_CLKSEL_CON(27), 7, 2, MFLAGS, 0, 7, DFLAGS, - RK1108_CLKGATE_CON(5), 4, GFLAGS), - GATE(HCLK_SFC, "hclk_sfc", "hclk_periph", 0, RK1108_CLKGATE_CON(15), 10, GFLAGS), - - COMPOSITE(0, "sclk_macphy_pre", mux_pll_src_apll_gpll_p, 0, - RK1108_CLKSEL_CON(24), 12, 2, MFLAGS, 0, 5, DFLAGS, - RK1108_CLKGATE_CON(4), 10, GFLAGS), - MUX(0, "sclk_macphy", mux_sclk_macphy_p, CLK_SET_RATE_PARENT, - RK1108_CLKSEL_CON(24), 8, 2, MFLAGS), - GATE(0, "sclk_macphy_rx", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 8, GFLAGS), - GATE(0, "sclk_mac_ref", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 6, GFLAGS), - GATE(0, "sclk_mac_refout", "sclk_macphy", 0, RK1108_CLKGATE_CON(4), 7, GFLAGS), - - MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK1108_SDMMC_CON0, 1), - MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK1108_SDMMC_CON1, 1), - - MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK1108_SDIO_CON0, 1), - MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK1108_SDIO_CON1, 1), - - MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK1108_EMMC_CON0, 1), - MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK1108_EMMC_CON1, 1), -}; - -static const char *const rk1108_critical_clocks[] __initconst = { - "aclk_core", - "aclk_bus_src_gpll", - "aclk_periph", - "hclk_periph", - "pclk_periph", -}; - -static void __init rk1108_clk_init(struct device_node *np) -{ - struct rockchip_clk_provider *ctx; - void __iomem *reg_base; - - reg_base = of_iomap(np, 0); - if (!reg_base) { - pr_err("%s: could not map cru region\n", __func__); - return; - } - - ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); - if (IS_ERR(ctx)) { - pr_err("%s: rockchip clk init failed\n", __func__); - iounmap(reg_base); - return; - } - - rockchip_clk_register_plls(ctx, rk1108_pll_clks, - ARRAY_SIZE(rk1108_pll_clks), - RK1108_GRF_SOC_STATUS0); - rockchip_clk_register_branches(ctx, rk1108_clk_branches, - ARRAY_SIZE(rk1108_clk_branches)); - rockchip_clk_protect_critical(rk1108_critical_clocks, - ARRAY_SIZE(rk1108_critical_clocks)); - - rockchip_clk_register_armclk(ctx, ARMCLK, "armclk", - mux_armclk_p, ARRAY_SIZE(mux_armclk_p), - &rk1108_cpuclk_data, rk1108_cpuclk_rates, - ARRAY_SIZE(rk1108_cpuclk_rates)); - - rockchip_register_softrst(np, 13, reg_base + RK1108_SOFTRST_CON(0), - ROCKCHIP_SOFTRST_HIWORD_MASK); - - rockchip_register_restart_notifier(ctx, RK1108_GLB_SRST_FST, NULL); - - rockchip_clk_of_add_provider(np, ctx); -} -CLK_OF_DECLARE(rk1108_cru, "rockchip,rk1108-cru", rk1108_clk_init); diff --git a/drivers/clk/rockchip/clk-rv1108.c b/drivers/clk/rockchip/clk-rv1108.c new file mode 100644 index 000000000000..7c05ab366348 --- /dev/null +++ b/drivers/clk/rockchip/clk-rv1108.c @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. 
+ * Author: Shawn Lin + * Andy Yan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "clk.h" + +#define RV1108_GRF_SOC_STATUS0 0x480 + +enum rv1108_plls { + apll, dpll, gpll, +}; + +static struct rockchip_pll_rate_table rv1108_pll_rates[] = { + /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ + RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0), + RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0), + RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0), + RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0), + RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0), + RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0), + RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0), + RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0), + RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0), + RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0), + RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0), + RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0), + RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0), + RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0), + RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0), + RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0), + RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0), + RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0), + RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0), + RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0), + RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0), + RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0), + RK3036_PLL_RATE( 984000000, 1, 82, 2, 1, 1, 0), + RK3036_PLL_RATE( 960000000, 1, 80, 2, 1, 1, 0), + RK3036_PLL_RATE( 936000000, 1, 78, 2, 1, 1, 0), + RK3036_PLL_RATE( 912000000, 1, 76, 2, 1, 1, 0), + RK3036_PLL_RATE( 900000000, 4, 300, 2, 1, 1, 0), + RK3036_PLL_RATE( 888000000, 1, 74, 2, 1, 1, 0), + RK3036_PLL_RATE( 864000000, 1, 72, 2, 1, 1, 0), + RK3036_PLL_RATE( 840000000, 1, 70, 2, 1, 1, 0), + RK3036_PLL_RATE( 816000000, 1, 68, 2, 1, 1, 0), + RK3036_PLL_RATE( 800000000, 6, 400, 2, 1, 1, 0), + RK3036_PLL_RATE( 700000000, 6, 350, 2, 1, 1, 0), + RK3036_PLL_RATE( 696000000, 1, 58, 2, 1, 1, 0), + RK3036_PLL_RATE( 600000000, 1, 75, 3, 1, 1, 0), + RK3036_PLL_RATE( 594000000, 2, 99, 2, 1, 1, 0), + RK3036_PLL_RATE( 504000000, 1, 63, 3, 1, 1, 0), + RK3036_PLL_RATE( 500000000, 6, 250, 2, 1, 1, 0), + RK3036_PLL_RATE( 408000000, 1, 68, 2, 2, 1, 0), + RK3036_PLL_RATE( 312000000, 1, 52, 2, 2, 1, 0), + RK3036_PLL_RATE( 216000000, 1, 72, 4, 2, 1, 0), + RK3036_PLL_RATE( 96000000, 1, 64, 4, 4, 1, 0), + { /* sentinel */ }, +}; + +#define RV1108_DIV_CORE_MASK 0xf +#define RV1108_DIV_CORE_SHIFT 4 + +#define RV1108_CLKSEL0(_core_peri_div) \ + { \ + .reg = RV1108_CLKSEL_CON(1), \ + .val = HIWORD_UPDATE(_core_peri_div, RV1108_DIV_CORE_MASK,\ + RV1108_DIV_CORE_SHIFT) \ + } + +#define RV1108_CPUCLK_RATE(_prate, _core_peri_div) \ + { \ + .prate = _prate, \ + .divs = { \ + RV1108_CLKSEL0(_core_peri_div), \ + }, \ + } + +static struct rockchip_cpuclk_rate_table rv1108_cpuclk_rates[] __initdata = { + RV1108_CPUCLK_RATE(816000000, 4), + RV1108_CPUCLK_RATE(600000000, 4), + RV1108_CPUCLK_RATE(312000000, 4), +}; 
+ +static const struct rockchip_cpuclk_reg_data rv1108_cpuclk_data = { + .core_reg = RV1108_CLKSEL_CON(0), + .div_core_shift = 0, + .div_core_mask = 0x1f, + .mux_core_alt = 1, + .mux_core_main = 0, + .mux_core_shift = 8, + .mux_core_mask = 0x1, +}; + +PNAME(mux_pll_p) = { "xin24m", "xin24m"}; +PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr", "apll_ddr" }; +PNAME(mux_armclk_p) = { "apll_core", "gpll_core", "dpll_core" }; +PNAME(mux_usb480m_pre_p) = { "usbphy", "xin24m" }; +PNAME(mux_hdmiphy_phy_p) = { "hdmiphy", "xin24m" }; +PNAME(mux_dclk_hdmiphy_pre_p) = { "dclk_hdmiphy_src_gpll", "dclk_hdmiphy_src_dpll" }; +PNAME(mux_pll_src_4plls_p) = { "dpll", "hdmiphy", "gpll", "usb480m" }; +PNAME(mux_pll_src_3plls_p) = { "apll", "gpll", "dpll" }; +PNAME(mux_pll_src_2plls_p) = { "dpll", "gpll" }; +PNAME(mux_pll_src_apll_gpll_p) = { "apll", "gpll" }; +PNAME(mux_aclk_peri_src_p) = { "aclk_peri_src_dpll", "aclk_peri_src_gpll" }; +PNAME(mux_aclk_bus_src_p) = { "aclk_bus_src_gpll", "aclk_bus_src_apll", "aclk_bus_src_dpll" }; +PNAME(mux_mmc_src_p) = { "dpll", "gpll", "xin24m", "usb480m" }; +PNAME(mux_pll_src_dpll_gpll_usb480m_p) = { "dpll", "gpll", "usb480m" }; +PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; +PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; +PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; +PNAME(mux_sclk_macphy_p) = { "sclk_macphy_pre", "ext_gmac" }; +PNAME(mux_i2s0_pre_p) = { "i2s0_src", "i2s0_frac", "ext_i2s", "xin12m" }; +PNAME(mux_i2s_out_p) = { "i2s0_pre", "xin12m" }; +PNAME(mux_i2s1_p) = { "i2s1_src", "i2s1_frac", "xin12m" }; +PNAME(mux_i2s2_p) = { "i2s2_src", "i2s2_frac", "xin12m" }; + +static struct rockchip_pll_clock rv1108_pll_clks[] __initdata = { + [apll] = PLL(pll_rk3399, PLL_APLL, "apll", mux_pll_p, 0, RV1108_PLL_CON(0), + RV1108_PLL_CON(3), 8, 31, 0, rv1108_pll_rates), + [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RV1108_PLL_CON(8), + RV1108_PLL_CON(11), 8, 31, 0, NULL), + [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RV1108_PLL_CON(16), + RV1108_PLL_CON(19), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rv1108_pll_rates), +}; + +#define MFLAGS CLK_MUX_HIWORD_MASK +#define DFLAGS CLK_DIVIDER_HIWORD_MASK +#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE) +#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK + +static struct rockchip_clk_branch rv1108_uart0_fracmux __initdata = + MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(13), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rv1108_uart1_fracmux __initdata = + MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(14), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rv1108_uart2_fracmux __initdata = + MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(15), 8, 2, MFLAGS); + +static struct rockchip_clk_branch rv1108_i2s0_fracmux __initdata = + MUX(0, "i2s0_pre", mux_i2s0_pre_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(5), 12, 2, MFLAGS); + +static struct rockchip_clk_branch rv1108_i2s1_fracmux __initdata = + MUX(0, "i2s1_pre", mux_i2s1_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(6), 12, 2, MFLAGS); + +static struct rockchip_clk_branch rv1108_i2s2_fracmux __initdata = + MUX(0, "i2s2_pre", mux_i2s2_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(7), 12, 2, MFLAGS); + +static struct rockchip_clk_branch rv1108_clk_branches[] __initdata = { + MUX(0, "hdmi_phy", mux_hdmiphy_phy_p, CLK_SET_RATE_PARENT, + RV1108_MISC_CON, 13, 2, MFLAGS), + MUX(0, "usb480m", mux_usb480m_pre_p, 
CLK_SET_RATE_PARENT, + RV1108_MISC_CON, 15, 2, MFLAGS), + /* + * Clock-Architecture Diagram 2 + */ + + /* PD_CORE */ + GATE(0, "dpll_core", "dpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(0), 1, GFLAGS), + GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(0), 0, GFLAGS), + GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(0), 2, GFLAGS), + COMPOSITE_NOMUX(0, "pclken_dbg", "armclk", CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(1), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, + RV1108_CLKGATE_CON(0), 5, GFLAGS), + COMPOSITE_NOMUX(ACLK_ENMCORE, "aclkenm_core", "armclk", CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(1), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, + RV1108_CLKGATE_CON(0), 4, GFLAGS), + GATE(ACLK_CORE, "aclk_core", "aclkenm_core", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(11), 0, GFLAGS), + GATE(0, "pclk_dbg", "pclken_dbg", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(11), 1, GFLAGS), + + /* PD_RKVENC */ + + /* PD_RKVDEC */ + + /* PD_PMU_wrapper */ + COMPOSITE_NOMUX(0, "pmu_24m_ena", "gpll", CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(38), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(8), 12, GFLAGS), + GATE(0, "pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(10), 0, GFLAGS), + GATE(0, "intmem1", "pmu_24m_ena", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(10), 1, GFLAGS), + GATE(0, "gpio0_pmu", "pmu_24m_ena", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(10), 2, GFLAGS), + GATE(0, "pmugrf", "pmu_24m_ena", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(10), 3, GFLAGS), + GATE(0, "pmu_noc", "pmu_24m_ena", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(10), 4, GFLAGS), + GATE(0, "i2c0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(10), 5, GFLAGS), + GATE(0, "pwm0_pmu_pclk", "pmu_24m_ena", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(10), 6, GFLAGS), + COMPOSITE(0, "pwm0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(8), 15, GFLAGS), + COMPOSITE(0, "i2c0_pmu_clk", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(19), 7, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(8), 14, GFLAGS), + GATE(0, "pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(8), 13, GFLAGS), + + /* + * Clock-Architecture Diagram 4 + */ + COMPOSITE(0, "aclk_vio0_2wrap_occ", mux_pll_src_4plls_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(6), 0, GFLAGS), + GATE(0, "aclk_vio0_pre", "aclk_vio0_2wrap_occ", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(17), 0, GFLAGS), + COMPOSITE_NOMUX(0, "hclk_vio_pre", "aclk_vio0_pre", 0, + RV1108_CLKSEL_CON(29), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(7), 2, GFLAGS), + COMPOSITE_NOMUX(0, "pclk_vio_pre", "aclk_vio0_pre", 0, + RV1108_CLKSEL_CON(29), 8, 5, DFLAGS, + RV1108_CLKGATE_CON(7), 3, GFLAGS), + + INVERTER(0, "pclk_vip", "ext_vip", + RV1108_CLKSEL_CON(31), 8, IFLAGS), + GATE(0, "pclk_isp_pre", "pclk_vip", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(7), 6, GFLAGS), + GATE(0, "pclk_isp", "pclk_isp_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(18), 10, GFLAGS), + GATE(0, "dclk_hdmiphy_src_gpll", "gpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(6), 5, GFLAGS), + GATE(0, "dclk_hdmiphy_src_dpll", "dpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(6), 4, GFLAGS), + COMPOSITE_NOGATE(0, "dclk_hdmiphy", mux_dclk_hdmiphy_pre_p, 0, + RV1108_CLKSEL_CON(32), 6, 2, MFLAGS, 8, 6, DFLAGS), + + /* + * Clock-Architecture Diagram 5 + */ + + FACTOR(0, "xin12m", "xin24m", 0, 1, 2), + + COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(5), 8, 1, MFLAGS, 0, 7, DFLAGS, 
+ RV1108_CLKGATE_CON(2), 0, GFLAGS), + COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(8), 0, + RV1108_CLKGATE_CON(2), 1, GFLAGS, + &rv1108_i2s0_fracmux), + GATE(SCLK_I2S0, "sclk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT, + RV1108_CLKGATE_CON(2), 2, GFLAGS), + COMPOSITE_NODIV(0, "i2s_out", mux_i2s_out_p, 0, + RV1108_CLKSEL_CON(5), 15, 1, MFLAGS, + RV1108_CLKGATE_CON(2), 3, GFLAGS), + + COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(6), 8, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(2), 4, GFLAGS), + COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT, + RK2928_CLKSEL_CON(9), 0, + RK2928_CLKGATE_CON(2), 5, GFLAGS, + &rv1108_i2s1_fracmux), + GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT, + RV1108_CLKGATE_CON(2), 6, GFLAGS), + + COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(7), 8, 1, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 8, GFLAGS), + COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(10), 0, + RV1108_CLKGATE_CON(2), 9, GFLAGS, + &rv1108_i2s2_fracmux), + GATE(SCLK_I2S2, "sclk_i2s2", "i2s2_pre", CLK_SET_RATE_PARENT, + RV1108_CLKGATE_CON(2), 10, GFLAGS), + + /* PD_BUS */ + GATE(0, "aclk_bus_src_gpll", "gpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(1), 0, GFLAGS), + GATE(0, "aclk_bus_src_apll", "apll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(1), 1, GFLAGS), + GATE(0, "aclk_bus_src_dpll", "dpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(1), 2, GFLAGS), + COMPOSITE_NOGATE(ACLK_PRE, "aclk_bus_pre", mux_aclk_bus_src_p, 0, + RV1108_CLKSEL_CON(2), 8, 2, MFLAGS, 0, 5, DFLAGS), + COMPOSITE_NOMUX(0, "hclk_bus_pre", "aclk_bus_2wrap_occ", 0, + RV1108_CLKSEL_CON(3), 0, 5, DFLAGS, + RV1108_CLKGATE_CON(1), 4, GFLAGS), + COMPOSITE_NOMUX(0, "pclken_bus", "aclk_bus_2wrap_occ", 0, + RV1108_CLKSEL_CON(3), 8, 5, DFLAGS, + RV1108_CLKGATE_CON(1), 5, GFLAGS), + GATE(0, "pclk_bus_pre", "pclken_bus", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(1), 6, GFLAGS), + GATE(0, "pclk_top_pre", "pclken_bus", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(1), 7, GFLAGS), + GATE(0, "pclk_ddr_pre", "pclken_bus", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(1), 8, GFLAGS), + GATE(0, "clk_timer0", "mux_pll_p", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(1), 9, GFLAGS), + GATE(0, "clk_timer1", "mux_pll_p", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(1), 10, GFLAGS), + GATE(0, "pclk_timer", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 4, GFLAGS), + + COMPOSITE(0, "uart0_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 1, GFLAGS), + COMPOSITE(0, "uart1_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(14), 12, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 3, GFLAGS), + COMPOSITE(0, "uart21_src", mux_pll_src_dpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(15), 12, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 5, GFLAGS), + + COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(16), 0, + RV1108_CLKGATE_CON(3), 2, GFLAGS, + &rv1108_uart0_fracmux), + COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(17), 0, + RV1108_CLKGATE_CON(3), 4, GFLAGS, + &rv1108_uart1_fracmux), + COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(18), 0, + RV1108_CLKGATE_CON(3), 6, GFLAGS, + &rv1108_uart2_fracmux), + GATE(PCLK_UART0, "pclk_uart0", "pclk_bus_pre", 
CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 10, GFLAGS), + GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 11, GFLAGS), + GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 12, GFLAGS), + + COMPOSITE(0, "clk_i2c1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(19), 15, 2, MFLAGS, 8, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 7, GFLAGS), + COMPOSITE(0, "clk_i2c2", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(20), 7, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 8, GFLAGS), + COMPOSITE(0, "clk_i2c3", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(20), 15, 2, MFLAGS, 8, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 9, GFLAGS), + GATE(0, "pclk_i2c1", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 0, GFLAGS), + GATE(0, "pclk_i2c2", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 1, GFLAGS), + GATE(0, "pclk_i2c3", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 2, GFLAGS), + COMPOSITE(0, "clk_pwm1", mux_pll_src_2plls_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(12), 15, 2, MFLAGS, 8, 7, DFLAGS, + RV1108_CLKGATE_CON(3), 10, GFLAGS), + GATE(0, "pclk_pwm1", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 6, GFLAGS), + GATE(0, "pclk_wdt", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 3, GFLAGS), + GATE(0, "pclk_gpio1", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 7, GFLAGS), + GATE(0, "pclk_gpio2", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 8, GFLAGS), + GATE(0, "pclk_gpio3", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(13), 9, GFLAGS), + + GATE(0, "pclk_grf", "pclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(14), 0, GFLAGS), + + GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_pre", 0, + RV1108_CLKGATE_CON(12), 2, GFLAGS), + GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(12), 3, GFLAGS), + GATE(0, "aclk_intmem", "aclk_bus_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(12), 1, GFLAGS), + + /* PD_DDR */ + GATE(0, "apll_ddr", "apll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(0), 8, GFLAGS), + GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(0), 9, GFLAGS), + GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(0), 10, GFLAGS), + COMPOSITE(0, "ddrphy4x", mux_ddrphy_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 3, + DFLAGS | CLK_DIVIDER_POWER_OF_TWO, + RV1108_CLKGATE_CON(10), 9, GFLAGS), + GATE(0, "ddrupctl", "ddrphy_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(12), 4, GFLAGS), + GATE(0, "ddrc", "ddrphy", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(12), 5, GFLAGS), + GATE(0, "ddrmon", "ddrphy_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(12), 6, GFLAGS), + GATE(0, "timer_clk", "xin24m", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(0), 11, GFLAGS), + + /* + * Clock-Architecture Diagram 6 + */ + + /* PD_PERI */ + COMPOSITE_NOMUX(0, "pclk_periph_pre", "gpll", 0, + RV1108_CLKSEL_CON(23), 10, 5, DFLAGS, + RV1108_CLKGATE_CON(4), 5, GFLAGS), + GATE(0, "pclk_periph", "pclk_periph_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(15), 13, GFLAGS), + COMPOSITE_NOMUX(0, "hclk_periph_pre", "gpll", 0, + RV1108_CLKSEL_CON(23), 5, 5, DFLAGS, + RV1108_CLKGATE_CON(4), 4, GFLAGS), + GATE(0, "hclk_periph", "hclk_periph_pre", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(15), 12, GFLAGS), + + GATE(0, "aclk_peri_src_dpll", "dpll", CLK_IGNORE_UNUSED, + RV1108_CLKGATE_CON(4), 1, GFLAGS), + GATE(0, "aclk_peri_src_gpll", "gpll", CLK_IGNORE_UNUSED, + 
RV1108_CLKGATE_CON(4), 2, GFLAGS), + COMPOSITE(0, "aclk_periph", mux_aclk_peri_src_p, CLK_IGNORE_UNUSED, + RV1108_CLKSEL_CON(23), 15, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(15), 11, GFLAGS), + + COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, + RV1108_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 8, DFLAGS, + RV1108_CLKGATE_CON(5), 0, GFLAGS), + + COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, + RV1108_CLKSEL_CON(25), 10, 2, MFLAGS, + RV1108_CLKGATE_CON(5), 2, GFLAGS), + DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, + RV1108_CLKSEL_CON(26), 0, 8, DFLAGS), + + COMPOSITE_NODIV(0, "sclk_emmc_src", mux_mmc_src_p, 0, + RV1108_CLKSEL_CON(25), 12, 2, MFLAGS, + RV1108_CLKGATE_CON(5), 1, GFLAGS), + DIV(SCLK_EMMC, "sclk_emmc", "sclk_emmc_src", 0, + RK2928_CLKSEL_CON(26), 8, 8, DFLAGS), + GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 0, GFLAGS), + GATE(HCLK_SDIO, "hclk_sdio", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 1, GFLAGS), + GATE(HCLK_EMMC, "hclk_emmc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 2, GFLAGS), + + COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(27), 14, 2, MFLAGS, 8, 5, DFLAGS, + RV1108_CLKGATE_CON(5), 3, GFLAGS), + GATE(HCLK_NANDC, "hclk_nandc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 3, GFLAGS), + + COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_2plls_p, 0, + RV1108_CLKSEL_CON(27), 7, 2, MFLAGS, 0, 7, DFLAGS, + RV1108_CLKGATE_CON(5), 4, GFLAGS), + GATE(HCLK_SFC, "hclk_sfc", "hclk_periph", 0, RV1108_CLKGATE_CON(15), 10, GFLAGS), + + COMPOSITE(0, "sclk_macphy_pre", mux_pll_src_apll_gpll_p, 0, + RV1108_CLKSEL_CON(24), 12, 2, MFLAGS, 0, 5, DFLAGS, + RV1108_CLKGATE_CON(4), 10, GFLAGS), + MUX(0, "sclk_macphy", mux_sclk_macphy_p, CLK_SET_RATE_PARENT, + RV1108_CLKSEL_CON(24), 8, 2, MFLAGS), + GATE(0, "sclk_macphy_rx", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 8, GFLAGS), + GATE(0, "sclk_mac_ref", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 6, GFLAGS), + GATE(0, "sclk_mac_refout", "sclk_macphy", 0, RV1108_CLKGATE_CON(4), 7, GFLAGS), + + MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RV1108_SDMMC_CON0, 1), + MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RV1108_SDMMC_CON1, 1), + + MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RV1108_SDIO_CON0, 1), + MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RV1108_SDIO_CON1, 1), + + MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RV1108_EMMC_CON0, 1), + MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RV1108_EMMC_CON1, 1), +}; + +static const char *const rv1108_critical_clocks[] __initconst = { + "aclk_core", + "aclk_bus_src_gpll", + "aclk_periph", + "hclk_periph", + "pclk_periph", +}; + +static void __init rv1108_clk_init(struct device_node *np) +{ + struct rockchip_clk_provider *ctx; + void __iomem *reg_base; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_err("%s: could not map cru region\n", __func__); + return; + } + + ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); + if (IS_ERR(ctx)) { + pr_err("%s: rockchip clk init failed\n", __func__); + iounmap(reg_base); + return; + } + + rockchip_clk_register_plls(ctx, rv1108_pll_clks, + ARRAY_SIZE(rv1108_pll_clks), + RV1108_GRF_SOC_STATUS0); + rockchip_clk_register_branches(ctx, rv1108_clk_branches, + ARRAY_SIZE(rv1108_clk_branches)); + rockchip_clk_protect_critical(rv1108_critical_clocks, + ARRAY_SIZE(rv1108_critical_clocks)); + + rockchip_clk_register_armclk(ctx, ARMCLK, "armclk", + mux_armclk_p, ARRAY_SIZE(mux_armclk_p), + &rv1108_cpuclk_data, rv1108_cpuclk_rates, + ARRAY_SIZE(rv1108_cpuclk_rates)); + + 
rockchip_register_softrst(np, 13, reg_base + RV1108_SOFTRST_CON(0), + ROCKCHIP_SOFTRST_HIWORD_MASK); + + rockchip_register_restart_notifier(ctx, RV1108_GLB_SRST_FST, NULL); + + rockchip_clk_of_add_provider(np, ctx); +} +CLK_OF_DECLARE(rv1108_cru, "rockchip,rv1108-cru", rv1108_clk_init); diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index 7c15473ea72b..ef601dded32c 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -34,20 +34,20 @@ struct clk; #define HIWORD_UPDATE(val, mask, shift) \ ((val) << (shift) | (mask) << ((shift) + 16)) -/* register positions shared by RK1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */ -#define RK1108_PLL_CON(x) ((x) * 0x4) -#define RK1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60) -#define RK1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120) -#define RK1108_SOFTRST_CON(x) ((x) * 0x4 + 0x180) -#define RK1108_GLB_SRST_FST 0x1c0 -#define RK1108_GLB_SRST_SND 0x1c4 -#define RK1108_MISC_CON 0x1cc -#define RK1108_SDMMC_CON0 0x1d8 -#define RK1108_SDMMC_CON1 0x1dc -#define RK1108_SDIO_CON0 0x1e0 -#define RK1108_SDIO_CON1 0x1e4 -#define RK1108_EMMC_CON0 0x1e8 -#define RK1108_EMMC_CON1 0x1ec +/* register positions shared by RV1108, RK2928, RK3036, RK3066, RK3188 and RK3228 */ +#define RV1108_PLL_CON(x) ((x) * 0x4) +#define RV1108_CLKSEL_CON(x) ((x) * 0x4 + 0x60) +#define RV1108_CLKGATE_CON(x) ((x) * 0x4 + 0x120) +#define RV1108_SOFTRST_CON(x) ((x) * 0x4 + 0x180) +#define RV1108_GLB_SRST_FST 0x1c0 +#define RV1108_GLB_SRST_SND 0x1c4 +#define RV1108_MISC_CON 0x1cc +#define RV1108_SDMMC_CON0 0x1d8 +#define RV1108_SDMMC_CON1 0x1dc +#define RV1108_SDIO_CON0 0x1e0 +#define RV1108_SDIO_CON1 0x1e4 +#define RV1108_EMMC_CON0 0x1e8 +#define RV1108_EMMC_CON1 0x1ec #define RK2928_PLL_CON(x) ((x) * 0x4) #define RK2928_MODE_CON 0x40 diff --git a/include/dt-bindings/clock/rk1108-cru.h b/include/dt-bindings/clock/rk1108-cru.h deleted file mode 100644 index 9350a5527a36..000000000000 --- a/include/dt-bindings/clock/rk1108-cru.h +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright (c) 2016 Rockchip Electronics Co. Ltd. - * Author: Shawn Lin - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK1108_H -#define _DT_BINDINGS_CLK_ROCKCHIP_RK1108_H - -/* pll id */ -#define PLL_APLL 0 -#define PLL_DPLL 1 -#define PLL_GPLL 2 -#define ARMCLK 3 - -/* sclk gates (special clocks) */ -#define SCLK_SPI0 65 -#define SCLK_NANDC 67 -#define SCLK_SDMMC 68 -#define SCLK_SDIO 69 -#define SCLK_EMMC 71 -#define SCLK_UART0 72 -#define SCLK_UART1 73 -#define SCLK_UART2 74 -#define SCLK_I2S0 75 -#define SCLK_I2S1 76 -#define SCLK_I2S2 77 -#define SCLK_TIMER0 78 -#define SCLK_TIMER1 79 -#define SCLK_SFC 80 -#define SCLK_SDMMC_DRV 81 -#define SCLK_SDIO_DRV 82 -#define SCLK_EMMC_DRV 83 -#define SCLK_SDMMC_SAMPLE 84 -#define SCLK_SDIO_SAMPLE 85 -#define SCLK_EMMC_SAMPLE 86 - -/* aclk gates */ -#define ACLK_DMAC 192 -#define ACLK_PRE 193 -#define ACLK_CORE 194 -#define ACLK_ENMCORE 195 - -/* pclk gates */ -#define PCLK_GPIO1 256 -#define PCLK_GPIO2 257 -#define PCLK_GPIO3 258 -#define PCLK_GRF 259 -#define PCLK_I2C1 260 -#define PCLK_I2C2 261 -#define PCLK_I2C3 262 -#define PCLK_SPI 263 -#define PCLK_SFC 264 -#define PCLK_UART0 265 -#define PCLK_UART1 266 -#define PCLK_UART2 267 -#define PCLK_TSADC 268 -#define PCLK_PWM 269 -#define PCLK_TIMER 270 -#define PCLK_PERI 271 - -/* hclk gates */ -#define HCLK_I2S0_8CH 320 -#define HCLK_I2S1_8CH 321 -#define HCLK_I2S2_2CH 322 -#define HCLK_NANDC 323 -#define HCLK_SDMMC 324 -#define HCLK_SDIO 325 -#define HCLK_EMMC 326 -#define HCLK_PERI 327 -#define HCLK_SFC 328 - -#define CLK_NR_CLKS (HCLK_SFC + 1) - -/* reset id */ -#define SRST_CORE_PO_AD 0 -#define SRST_CORE_AD 1 -#define SRST_L2_AD 2 -#define SRST_CPU_NIU_AD 3 -#define SRST_CORE_PO 4 -#define SRST_CORE 5 -#define SRST_L2 6 -#define SRST_CORE_DBG 8 -#define PRST_DBG 9 -#define RST_DAP 10 -#define PRST_DBG_NIU 11 -#define ARST_STRC_SYS_AD 15 - -#define SRST_DDRPHY_CLKDIV 16 -#define SRST_DDRPHY 17 -#define PRST_DDRPHY 18 -#define PRST_HDMIPHY 19 -#define PRST_VDACPHY 20 -#define PRST_VADCPHY 21 -#define PRST_MIPI_CSI_PHY 22 -#define PRST_MIPI_DSI_PHY 23 -#define PRST_ACODEC 24 -#define ARST_BUS_NIU 25 -#define PRST_TOP_NIU 26 -#define ARST_INTMEM 27 -#define HRST_ROM 28 -#define ARST_DMAC 29 -#define SRST_MSCH_NIU 30 -#define PRST_MSCH_NIU 31 - -#define PRST_DDRUPCTL 32 -#define NRST_DDRUPCTL 33 -#define PRST_DDRMON 34 -#define HRST_I2S0_8CH 35 -#define MRST_I2S0_8CH 36 -#define HRST_I2S1_2CH 37 -#define MRST_IS21_2CH 38 -#define HRST_I2S2_2CH 39 -#define MRST_I2S2_2CH 40 -#define HRST_CRYPTO 41 -#define SRST_CRYPTO 42 -#define PRST_SPI 43 -#define SRST_SPI 44 -#define PRST_UART0 45 -#define PRST_UART1 46 -#define PRST_UART2 47 - -#define SRST_UART0 48 -#define SRST_UART1 49 -#define SRST_UART2 50 -#define PRST_I2C1 51 -#define PRST_I2C2 52 -#define PRST_I2C3 53 -#define SRST_I2C1 54 -#define SRST_I2C2 55 -#define SRST_I2C3 56 -#define PRST_PWM1 58 -#define SRST_PWM1 60 -#define PRST_WDT 61 -#define PRST_GPIO1 62 -#define PRST_GPIO2 63 - -#define PRST_GPIO3 64 -#define PRST_GRF 65 -#define PRST_EFUSE 66 -#define PRST_EFUSE512 67 -#define PRST_TIMER0 68 -#define SRST_TIMER0 69 -#define SRST_TIMER1 70 -#define PRST_TSADC 71 -#define SRST_TSADC 72 -#define PRST_SARADC 73 -#define SRST_SARADC 74 -#define HRST_SYSBUS 75 -#define PRST_USBGRF 76 - -#define ARST_PERIPH_NIU 80 -#define HRST_PERIPH_NIU 81 -#define PRST_PERIPH_NIU 82 -#define HRST_PERIPH 83 -#define HRST_SDMMC 84 -#define HRST_SDIO 85 -#define HRST_EMMC 86 -#define HRST_NANDC 87 -#define NRST_NANDC 88 -#define HRST_SFC 89 -#define SRST_SFC 90 -#define ARST_GMAC 91 -#define HRST_OTG 92 -#define SRST_OTG 93 
-#define SRST_OTG_ADP 94 -#define HRST_HOST0 95 - -#define HRST_HOST0_AUX 96 -#define HRST_HOST0_ARB 97 -#define SRST_HOST0_EHCIPHY 98 -#define SRST_HOST0_UTMI 99 -#define SRST_USBPOR 100 -#define SRST_UTMI0 101 -#define SRST_UTMI1 102 - -#define ARST_VIO0_NIU 102 -#define ARST_VIO1_NIU 103 -#define HRST_VIO_NIU 104 -#define PRST_VIO_NIU 105 -#define ARST_VOP 106 -#define HRST_VOP 107 -#define DRST_VOP 108 -#define ARST_IEP 109 -#define HRST_IEP 110 -#define ARST_RGA 111 -#define HRST_RGA 112 -#define SRST_RGA 113 -#define PRST_CVBS 114 -#define PRST_HDMI 115 -#define SRST_HDMI 116 -#define PRST_MIPI_DSI 117 - -#define ARST_ISP_NIU 118 -#define HRST_ISP_NIU 119 -#define HRST_ISP 120 -#define SRST_ISP 121 -#define ARST_VIP0 122 -#define HRST_VIP0 123 -#define PRST_VIP0 124 -#define ARST_VIP1 125 -#define HRST_VIP1 126 -#define PRST_VIP1 127 -#define ARST_VIP2 128 -#define HRST_VIP2 129 -#define PRST_VIP2 120 -#define ARST_VIP3 121 -#define HRST_VIP3 122 -#define PRST_VIP4 123 - -#define PRST_CIF1TO4 124 -#define SRST_CVBS_CLK 125 -#define HRST_CVBS 126 - -#define ARST_VPU_NIU 140 -#define HRST_VPU_NIU 141 -#define ARST_VPU 142 -#define HRST_VPU 143 -#define ARST_RKVDEC_NIU 144 -#define HRST_RKVDEC_NIU 145 -#define ARST_RKVDEC 146 -#define HRST_RKVDEC 147 -#define SRST_RKVDEC_CABAC 148 -#define SRST_RKVDEC_CORE 149 -#define ARST_RKVENC_NIU 150 -#define HRST_RKVENC_NIU 151 -#define ARST_RKVENC 152 -#define HRST_RKVENC 153 -#define SRST_RKVENC_CORE 154 - -#define SRST_DSP_CORE 156 -#define SRST_DSP_SYS 157 -#define SRST_DSP_GLOBAL 158 -#define SRST_DSP_OECM 159 -#define PRST_DSP_IOP_NIU 160 -#define ARST_DSP_EPP_NIU 161 -#define ARST_DSP_EDP_NIU 162 -#define PRST_DSP_DBG_NIU 163 -#define PRST_DSP_CFG_NIU 164 -#define PRST_DSP_GRF 165 -#define PRST_DSP_MAILBOX 166 -#define PRST_DSP_INTC 167 -#define PRST_DSP_PFM_MON 169 -#define SRST_DSP_PFM_MON 170 -#define ARST_DSP_EDAP_NIU 171 - -#define SRST_PMU 172 -#define SRST_PMU_I2C0 173 -#define PRST_PMU_I2C0 174 -#define PRST_PMU_GPIO0 175 -#define PRST_PMU_INTMEM 176 -#define PRST_PMU_PWM0 177 -#define SRST_PMU_PWM0 178 -#define PRST_PMU_GRF 179 -#define SRST_PMU_NIU 180 -#define SRST_PMU_PVTM 181 -#define ARST_DSP_EDP_PERF 184 -#define ARST_DSP_EPP_PERF 185 - -#endif /* _DT_BINDINGS_CLK_ROCKCHIP_RK1108_H */ diff --git a/include/dt-bindings/clock/rv1108-cru.h b/include/dt-bindings/clock/rv1108-cru.h new file mode 100644 index 000000000000..ae26f8105914 --- /dev/null +++ b/include/dt-bindings/clock/rv1108-cru.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2016 Rockchip Electronics Co. Ltd. + * Author: Shawn Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H + +/* pll id */ +#define PLL_APLL 0 +#define PLL_DPLL 1 +#define PLL_GPLL 2 +#define ARMCLK 3 + +/* sclk gates (special clocks) */ +#define SCLK_SPI0 65 +#define SCLK_NANDC 67 +#define SCLK_SDMMC 68 +#define SCLK_SDIO 69 +#define SCLK_EMMC 71 +#define SCLK_UART0 72 +#define SCLK_UART1 73 +#define SCLK_UART2 74 +#define SCLK_I2S0 75 +#define SCLK_I2S1 76 +#define SCLK_I2S2 77 +#define SCLK_TIMER0 78 +#define SCLK_TIMER1 79 +#define SCLK_SFC 80 +#define SCLK_SDMMC_DRV 81 +#define SCLK_SDIO_DRV 82 +#define SCLK_EMMC_DRV 83 +#define SCLK_SDMMC_SAMPLE 84 +#define SCLK_SDIO_SAMPLE 85 +#define SCLK_EMMC_SAMPLE 86 + +/* aclk gates */ +#define ACLK_DMAC 192 +#define ACLK_PRE 193 +#define ACLK_CORE 194 +#define ACLK_ENMCORE 195 + +/* pclk gates */ +#define PCLK_GPIO1 256 +#define PCLK_GPIO2 257 +#define PCLK_GPIO3 258 +#define PCLK_GRF 259 +#define PCLK_I2C1 260 +#define PCLK_I2C2 261 +#define PCLK_I2C3 262 +#define PCLK_SPI 263 +#define PCLK_SFC 264 +#define PCLK_UART0 265 +#define PCLK_UART1 266 +#define PCLK_UART2 267 +#define PCLK_TSADC 268 +#define PCLK_PWM 269 +#define PCLK_TIMER 270 +#define PCLK_PERI 271 + +/* hclk gates */ +#define HCLK_I2S0_8CH 320 +#define HCLK_I2S1_8CH 321 +#define HCLK_I2S2_2CH 322 +#define HCLK_NANDC 323 +#define HCLK_SDMMC 324 +#define HCLK_SDIO 325 +#define HCLK_EMMC 326 +#define HCLK_PERI 327 +#define HCLK_SFC 328 + +#define CLK_NR_CLKS (HCLK_SFC + 1) + +/* reset id */ +#define SRST_CORE_PO_AD 0 +#define SRST_CORE_AD 1 +#define SRST_L2_AD 2 +#define SRST_CPU_NIU_AD 3 +#define SRST_CORE_PO 4 +#define SRST_CORE 5 +#define SRST_L2 6 +#define SRST_CORE_DBG 8 +#define PRST_DBG 9 +#define RST_DAP 10 +#define PRST_DBG_NIU 11 +#define ARST_STRC_SYS_AD 15 + +#define SRST_DDRPHY_CLKDIV 16 +#define SRST_DDRPHY 17 +#define PRST_DDRPHY 18 +#define PRST_HDMIPHY 19 +#define PRST_VDACPHY 20 +#define PRST_VADCPHY 21 +#define PRST_MIPI_CSI_PHY 22 +#define PRST_MIPI_DSI_PHY 23 +#define PRST_ACODEC 24 +#define ARST_BUS_NIU 25 +#define PRST_TOP_NIU 26 +#define ARST_INTMEM 27 +#define HRST_ROM 28 +#define ARST_DMAC 29 +#define SRST_MSCH_NIU 30 +#define PRST_MSCH_NIU 31 + +#define PRST_DDRUPCTL 32 +#define NRST_DDRUPCTL 33 +#define PRST_DDRMON 34 +#define HRST_I2S0_8CH 35 +#define MRST_I2S0_8CH 36 +#define HRST_I2S1_2CH 37 +#define MRST_IS21_2CH 38 +#define HRST_I2S2_2CH 39 +#define MRST_I2S2_2CH 40 +#define HRST_CRYPTO 41 +#define SRST_CRYPTO 42 +#define PRST_SPI 43 +#define SRST_SPI 44 +#define PRST_UART0 45 +#define PRST_UART1 46 +#define PRST_UART2 47 + +#define SRST_UART0 48 +#define SRST_UART1 49 +#define SRST_UART2 50 +#define PRST_I2C1 51 +#define PRST_I2C2 52 +#define PRST_I2C3 53 +#define SRST_I2C1 54 +#define SRST_I2C2 55 +#define SRST_I2C3 56 +#define PRST_PWM1 58 +#define SRST_PWM1 60 +#define PRST_WDT 61 +#define PRST_GPIO1 62 +#define PRST_GPIO2 63 + +#define PRST_GPIO3 64 +#define PRST_GRF 65 +#define PRST_EFUSE 66 +#define PRST_EFUSE512 67 +#define PRST_TIMER0 68 +#define SRST_TIMER0 69 +#define SRST_TIMER1 70 +#define PRST_TSADC 71 +#define SRST_TSADC 72 +#define PRST_SARADC 73 +#define SRST_SARADC 74 +#define HRST_SYSBUS 75 +#define PRST_USBGRF 76 + +#define ARST_PERIPH_NIU 80 +#define HRST_PERIPH_NIU 81 +#define PRST_PERIPH_NIU 82 +#define HRST_PERIPH 83 +#define HRST_SDMMC 84 +#define HRST_SDIO 85 +#define HRST_EMMC 86 +#define HRST_NANDC 87 +#define NRST_NANDC 88 +#define HRST_SFC 89 +#define SRST_SFC 90 +#define ARST_GMAC 91 +#define HRST_OTG 92 +#define SRST_OTG 93 
+#define SRST_OTG_ADP 94 +#define HRST_HOST0 95 + +#define HRST_HOST0_AUX 96 +#define HRST_HOST0_ARB 97 +#define SRST_HOST0_EHCIPHY 98 +#define SRST_HOST0_UTMI 99 +#define SRST_USBPOR 100 +#define SRST_UTMI0 101 +#define SRST_UTMI1 102 + +#define ARST_VIO0_NIU 102 +#define ARST_VIO1_NIU 103 +#define HRST_VIO_NIU 104 +#define PRST_VIO_NIU 105 +#define ARST_VOP 106 +#define HRST_VOP 107 +#define DRST_VOP 108 +#define ARST_IEP 109 +#define HRST_IEP 110 +#define ARST_RGA 111 +#define HRST_RGA 112 +#define SRST_RGA 113 +#define PRST_CVBS 114 +#define PRST_HDMI 115 +#define SRST_HDMI 116 +#define PRST_MIPI_DSI 117 + +#define ARST_ISP_NIU 118 +#define HRST_ISP_NIU 119 +#define HRST_ISP 120 +#define SRST_ISP 121 +#define ARST_VIP0 122 +#define HRST_VIP0 123 +#define PRST_VIP0 124 +#define ARST_VIP1 125 +#define HRST_VIP1 126 +#define PRST_VIP1 127 +#define ARST_VIP2 128 +#define HRST_VIP2 129 +#define PRST_VIP2 120 +#define ARST_VIP3 121 +#define HRST_VIP3 122 +#define PRST_VIP4 123 + +#define PRST_CIF1TO4 124 +#define SRST_CVBS_CLK 125 +#define HRST_CVBS 126 + +#define ARST_VPU_NIU 140 +#define HRST_VPU_NIU 141 +#define ARST_VPU 142 +#define HRST_VPU 143 +#define ARST_RKVDEC_NIU 144 +#define HRST_RKVDEC_NIU 145 +#define ARST_RKVDEC 146 +#define HRST_RKVDEC 147 +#define SRST_RKVDEC_CABAC 148 +#define SRST_RKVDEC_CORE 149 +#define ARST_RKVENC_NIU 150 +#define HRST_RKVENC_NIU 151 +#define ARST_RKVENC 152 +#define HRST_RKVENC 153 +#define SRST_RKVENC_CORE 154 + +#define SRST_DSP_CORE 156 +#define SRST_DSP_SYS 157 +#define SRST_DSP_GLOBAL 158 +#define SRST_DSP_OECM 159 +#define PRST_DSP_IOP_NIU 160 +#define ARST_DSP_EPP_NIU 161 +#define ARST_DSP_EDP_NIU 162 +#define PRST_DSP_DBG_NIU 163 +#define PRST_DSP_CFG_NIU 164 +#define PRST_DSP_GRF 165 +#define PRST_DSP_MAILBOX 166 +#define PRST_DSP_INTC 167 +#define PRST_DSP_PFM_MON 169 +#define SRST_DSP_PFM_MON 170 +#define ARST_DSP_EDAP_NIU 171 + +#define SRST_PMU 172 +#define SRST_PMU_I2C0 173 +#define PRST_PMU_I2C0 174 +#define PRST_PMU_GPIO0 175 +#define PRST_PMU_INTMEM 176 +#define PRST_PMU_PWM0 177 +#define SRST_PMU_PWM0 178 +#define PRST_PMU_GRF 179 +#define SRST_PMU_NIU 180 +#define SRST_PMU_PVTM 181 +#define ARST_DSP_EDP_PERF 184 +#define ARST_DSP_EPP_PERF 185 + +#endif /* _DT_BINDINGS_CLK_ROCKCHIP_RV1108_H */ -- cgit v1.2.3 From e8bb4673596ea28fab287dbc417e8100d798cd40 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Mon, 27 Mar 2017 07:31:03 +0200 Subject: dmaengine: pl330: remove pdata based initialization This driver is now used only on platforms which support device tree, so it is safe to remove legacy platform data based initialization code. Signed-off-by: Marek Szyprowski Reviewed-by: Ulf Hansson Acked-by: Arnd Bergmann For plat-samsung: Acked-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul --- arch/arm/plat-samsung/devs.c | 1 - drivers/dma/pl330.c | 42 ++++++++---------------------------------- include/linux/amba/pl330.h | 35 ----------------------------------- 3 files changed, 8 insertions(+), 70 deletions(-) delete mode 100644 include/linux/amba/pl330.h (limited to 'include') diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c index 03fac123676d..dc269d9143bc 100644 --- a/arch/arm/plat-samsung/devs.c +++ b/arch/arm/plat-samsung/devs.c @@ -10,7 +10,6 @@ * published by the Free Software Foundation. 
*/ -#include #include #include #include diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f37f4978dabb..8b0da7fa520d 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -2077,18 +2076,6 @@ static void pl330_tasklet(unsigned long data) } } -bool pl330_filter(struct dma_chan *chan, void *param) -{ - u8 *peri_id; - - if (chan->device->dev->driver != &pl330_driver.drv) - return false; - - peri_id = chan->private; - return *peri_id == (unsigned long)param; -} -EXPORT_SYMBOL(pl330_filter); - static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { @@ -2833,7 +2820,6 @@ static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume); static int pl330_probe(struct amba_device *adev, const struct amba_id *id) { - struct dma_pl330_platdata *pdat; struct pl330_config *pcfg; struct pl330_dmac *pl330; struct dma_pl330_chan *pch, *_p; @@ -2843,8 +2829,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) int num_chan; struct device_node *np = adev->dev.of_node; - pdat = dev_get_platdata(&adev->dev); - ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -2857,7 +2841,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd = &pl330->ddma; pd->dev = &adev->dev; - pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; + pl330->mcbufsz = 0; /* get quirk */ for (i = 0; i < ARRAY_SIZE(of_quirks); i++) @@ -2901,10 +2885,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) INIT_LIST_HEAD(&pd->channels); /* Initialize channel parameters */ - if (pdat) - num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan); - else - num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); + num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); pl330->num_peripherals = num_chan; @@ -2916,11 +2897,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) for (i = 0; i < num_chan; i++) { pch = &pl330->peripherals[i]; - if (!adev->dev.of_node) - pch->chan.private = pdat ? &pdat->peri_id[i] : NULL; - else - pch->chan.private = adev->dev.of_node; + pch->chan.private = adev->dev.of_node; INIT_LIST_HEAD(&pch->submitted_list); INIT_LIST_HEAD(&pch->work_list); INIT_LIST_HEAD(&pch->completed_list); @@ -2933,15 +2911,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) list_add_tail(&pch->chan.device_node, &pd->channels); } - if (pdat) { - pd->cap_mask = pdat->cap_mask; - } else { - dma_cap_set(DMA_MEMCPY, pd->cap_mask); - if (pcfg->num_peri) { - dma_cap_set(DMA_SLAVE, pd->cap_mask); - dma_cap_set(DMA_CYCLIC, pd->cap_mask); - dma_cap_set(DMA_PRIVATE, pd->cap_mask); - } + dma_cap_set(DMA_MEMCPY, pd->cap_mask); + if (pcfg->num_peri) { + dma_cap_set(DMA_SLAVE, pd->cap_mask); + dma_cap_set(DMA_CYCLIC, pd->cap_mask); + dma_cap_set(DMA_PRIVATE, pd->cap_mask); } pd->device_alloc_chan_resources = pl330_alloc_chan_resources; diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h deleted file mode 100644 index fe93758e8403..000000000000 --- a/include/linux/amba/pl330.h +++ /dev/null @@ -1,35 +0,0 @@ -/* linux/include/linux/amba/pl330.h - * - * Copyright (C) 2010 Samsung Electronics Co. Ltd. - * Jaswinder Singh - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef __AMBA_PL330_H_ -#define __AMBA_PL330_H_ - -#include - -struct dma_pl330_platdata { - /* - * Number of valid peripherals connected to DMAC. - * This may be different from the value read from - * CR0, as the PL330 implementation might have 'holes' - * in the peri list or the peri could also be reached - * from another DMAC which the platform prefers. - */ - u8 nr_valid_peri; - /* Array of valid peripherals */ - u8 *peri_id; - /* Operational capabilities */ - dma_cap_mask_t cap_mask; - /* Bytes to allocate for MC buffer */ - unsigned mcbuf_sz; -}; - -extern bool pl330_filter(struct dma_chan *chan, void *param); -#endif /* __AMBA_PL330_H_ */ -- cgit v1.2.3 From 89f1b1c614253d7ea57543f769d93fced99d4d05 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 29 Sep 2016 13:06:15 +0200 Subject: clk: renesas: Add r8a7795 ES2.0 CPG Core Clock Definitions Add all R-Car H3 ES2.0 Clock Pulse Generator Core Clock Outputs, as listed in Table 8.2a ("List of Clocks [R-Car H3]") of the R-Car Gen3 Hardware User's Manual rev. 0.53E. Signed-off-by: Geert Uytterhoeven --- include/dt-bindings/clock/r8a7795-cpg-mssr.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h index e864aae0a256..f047eaf261f3 100644 --- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h @@ -60,4 +60,11 @@ #define R8A7795_CLK_R 45 #define R8A7795_CLK_OSC 46 +/* r8a7795 ES2.0 CPG Core Clocks */ +#define R8A7795_CLK_S0D2 47 +#define R8A7795_CLK_S0D3 48 +#define R8A7795_CLK_S0D6 49 +#define R8A7795_CLK_S0D8 50 +#define R8A7795_CLK_S0D12 51 + #endif /* __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ */ -- cgit v1.2.3 From 88da44c5edb93d58f704fb9ea21962ca357d06a1 Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Wed, 22 Mar 2017 16:23:16 +0200 Subject: clk: tegra: Add missing Tegra210 clocks iqc1, iqc2, tegra_clk_pll_a_out_adsp, tegra_clk_pll_a_out0_out_adsp, adsp and adsp neon were not modelled. dp2 wasn't modelled for Tegra210. 
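As a purely illustrative usage sketch (the device, probe function and "adsp" clock-names entry below are assumptions, not part of this patch), a consumer would pick up one of the newly modelled clocks through the common clock API:

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int adsp_demo_probe(struct platform_device *pdev)
    {
            struct clk *adsp;
            int err;

            /* assumes the DT node lists the new "adsp" gate in clock-names */
            adsp = devm_clk_get(&pdev->dev, "adsp");
            if (IS_ERR(adsp))
                    return PTR_ERR(adsp);

            err = clk_prepare_enable(adsp);
            if (err)
                    return err;

            /* ... program the ADSP ... */

            clk_disable_unprepare(adsp);
            return 0;
    }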
Signed-off-by: Peter De Schrijver Signed-off-by: Thierry Reding --- drivers/clk/tegra/clk-id.h | 6 ++++++ drivers/clk/tegra/clk-tegra-periph.c | 6 ++++++ drivers/clk/tegra/clk-tegra210.c | 7 +++++++ include/dt-bindings/clock/tegra210-car.h | 16 ++++++++-------- 4 files changed, 27 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index d3a79a8011ca..689f344377a7 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -318,6 +318,12 @@ enum clk_id { tegra_clk_dmic1_sync_clk_mux, tegra_clk_dmic2_sync_clk_mux, tegra_clk_dmic3_sync_clk_mux, + tegra_clk_iqc1, + tegra_clk_iqc2, + tegra_clk_pll_a_out_adsp, + tegra_clk_pll_a_out0_out_adsp, + tegra_clk_adsp, + tegra_clk_adsp_neon, tegra_clk_max, }; diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 9e6ac11ccf75..294bfe40a4f5 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -859,6 +859,12 @@ static struct tegra_periph_init_data gate_clks[] = { GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0), GATE("cec", "pclk", 136, 0, tegra_clk_cec, 0), + GATE("iqc1", "clk_m", 221, 0, tegra_clk_iqc1, 0), + GATE("iqc2", "clk_m", 220, 0, tegra_clk_iqc1, 0), + GATE("pll_a_out_adsp", "pll_a", 188, 0, tegra_clk_pll_a_out_adsp, 0), + GATE("pll_a_out0_out_adsp", "pll_a", 188, 0, tegra_clk_pll_a_out0_out_adsp, 0), + GATE("adsp", "aclk", 199, 0, tegra_clk_adsp, 0), + GATE("adsp_neon", "aclk", 218, 0, tegra_clk_adsp_neon, 0), }; static struct tegra_periph_init_data div_clks[] = { diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 9897dc55676b..cdf1101c40f8 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2326,6 +2326,13 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_dmic1_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC1_SYNC_CLK_MUX, .present = true }, [tegra_clk_dmic2_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC2_SYNC_CLK_MUX, .present = true }, [tegra_clk_dmic3_sync_clk_mux] = { .dt_id = TEGRA210_CLK_DMIC3_SYNC_CLK_MUX, .present = true }, + [tegra_clk_dp2] = { .dt_id = TEGRA210_CLK_DP2, .present = true }, + [tegra_clk_iqc1] = { .dt_id = TEGRA210_CLK_IQC1, .present = true }, + [tegra_clk_iqc2] = { .dt_id = TEGRA210_CLK_IQC2, .present = true }, + [tegra_clk_pll_a_out_adsp] = { .dt_id = TEGRA210_CLK_PLL_A_OUT_ADSP, .present = true }, + [tegra_clk_pll_a_out0_out_adsp] = { .dt_id = TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP, .present = true }, + [tegra_clk_adsp] = { .dt_id = TEGRA210_CLK_ADSP, .present = true }, + [tegra_clk_adsp_neon] = { .dt_id = TEGRA210_CLK_ADSP_NEON, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index 8744b19cca3e..46689cd3750b 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -173,7 +173,7 @@ #define TEGRA210_CLK_ENTROPY 149 /* 150 */ /* 151 */ -/* 152 */ +#define TEGRA210_CLK_DP2 152 /* 153 */ /* 154 */ /* 155 (bit affects dfll_ref and dfll_soc) */ @@ -210,7 +210,7 @@ #define TEGRA210_CLK_DBGAPB 185 /* 186 */ #define TEGRA210_CLK_PLL_P_OUT_ADSP 187 -/* 188 */ +/* 188 ((bit affects pll_a_out_adsp and pll_a_out0_out_adsp)*/ #define TEGRA210_CLK_PLL_G_REF 189 /* 190 */ /* 191 */ @@ -222,7 +222,7 @@ /* 196 */ #define 
TEGRA210_CLK_DMIC3 197 #define TEGRA210_CLK_APE 198 -/* 199 */ +#define TEGRA210_CLK_ADSP 199 /* 200 */ /* 201 */ #define TEGRA210_CLK_MAUD 202 @@ -241,10 +241,10 @@ /* 215 */ /* 216 */ /* 217 */ -/* 218 */ +#define TEGRA210_CLK_ADSP_NEON 218 #define TEGRA210_CLK_NVENC 219 -/* 220 */ -/* 221 */ +#define TEGRA210_CLK_IQC2 220 +#define TEGRA210_CLK_IQC1 221 #define TEGRA210_CLK_SOR_SAFE 222 #define TEGRA210_CLK_PLL_P_OUT_CPU 223 @@ -350,8 +350,8 @@ /* 320 */ /* 321 */ #define TEGRA210_CLK_ISP 322 -/* 323 */ -/* 324 */ +#define TEGRA210_CLK_PLL_A_OUT_ADSP 323 +#define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324 /* 325 */ /* 326 */ /* 327 */ -- cgit v1.2.3 From cdb8b80b60935515b86cfe534f69934b13937052 Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Tue, 4 Apr 2017 17:50:57 +0800 Subject: clk: sunxi-ng: add support for PRCM CCUs SoCs after A31 has a clock controller module in the PRCM part. Support the clock controller module on H3/5 and A64 now. Signed-off-by: Icenowy Zheng Signed-off-by: Maxime Ripard --- drivers/clk/sunxi-ng/Kconfig | 6 + drivers/clk/sunxi-ng/Makefile | 1 + drivers/clk/sunxi-ng/ccu-sun8i-r.c | 213 ++++++++++++++++++++++++++++++++ drivers/clk/sunxi-ng/ccu-sun8i-r.h | 27 ++++ include/dt-bindings/clock/sun8i-r-ccu.h | 59 +++++++++ include/dt-bindings/reset/sun8i-r-ccu.h | 53 ++++++++ 6 files changed, 359 insertions(+) create mode 100644 drivers/clk/sunxi-ng/ccu-sun8i-r.c create mode 100644 drivers/clk/sunxi-ng/ccu-sun8i-r.h create mode 100644 include/dt-bindings/clock/sun8i-r-ccu.h create mode 100644 include/dt-bindings/reset/sun8i-r-ccu.h (limited to 'include') diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index 213cf64e4fab..056752317aee 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -150,4 +150,10 @@ config SUN9I_A80_CCU default MACH_SUN9I depends on MACH_SUN9I || COMPILE_TEST +config SUN8I_R_CCU + bool "Support for Allwinner SoCs' PRCM CCUs" + select SUNXI_CCU_DIV + select SUNXI_CCU_GATE + default MACH_SUN8I || (ARCH_SUNXI && ARM64) + endif diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile index 6feaac0c5600..0ec02fe14c50 100644 --- a/drivers/clk/sunxi-ng/Makefile +++ b/drivers/clk/sunxi-ng/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o obj-$(CONFIG_SUN8I_V3S_CCU) += ccu-sun8i-v3s.o +obj-$(CONFIG_SUN8I_R_CCU) += ccu-sun8i-r.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c new file mode 100644 index 000000000000..0d027d53dbdf --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2016 Icenowy Zheng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include + +#include "ccu_common.h" +#include "ccu_reset.h" + +#include "ccu_div.h" +#include "ccu_gate.h" +#include "ccu_mp.h" +#include "ccu_nm.h" + +#include "ccu-sun8i-r.h" + +static const char * const ar100_parents[] = { "osc32k", "osc24M", + "pll-periph0", "iosc" }; + +static struct ccu_div ar100_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), + + .mux = { + .shift = 16, + .width = 2, + + .variable_prediv = { + .index = 2, + .shift = 8, + .width = 5, + }, + }, + + .common = { + .reg = 0x00, + .features = CCU_FEATURE_VARIABLE_PREDIV, + .hw.init = CLK_HW_INIT_PARENTS("ar100", + ar100_parents, + &ccu_div_ops, + 0), + }, +}; + +static CLK_FIXED_FACTOR(ahb0_clk, "ahb0", "ar100", 1, 1, 0); + +static struct ccu_div apb0_clk = { + .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), + + .common = { + .reg = 0x0c, + .hw.init = CLK_HW_INIT("apb0", + "ahb0", + &ccu_div_ops, + 0), + }, +}; + +static SUNXI_CCU_GATE(apb0_pio_clk, "apb0-pio", "apb0", + 0x28, BIT(0), 0); +static SUNXI_CCU_GATE(apb0_ir_clk, "apb0-ir", "apb0", + 0x28, BIT(1), 0); +static SUNXI_CCU_GATE(apb0_timer_clk, "apb0-timer", "apb0", + 0x28, BIT(2), 0); +static SUNXI_CCU_GATE(apb0_rsb_clk, "apb0-rsb", "apb0", + 0x28, BIT(3), 0); +static SUNXI_CCU_GATE(apb0_uart_clk, "apb0-uart", "apb0", + 0x28, BIT(4), 0); +static SUNXI_CCU_GATE(apb0_i2c_clk, "apb0-i2c", "apb0", + 0x28, BIT(6), 0); +static SUNXI_CCU_GATE(apb0_twd_clk, "apb0-twd", "apb0", + 0x28, BIT(7), 0); + +static const char * const r_mod0_default_parents[] = { "osc32K", "osc24M" }; +static SUNXI_CCU_MP_WITH_MUX_GATE(ir_clk, "ir", + r_mod0_default_parents, 0x54, + 0, 4, /* M */ + 16, 2, /* P */ + 24, 2, /* mux */ + BIT(31), /* gate */ + 0); + +static struct ccu_common *sun8i_h3_r_ccu_clks[] = { + &ar100_clk.common, + &apb0_clk.common, + &apb0_pio_clk.common, + &apb0_ir_clk.common, + &apb0_timer_clk.common, + &apb0_uart_clk.common, + &apb0_i2c_clk.common, + &apb0_twd_clk.common, + &ir_clk.common, +}; + +static struct ccu_common *sun50i_a64_r_ccu_clks[] = { + &ar100_clk.common, + &apb0_clk.common, + &apb0_pio_clk.common, + &apb0_ir_clk.common, + &apb0_timer_clk.common, + &apb0_rsb_clk.common, + &apb0_uart_clk.common, + &apb0_i2c_clk.common, + &apb0_twd_clk.common, + &ir_clk.common, +}; + +static struct clk_hw_onecell_data sun8i_h3_r_hw_clks = { + .hws = { + [CLK_AR100] = &ar100_clk.common.hw, + [CLK_AHB0] = &ahb0_clk.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR] = &apb0_ir_clk.common.hw, + [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, + [CLK_APB0_UART] = &apb0_uart_clk.common.hw, + [CLK_APB0_I2C] = &apb0_i2c_clk.common.hw, + [CLK_APB0_TWD] = &apb0_twd_clk.common.hw, + [CLK_IR] = &ir_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct clk_hw_onecell_data sun50i_a64_r_hw_clks = { + .hws = { + [CLK_AR100] = &ar100_clk.common.hw, + [CLK_AHB0] = &ahb0_clk.hw, + [CLK_APB0] = &apb0_clk.common.hw, + [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, + [CLK_APB0_IR] = &apb0_ir_clk.common.hw, + [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw, + [CLK_APB0_RSB] = &apb0_rsb_clk.common.hw, + [CLK_APB0_UART] = &apb0_uart_clk.common.hw, + [CLK_APB0_I2C] = &apb0_i2c_clk.common.hw, + [CLK_APB0_TWD] = &apb0_twd_clk.common.hw, + [CLK_IR] = &ir_clk.common.hw, + }, + .num = CLK_NUMBER, +}; + +static struct ccu_reset_map sun8i_h3_r_ccu_resets[] = { + [RST_APB0_IR] = { 0xb0, BIT(1) }, + [RST_APB0_TIMER] = { 0xb0, BIT(2) }, + [RST_APB0_UART] = { 0xb0, BIT(4) }, + [RST_APB0_I2C] = { 0xb0, 
BIT(6) }, +}; + +static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = { + [RST_APB0_IR] = { 0xb0, BIT(1) }, + [RST_APB0_TIMER] = { 0xb0, BIT(2) }, + [RST_APB0_RSB] = { 0xb0, BIT(3) }, + [RST_APB0_UART] = { 0xb0, BIT(4) }, + [RST_APB0_I2C] = { 0xb0, BIT(6) }, +}; + +static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = { + .ccu_clks = sun8i_h3_r_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun8i_h3_r_ccu_clks), + + .hw_clks = &sun8i_h3_r_hw_clks, + + .resets = sun8i_h3_r_ccu_resets, + .num_resets = ARRAY_SIZE(sun8i_h3_r_ccu_resets), +}; + +static const struct sunxi_ccu_desc sun50i_a64_r_ccu_desc = { + .ccu_clks = sun50i_a64_r_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun50i_a64_r_ccu_clks), + + .hw_clks = &sun50i_a64_r_hw_clks, + + .resets = sun50i_a64_r_ccu_resets, + .num_resets = ARRAY_SIZE(sun50i_a64_r_ccu_resets), +}; + +static void __init sunxi_r_ccu_init(struct device_node *node, + const struct sunxi_ccu_desc *desc) +{ + void __iomem *reg; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("%s: Could not map the clock registers\n", + of_node_full_name(node)); + return; + } + + sunxi_ccu_probe(node, reg, desc); +} + +static void __init sun8i_h3_r_ccu_setup(struct device_node *node) +{ + sunxi_r_ccu_init(node, &sun8i_h3_r_ccu_desc); +} +CLK_OF_DECLARE(sun8i_h3_r_ccu, "allwinner,sun8i-h3-r-ccu", + sun8i_h3_r_ccu_setup); + +static void __init sun50i_a64_r_ccu_setup(struct device_node *node) +{ + sunxi_r_ccu_init(node, &sun50i_a64_r_ccu_desc); +} +CLK_OF_DECLARE(sun50i_a64_r_ccu, "allwinner,sun50i-a64-r-ccu", + sun50i_a64_r_ccu_setup); diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.h b/drivers/clk/sunxi-ng/ccu-sun8i-r.h new file mode 100644 index 000000000000..eaa431fd1d8f --- /dev/null +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.h @@ -0,0 +1,27 @@ +/* + * Copyright 2016 Icenowy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CCU_SUN8I_R_H +#define _CCU_SUN8I_R_H_ + +#include +#include + +/* AHB/APB bus clocks are not exported */ +#define CLK_AHB0 1 +#define CLK_APB0 2 + +#define CLK_NUMBER (CLK_APB0_TWD + 1) + +#endif /* _CCU_SUN8I_R_H */ diff --git a/include/dt-bindings/clock/sun8i-r-ccu.h b/include/dt-bindings/clock/sun8i-r-ccu.h new file mode 100644 index 000000000000..779d20aa0d05 --- /dev/null +++ b/include/dt-bindings/clock/sun8i-r-ccu.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2016 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ +#define _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ + +#define CLK_AR100 0 + +#define CLK_APB0_PIO 3 +#define CLK_APB0_IR 4 +#define CLK_APB0_TIMER 5 +#define CLK_APB0_RSB 6 +#define CLK_APB0_UART 7 +/* 8 is reserved for CLK_APB0_W1 on A31 */ +#define CLK_APB0_I2C 9 +#define CLK_APB0_TWD 10 + +#define CLK_IR 11 + +#endif /* _DT_BINDINGS_CLK_SUN8I_R_CCU_H_ */ diff --git a/include/dt-bindings/reset/sun8i-r-ccu.h b/include/dt-bindings/reset/sun8i-r-ccu.h new file mode 100644 index 000000000000..4ba64f3d6fc9 --- /dev/null +++ b/include/dt-bindings/reset/sun8i-r-ccu.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2016 Icenowy Zheng + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DT_BINDINGS_RST_SUN8I_R_CCU_H_ +#define _DT_BINDINGS_RST_SUN8I_R_CCU_H_ + +#define RST_APB0_IR 0 +#define RST_APB0_TIMER 1 +#define RST_APB0_RSB 2 +#define RST_APB0_UART 3 +/* 4 is reserved for RST_APB0_W1 on A31 */ +#define RST_APB0_I2C 5 + +#endif /* _DT_BINDINGS_RST_SUN8I_R_CCU_H_ */ -- cgit v1.2.3 From bf616d21f41174389c6d720ae21bf40f154474c8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 4 Apr 2017 16:54:21 +0100 Subject: Annotate module params that specify hardware parameters (eg. ioport) Provided an annotation for module parameters that specify hardware parameters (such as io ports, iomem addresses, irqs, dma channels, fixed dma buffers and other types). This will enable such parameters to be locked down in the core parameter parser for secure boot support. I've also included annotations as to what sort of hardware configuration each module is dealing with for future use. Some of these are straightforward (ioport, iomem, irq, dma), but there are also: (1) drivers that switch the semantics of a parameter between ioport and iomem depending on a second parameter, (2) drivers that appear to reserve a CPU memory buffer at a fixed address, (3) other parameters, such as bus types and irq selection bitmasks. For the moment, the hardware configuration type isn't actually stored, though its validity is checked. Signed-off-by: David Howells --- include/linux/moduleparam.h | 65 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 52666d90ca94..6be1949ebcdf 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -60,9 +60,11 @@ struct kernel_param_ops { * Flags available for kernel_param * * UNSAFE - the parameter is dangerous and setting it will taint the kernel + * HWPARAM - Hardware param not permitted in lockdown mode */ enum { - KERNEL_PARAM_FL_UNSAFE = (1 << 0) + KERNEL_PARAM_FL_UNSAFE = (1 << 0), + KERNEL_PARAM_FL_HWPARAM = (1 << 1), }; struct kernel_param { @@ -451,6 +453,67 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); perm, -1, 0); \ __MODULE_PARM_TYPE(name, "array of " #type) +enum hwparam_type { + hwparam_ioport, /* Module parameter configures an I/O port */ + hwparam_iomem, /* Module parameter configures an I/O mem address */ + hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */ + hwparam_irq, /* Module parameter configures an I/O port */ + hwparam_dma, /* Module parameter configures a DMA channel */ + hwparam_dma_addr, /* Module parameter configures a DMA buffer address */ + hwparam_other, /* Module parameter configures some other value */ +}; + +/** + * module_param_hw_named - A parameter representing a hw parameters + * @name: a valid C identifier which is the parameter name. + * @value: the actual lvalue to alter. + * @type: the type of the parameter + * @hwtype: what the value represents (enum hwparam_type) + * @perm: visibility in sysfs. + * + * Usually it's a good idea to have variable names and user-exposed names the + * same, but that's harder if the variable must be non-static or is inside a + * structure. 
This allows exposure under a different name. + */ +#define module_param_hw_named(name, value, type, hwtype, perm) \ + param_check_##type(name, &(value)); \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_ops_##type, &value, \ + perm, -1, \ + KERNEL_PARAM_FL_HWPARAM | (hwparam_##hwtype & 0)); \ + __MODULE_PARM_TYPE(name, #type) + +#define module_param_hw(name, type, hwtype, perm) \ + module_param_hw_named(name, name, type, hwtype, perm) + +/** + * module_param_hw_array - A parameter representing an array of hw parameters + * @name: the name of the array variable + * @type: the type, as per module_param() + * @hwtype: what the value represents (enum hwparam_type) + * @nump: optional pointer filled in with the number written + * @perm: visibility in sysfs + * + * Input and output are as comma-separated values. Commas inside values + * don't work properly (eg. an array of charp). + * + * ARRAY_SIZE(@name) is used to determine the number of elements in the + * array, so the definition must be visible. + */ +#define module_param_hw_array(name, type, hwtype, nump, perm) \ + param_check_##type(name, &(name)[0]); \ + static const struct kparam_array __param_arr_##name \ + = { .max = ARRAY_SIZE(name), .num = nump, \ + .ops = ¶m_ops_##type, \ + .elemsize = sizeof(name[0]), .elem = name }; \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + ¶m_array_ops, \ + .arr = &__param_arr_##name, \ + perm, -1, \ + KERNEL_PARAM_FL_HWPARAM | (hwparam_##hwtype & 0)); \ + __MODULE_PARM_TYPE(name, "array of " #type) + + extern const struct kernel_param_ops param_array_ops; extern const struct kernel_param_ops param_ops_string; -- cgit v1.2.3 From adf5e5168bd51c42332ebaa709351fa6ed65ea73 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 27 Jan 2017 12:22:54 +0000 Subject: iommu: Better document the IOMMU_PRIV flag This is a fairly subtle thing - let's make sure it's described as clearly as possible to avoid potential misunderstandings. Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- include/linux/iommu.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2e4de0deee53..88ec8c6580d3 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -32,10 +32,13 @@ #define IOMMU_NOEXEC (1 << 3) #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ /* - * This is to make the IOMMU API setup privileged - * mapppings accessible by the master only at higher - * privileged execution level and inaccessible at - * less privileged levels. + * Where the bus hardware includes a privilege level as part of its access type + * markings, and certain devices are capable of issuing transactions marked as + * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other + * given permission flags only apply to accesses at the higher privilege level, + * and that unprivileged transactions should have as little access as possible. + * This would usually imply the same permissions as kernel mappings on the CPU, + * if the IOMMU page table format is equivalent. */ #define IOMMU_PRIV (1 << 5) -- cgit v1.2.3 From b0459491ca2dcda4223b3f3c80a635ae12218580 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Sun, 26 Mar 2017 02:23:15 +0800 Subject: clk: hi6220: add debug APB clock The debug APB clock is absent in hi6220 driver, so this patch is to add support for it. 
Signed-off-by: Leo Yan Signed-off-by: Stephen Boyd Signed-off-by: Michael Turquette --- drivers/clk/hisilicon/clk-hi6220.c | 1 + include/dt-bindings/clock/hi6220-clock.h | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c index c0e8e1f196aa..2ae151ce623a 100644 --- a/drivers/clk/hisilicon/clk-hi6220.c +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -134,6 +134,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = { { HI6220_UART4_PCLK, "uart4_pclk", "uart4_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 8, 0, }, { HI6220_SPI_CLK, "spi_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 9, 0, }, { HI6220_TSENSOR_CLK, "tsensor_clk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 12, 0, }, + { HI6220_DAPB_CLK, "dapb_clk", "cs_dapb", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x230, 18, 0, }, { HI6220_MMU_CLK, "mmu_clk", "ddrc_axi1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x240, 11, 0, }, { HI6220_HIFI_SEL, "hifi_sel", "hifi_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 0, 0, }, { HI6220_MMC0_SYSPLL, "mmc0_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 1, 0, }, diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h index 6b03c84f4278..b8ba665aab7b 100644 --- a/include/dt-bindings/clock/hi6220-clock.h +++ b/include/dt-bindings/clock/hi6220-clock.h @@ -124,7 +124,10 @@ #define HI6220_CS_DAPB 57 #define HI6220_CS_ATB_DIV 58 -#define HI6220_SYS_NR_CLKS 59 +/* gate clock */ +#define HI6220_DAPB_CLK 59 + +#define HI6220_SYS_NR_CLKS 60 /* clk in Hi6220 media controller */ /* gate clocks */ -- cgit v1.2.3 From 490b54d6fb75f6ffd0471ec58bb38a992e2b40cd Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 3 Mar 2017 10:55:12 +0200 Subject: btrfs: convert extent_map.refs from atomic_t to refcount_t refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows to avoid accidental refcounter overflows that might lead to use-after-free situations. 
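The change follows the standard atomic_t to refcount_t conversion pattern. A minimal self-contained sketch of that pattern (a made-up object for illustration, not btrfs code) looks like this:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct demo_obj {
            refcount_t refs;
            /* ... payload ... */
    };

    static struct demo_obj *demo_obj_alloc(void)
    {
            struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (obj)
                    refcount_set(&obj->refs, 1);    /* one reference for the caller */
            return obj;
    }

    static void demo_obj_get(struct demo_obj *obj)
    {
            /* checked for overflow instead of silently wrapping */
            refcount_inc(&obj->refs);
    }

    static void demo_obj_put(struct demo_obj *obj)
    {
            if (refcount_dec_and_test(&obj->refs))
                    kfree(obj);
    }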
Signed-off-by: Elena Reshetova Signed-off-by: Hans Liljestrand Signed-off-by: Kees Cook Signed-off-by: David Windsor Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 4 ++-- fs/btrfs/extent_map.c | 10 +++++----- fs/btrfs/extent_map.h | 3 ++- fs/btrfs/tree-log.c | 2 +- fs/btrfs/volumes.c | 2 +- include/trace/events/btrfs.h | 2 +- 6 files changed, 12 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 27fdb250b446..3649932e48d5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2859,7 +2859,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, em = *em_cached; if (extent_map_in_tree(em) && start >= em->start && start < extent_map_end(em)) { - atomic_inc(&em->refs); + refcount_inc(&em->refs); return em; } @@ -2870,7 +2870,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0); if (em_cached && !IS_ERR_OR_NULL(em)) { BUG_ON(*em_cached); - atomic_inc(&em->refs); + refcount_inc(&em->refs); *em_cached = em; } return em; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 26f9ac719d20..69850155870c 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -55,7 +55,7 @@ struct extent_map *alloc_extent_map(void) em->flags = 0; em->compress_type = BTRFS_COMPRESS_NONE; em->generation = 0; - atomic_set(&em->refs, 1); + refcount_set(&em->refs, 1); INIT_LIST_HEAD(&em->list); return em; } @@ -71,8 +71,8 @@ void free_extent_map(struct extent_map *em) { if (!em) return; - WARN_ON(atomic_read(&em->refs) == 0); - if (atomic_dec_and_test(&em->refs)) { + WARN_ON(refcount_read(&em->refs) == 0); + if (refcount_dec_and_test(&em->refs)) { WARN_ON(extent_map_in_tree(em)); WARN_ON(!list_empty(&em->list)); if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) @@ -322,7 +322,7 @@ static inline void setup_extent_mapping(struct extent_map_tree *tree, struct extent_map *em, int modified) { - atomic_inc(&em->refs); + refcount_inc(&em->refs); em->mod_start = em->start; em->mod_len = em->len; @@ -381,7 +381,7 @@ __lookup_extent_mapping(struct extent_map_tree *tree, if (strict && !(end > em->start && start < extent_map_end(em))) return NULL; - atomic_inc(&em->refs); + refcount_inc(&em->refs); return em; } diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index eb8b8fae036b..a67b2def5413 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -2,6 +2,7 @@ #define __EXTENTMAP__ #include +#include #define EXTENT_MAP_LAST_BYTE ((u64)-4) #define EXTENT_MAP_HOLE ((u64)-3) @@ -41,7 +42,7 @@ struct extent_map { */ struct map_lookup *map_lookup; }; - atomic_t refs; + refcount_t refs; unsigned int compress_type; struct list_head list; }; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index a59674c3e69e..ccfe9fe7754a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4196,7 +4196,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, if (em->generation <= test_gen) continue; /* Need a ref to keep it from getting evicted from cache */ - atomic_inc(&em->refs); + refcount_inc(&em->refs); set_bit(EXTENT_FLAG_LOGGING, &em->flags); list_add_tail(&em->list, &extents); num++; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0f6706047167..dce59fb59b0c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4839,7 +4839,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, ret = add_extent_mapping(em_tree, em, 0); if (!ret) { 
list_add_tail(&em->list, &trans->transaction->pending_chunks); - atomic_inc(&em->refs); + refcount_inc(&em->refs); } write_unlock(&em_tree->lock); if (ret) { diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index a3c3cab643a9..9dd29e806fed 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -213,7 +213,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, __entry->block_start = map->block_start; __entry->block_len = map->block_len; __entry->flags = map->flags; - __entry->refs = atomic_read(&map->refs); + __entry->refs = refcount_read(&map->refs); __entry->compress_type = map->compress_type; ), -- cgit v1.2.3 From e76edab7f059bc1047c1865141e2709d70e74852 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 3 Mar 2017 10:55:13 +0200 Subject: btrfs: convert btrfs_ordered_extent.refs from atomic_t to refcount_t refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows to avoid accidental refcounter overflows that might lead to use-after-free situations. Signed-off-by: Elena Reshetova Signed-off-by: Hans Liljestrand Signed-off-by: Kees Cook Signed-off-by: David Windsor Signed-off-by: David Sterba --- fs/btrfs/ordered-data.c | 18 +++++++++--------- fs/btrfs/ordered-data.h | 2 +- include/trace/events/btrfs.h | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index da5399f9fb54..7b40e2e7292a 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -212,7 +212,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, set_bit(BTRFS_ORDERED_DIRECT, &entry->flags); /* one ref for the tree */ - atomic_set(&entry->refs, 1); + refcount_set(&entry->refs, 1); init_waitqueue_head(&entry->wait); INIT_LIST_HEAD(&entry->list); INIT_LIST_HEAD(&entry->root_extent_list); @@ -358,7 +358,7 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode, out: if (!ret && cached && entry) { *cached = entry; - atomic_inc(&entry->refs); + refcount_inc(&entry->refs); } spin_unlock_irqrestore(&tree->lock, flags); return ret == 0; @@ -425,7 +425,7 @@ have_entry: out: if (!ret && cached && entry) { *cached = entry; - atomic_inc(&entry->refs); + refcount_inc(&entry->refs); } spin_unlock_irqrestore(&tree->lock, flags); return ret == 0; @@ -456,7 +456,7 @@ void btrfs_get_logged_extents(struct btrfs_inode *inode, if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) continue; list_add(&ordered->log_list, logged_list); - atomic_inc(&ordered->refs); + refcount_inc(&ordered->refs); } spin_unlock_irq(&tree->lock); } @@ -565,7 +565,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) trace_btrfs_ordered_extent_put(entry->inode, entry); - if (atomic_dec_and_test(&entry->refs)) { + if (refcount_dec_and_test(&entry->refs)) { ASSERT(list_empty(&entry->log_list)); ASSERT(list_empty(&entry->trans_list)); ASSERT(list_empty(&entry->root_extent_list)); @@ -690,7 +690,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, list_move_tail(&ordered->root_extent_list, &root->ordered_extents); - atomic_inc(&ordered->refs); + refcount_inc(&ordered->refs); spin_unlock(&root->ordered_extent_lock); btrfs_init_work(&ordered->flush_work, @@ -870,7 +870,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, if (!offset_in_entry(entry, file_offset)) entry = NULL; if (entry) - atomic_inc(&entry->refs); + refcount_inc(&entry->refs); out: 
spin_unlock_irq(&tree->lock); return entry; @@ -911,7 +911,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range( } out: if (entry) - atomic_inc(&entry->refs); + refcount_inc(&entry->refs); spin_unlock_irq(&tree->lock); return entry; } @@ -948,7 +948,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) goto out; entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); - atomic_inc(&entry->refs); + refcount_inc(&entry->refs); out: spin_unlock_irq(&tree->lock); return entry; diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 195c93b67fe0..e0c1d5b8d859 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -113,7 +113,7 @@ struct btrfs_ordered_extent { int compress_type; /* reference count */ - atomic_t refs; + refcount_t refs; /* the inode we belong to */ struct inode *inode; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 9dd29e806fed..8f206263fee7 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -275,7 +275,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent, __entry->bytes_left = ordered->bytes_left; __entry->flags = ordered->flags; __entry->compress_type = ordered->compress_type; - __entry->refs = atomic_read(&ordered->refs); + __entry->refs = refcount_read(&ordered->refs); __entry->root_objectid = BTRFS_I(inode)->root->root_key.objectid; __entry->truncated_len = ordered->truncated_len; -- cgit v1.2.3 From 09ed2f165cb3449237dec842b3564044e12d22cb Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Fri, 10 Mar 2017 11:09:48 -0800 Subject: Btrfs: add file item tracepoints While debugging truncate problems, I found that these tracepoints could help us quickly know what went wrong. Two sets of tracepoints are created to track regular/prealloc file item and inline file item respectively, I put inline as a separate one since what inline file items cares about are way less than the regular one. 
This adds four tracepoints: - btrfs_get_extent_show_fi_regular - btrfs_get_extent_show_fi_inline - btrfs_truncate_show_fi_regular - btrfs_truncate_show_fi_inline Cc: David Sterba Signed-off-by: Liu Bo Reviewed-by: David Sterba [ formatting adjustments ] Signed-off-by: David Sterba --- fs/btrfs/inode.c | 15 +++++ include/trace/events/btrfs.h | 139 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 154 insertions(+) (limited to 'include') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 568b28114f09..da71119b5b7c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4401,9 +4401,17 @@ search_again: if (extent_type != BTRFS_FILE_EXTENT_INLINE) { item_end += btrfs_file_extent_num_bytes(leaf, fi); + + trace_btrfs_truncate_show_fi_regular( + BTRFS_I(inode), leaf, fi, + found_key.offset); } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { item_end += btrfs_file_extent_inline_len(leaf, path->slots[0], fi); + + trace_btrfs_truncate_show_fi_inline( + BTRFS_I(inode), leaf, fi, path->slots[0], + found_key.offset); } item_end--; } @@ -6828,11 +6836,18 @@ again: found_type == BTRFS_FILE_EXTENT_PREALLOC) { extent_end = extent_start + btrfs_file_extent_num_bytes(leaf, item); + + trace_btrfs_get_extent_show_fi_regular(inode, leaf, item, + extent_start); } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size_t size; size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); extent_end = ALIGN(extent_start + size, fs_info->sectorsize); + + trace_btrfs_get_extent_show_fi_inline(inode, leaf, item, + path->slots[0], + extent_start); } next: if (start >= extent_end) { diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 8f206263fee7..cef39e2baf21 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -12,6 +12,7 @@ struct btrfs_root; struct btrfs_fs_info; struct btrfs_inode; struct extent_map; +struct btrfs_file_extent_item; struct btrfs_ordered_extent; struct btrfs_delayed_ref_node; struct btrfs_delayed_tree_ref; @@ -54,6 +55,12 @@ struct btrfs_qgroup_extent_record; (obj >= BTRFS_ROOT_TREE_OBJECTID && \ obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? 
__show_root_type(obj) : "-" +#define show_fi_type(type) \ + __print_symbolic(type, \ + { BTRFS_FILE_EXTENT_INLINE, "INLINE" }, \ + { BTRFS_FILE_EXTENT_REG, "REG" }, \ + { BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC"}) + #define BTRFS_GROUP_FLAGS \ { BTRFS_BLOCK_GROUP_DATA, "DATA"}, \ { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \ @@ -232,6 +239,138 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, __entry->refs, __entry->compress_type) ); +/* file extent item */ +DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, + + TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, + struct btrfs_file_extent_item *fi, u64 start), + + TP_ARGS(bi, l, fi, start), + + TP_STRUCT__entry_btrfs( + __field( u64, root_obj ) + __field( u64, ino ) + __field( loff_t, isize ) + __field( u64, disk_isize ) + __field( u64, num_bytes ) + __field( u64, ram_bytes ) + __field( u64, disk_bytenr ) + __field( u64, disk_num_bytes ) + __field( u64, extent_offset ) + __field( u8, extent_type ) + __field( u8, compression ) + __field( u64, extent_start ) + __field( u64, extent_end ) + ), + + TP_fast_assign_btrfs(bi->root->fs_info, + __entry->root_obj = bi->root->objectid; + __entry->ino = btrfs_ino(bi); + __entry->isize = bi->vfs_inode.i_size; + __entry->disk_isize = bi->disk_i_size; + __entry->num_bytes = btrfs_file_extent_num_bytes(l, fi); + __entry->ram_bytes = btrfs_file_extent_ram_bytes(l, fi); + __entry->disk_bytenr = btrfs_file_extent_disk_bytenr(l, fi); + __entry->disk_num_bytes = btrfs_file_extent_disk_num_bytes(l, fi); + __entry->extent_offset = btrfs_file_extent_offset(l, fi); + __entry->extent_type = btrfs_file_extent_type(l, fi); + __entry->compression = btrfs_file_extent_compression(l, fi); + __entry->extent_start = start; + __entry->extent_end = (start + __entry->num_bytes); + ), + + TP_printk_btrfs( + "root=%llu(%s) inode=%llu size=%llu disk_isize=%llu " + "file extent range=[%llu %llu] " + "(num_bytes=%llu ram_bytes=%llu disk_bytenr=%llu " + "disk_num_bytes=%llu extent_offset=%llu type=%s " + "compression=%u", + show_root_type(__entry->root_obj), __entry->ino, + __entry->isize, + __entry->disk_isize, __entry->extent_start, + __entry->extent_end, __entry->num_bytes, __entry->ram_bytes, + __entry->disk_bytenr, __entry->disk_num_bytes, + __entry->extent_offset, show_fi_type(__entry->extent_type), + __entry->compression) +); + +DECLARE_EVENT_CLASS( + btrfs__file_extent_item_inline, + + TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, + struct btrfs_file_extent_item *fi, int slot, u64 start), + + TP_ARGS(bi, l, fi, slot, start), + + TP_STRUCT__entry_btrfs( + __field( u64, root_obj ) + __field( u64, ino ) + __field( loff_t, isize ) + __field( u64, disk_isize ) + __field( u8, extent_type ) + __field( u8, compression ) + __field( u64, extent_start ) + __field( u64, extent_end ) + ), + + TP_fast_assign_btrfs( + bi->root->fs_info, + __entry->root_obj = bi->root->objectid; + __entry->ino = btrfs_ino(bi); + __entry->isize = bi->vfs_inode.i_size; + __entry->disk_isize = bi->disk_i_size; + __entry->extent_type = btrfs_file_extent_type(l, fi); + __entry->compression = btrfs_file_extent_compression(l, fi); + __entry->extent_start = start; + __entry->extent_end = (start + btrfs_file_extent_inline_len(l, slot, fi)); + ), + + TP_printk_btrfs( + "root=%llu(%s) inode=%llu size=%llu disk_isize=%llu " + "file extent range=[%llu %llu] " + "extent_type=%s compression=%u", + show_root_type(__entry->root_obj), __entry->ino, __entry->isize, + __entry->disk_isize, __entry->extent_start, + __entry->extent_end, show_fi_type(__entry->extent_type), 
+ __entry->compression) +); + +DEFINE_EVENT( + btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular, + + TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, + struct btrfs_file_extent_item *fi, u64 start), + + TP_ARGS(bi, l, fi, start) +); + +DEFINE_EVENT( + btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular, + + TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, + struct btrfs_file_extent_item *fi, u64 start), + + TP_ARGS(bi, l, fi, start) +); + +DEFINE_EVENT( + btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline, + + TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, + struct btrfs_file_extent_item *fi, int slot, u64 start), + + TP_ARGS(bi, l, fi, slot, start) +); + +DEFINE_EVENT( + btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline, + + TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, + struct btrfs_file_extent_item *fi, int slot, u64 start), + + TP_ARGS(bi, l, fi, slot, start) +); + #define show_ordered_flags(flags) \ __print_flags(flags, "|", \ { (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \ -- cgit v1.2.3 From 261cc2cca0a8c1d817be65434052feb1db1fd961 Mon Sep 17 00:00:00 2001 From: Hans van Kranenburg Date: Wed, 8 Mar 2017 18:58:43 +0100 Subject: Btrfs: consistent usage of types in balance_args The btrfs_balance_args are only used for the balance ioctl, so use __u instead of __le here for consistency. The __le usage was introduced in bc3094673f22d and dee32d0ac3719 and was probably a result of copy/pasting when the code was written. The usage of __le did not break anything, but it's unnecessary. Also, this change makes the code less confusing for the careful reader. Signed-off-by: Hans van Kranenburg Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/uapi/linux/btrfs.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index dcfc3a5a9cb1..a456e5309238 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -291,10 +291,10 @@ struct btrfs_ioctl_feature_flags { struct btrfs_balance_args { __u64 profiles; union { - __le64 usage; + __u64 usage; struct { - __le32 usage_min; - __le32 usage_max; + __u32 usage_min; + __u32 usage_max; }; }; __u64 devid; @@ -324,8 +324,8 @@ struct btrfs_balance_args { * Process chunks that cross stripes_min..stripes_max devices, * BTRFS_BALANCE_ARGS_STRIPES_RANGE */ - __le32 stripes_min; - __le32 stripes_max; + __u32 stripes_min; + __u32 stripes_max; __u64 unused[6]; } __attribute__ ((__packed__)); -- cgit v1.2.3 From 3159fe7baef3a50fc332455e252d8a01a18f1ff1 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Mon, 13 Mar 2017 15:52:08 +0800 Subject: btrfs: qgroup: Add trace point for qgroup reserved space Introduce the following trace points: qgroup_update_reserve qgroup_meta_reserve These trace points are handy to trace qgroup reserve space related problems. Also export btrfs_qgroup structure, as now we directly pass btrfs_qgroup structure to trace points, so that structure needs to be exported. 
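The pattern being introduced is simply to emit a trace point next to every reserved-space adjustment. A simplified sketch of that pairing (illustrative helpers, not the actual btrfs code paths) would be:

    static void demo_qgroup_add_reserve(struct btrfs_fs_info *fs_info,
                                        struct btrfs_qgroup *qg, u64 num_bytes)
    {
            trace_qgroup_update_reserve(fs_info, qg, num_bytes);
            qg->reserved += num_bytes;
    }

    static void demo_qgroup_sub_reserve(struct btrfs_fs_info *fs_info,
                                        struct btrfs_qgroup *qg, u64 num_bytes)
    {
            trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes);
            if (!WARN_ON(qg->reserved < num_bytes)) /* underflow would be a bug */
                    qg->reserved -= num_bytes;
    }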
Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/qgroup.c | 52 +++++++------------------------------------- fs/btrfs/qgroup.h | 44 +++++++++++++++++++++++++++++++++++++ include/trace/events/btrfs.h | 44 +++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 1d7942c5a38a..27a229619808 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -47,50 +47,6 @@ * - check all ioctl parameters */ -/* - * one struct for each qgroup, organized in fs_info->qgroup_tree. - */ -struct btrfs_qgroup { - u64 qgroupid; - - /* - * state - */ - u64 rfer; /* referenced */ - u64 rfer_cmpr; /* referenced compressed */ - u64 excl; /* exclusive */ - u64 excl_cmpr; /* exclusive compressed */ - - /* - * limits - */ - u64 lim_flags; /* which limits are set */ - u64 max_rfer; - u64 max_excl; - u64 rsv_rfer; - u64 rsv_excl; - - /* - * reservation tracking - */ - u64 reserved; - - /* - * lists - */ - struct list_head groups; /* groups this group is member of */ - struct list_head members; /* groups that are members of this group */ - struct list_head dirty; /* dirty groups */ - struct rb_node node; /* tree of qgroups */ - - /* - * temp variables for accounting operations - * Refer to qgroup_shared_accounting() for details. - */ - u64 old_refcnt; - u64 new_refcnt; -}; - static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq, int mod) { @@ -1075,6 +1031,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, qgroup->excl += sign * num_bytes; qgroup->excl_cmpr += sign * num_bytes; if (sign > 0) { + trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes); if (WARN_ON(qgroup->reserved < num_bytes)) report_reserved_underflow(fs_info, qgroup, num_bytes); else @@ -1100,6 +1057,8 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, WARN_ON(sign < 0 && qgroup->excl < num_bytes); qgroup->excl += sign * num_bytes; if (sign > 0) { + trace_qgroup_update_reserve(fs_info, qgroup, + -(s64)num_bytes); if (WARN_ON(qgroup->reserved < num_bytes)) report_reserved_underflow(fs_info, qgroup, num_bytes); @@ -2446,6 +2405,7 @@ retry: qg = unode_aux_to_qgroup(unode); + trace_qgroup_update_reserve(fs_info, qg, num_bytes); qg->reserved += num_bytes; } @@ -2491,6 +2451,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, qg = unode_aux_to_qgroup(unode); + trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes); if (WARN_ON(qg->reserved < num_bytes)) report_reserved_underflow(fs_info, qg, num_bytes); else @@ -2955,6 +2916,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, return 0; BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); + trace_qgroup_meta_reserve(root, (s64)num_bytes); ret = qgroup_reserve(root, num_bytes, enforce); if (ret < 0) return ret; @@ -2974,6 +2936,7 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root) reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0); if (reserved == 0) return; + trace_qgroup_meta_reserve(root, -(s64)reserved); btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved); } @@ -2988,6 +2951,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes) BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes); atomic64_sub(num_bytes, &root->qgroup_meta_rsv); + trace_qgroup_meta_reserve(root, -(s64)num_bytes); btrfs_qgroup_free_refroot(fs_info, root->objectid, 
num_bytes); } diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 96fc56ebf55a..31e468b16175 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -61,6 +61,50 @@ struct btrfs_qgroup_extent_record { struct ulist *old_roots; }; +/* + * one struct for each qgroup, organized in fs_info->qgroup_tree. + */ +struct btrfs_qgroup { + u64 qgroupid; + + /* + * state + */ + u64 rfer; /* referenced */ + u64 rfer_cmpr; /* referenced compressed */ + u64 excl; /* exclusive */ + u64 excl_cmpr; /* exclusive compressed */ + + /* + * limits + */ + u64 lim_flags; /* which limits are set */ + u64 max_rfer; + u64 max_excl; + u64 rsv_rfer; + u64 rsv_excl; + + /* + * reservation tracking + */ + u64 reserved; + + /* + * lists + */ + struct list_head groups; /* groups this group is member of */ + struct list_head members; /* groups that are members of this group */ + struct list_head dirty; /* dirty groups */ + struct rb_node node; /* tree of qgroups */ + + /* + * temp variables for accounting operations + * Refer to qgroup_shared_accounting() for details. + */ + u64 old_refcnt; + u64 new_refcnt; +}; + /* * For qgroup event trace points only */ diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index cef39e2baf21..e37973526153 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -25,6 +25,7 @@ struct extent_buffer; struct btrfs_work; struct __btrfs_workqueue; struct btrfs_qgroup_extent_record; +struct btrfs_qgroup; #define show_ref_type(type) \ __print_symbolic(type, \ @@ -1614,6 +1615,49 @@ TRACE_EVENT(qgroup_update_counters, __entry->cur_new_count) ); +TRACE_EVENT(qgroup_update_reserve, + + TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup, + s64 diff), + + TP_ARGS(fs_info, qgroup, diff), + + TP_STRUCT__entry_btrfs( + __field( u64, qgid ) + __field( u64, cur_reserved ) + __field( s64, diff ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->qgid = qgroup->qgroupid; + __entry->cur_reserved = qgroup->reserved; + __entry->diff = diff; + ), + + TP_printk_btrfs("qgid=%llu cur_reserved=%llu diff=%lld", + __entry->qgid, __entry->cur_reserved, __entry->diff) +); + +TRACE_EVENT(qgroup_meta_reserve, + + TP_PROTO(struct btrfs_root *root, s64 diff), + + TP_ARGS(root, diff), + + TP_STRUCT__entry_btrfs( + __field( u64, refroot ) + __field( s64, diff ) + ), + + TP_fast_assign_btrfs(root->fs_info, + __entry->refroot = root->objectid; + __entry->diff = diff; + ), + + TP_printk_btrfs("refroot=%llu(%s) diff=%lld", + show_root_type(__entry->refroot), __entry->diff) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ -- cgit v1.2.3 From b8c17e6664c461e4aed545a943304c3b32dd309c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Nov 2016 14:25:21 -0800 Subject: rcu: Maintain special bits at bottom of ->dynticks counter Currently, IPIs are used to force other CPUs to invalidate their TLBs in response to a kernel virtual-memory mapping change. This works, but degrades both battery lifetime (for idle CPUs) and real-time response (for nohz_full CPUs), and in addition results in unnecessary IPIs due to the fact that CPUs executing in usermode are unaffected by stale kernel mappings. It would be better to cause a CPU executing in usermode to wait until it is entering kernel mode to do the flush, first to avoid interrupting usemode tasks and second to handle multiple flush requests with a single flush in the case of a long-running user task. 
This commit therefore reserves a bit at the bottom of the ->dynticks counter, which is checked upon exit from extended quiescent states. If it is set, it is cleared and then a new rcu_eqs_special_exit() macro is invoked, which, if not supplied, is an empty single-pass do-while loop. If this bottom bit is set on -entry- to an extended quiescent state, then a WARN_ON_ONCE() triggers. This bottom bit may be set using a new rcu_eqs_special_set() function, which returns true if the bit was set, or false if the CPU turned out to not be in an extended quiescent state. Please note that this function refuses to set the bit for a non-nohz_full CPU when that CPU is executing in usermode because usermode execution is tracked by RCU as a dyntick-idle extended quiescent state only for nohz_full CPUs. Reported-by: Andy Lutomirski Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- include/linux/rcutiny.h | 5 ++++ kernel/rcu/tree.c | 77 +++++++++++++++++++++++++++++++++++++++---------- kernel/rcu/tree.h | 1 + 3 files changed, 67 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index b452953e21c8..6c9d941e3962 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -33,6 +33,11 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp) return 0; } +static inline bool rcu_eqs_special_set(int cpu) +{ + return false; /* Never flag non-existent other CPUs! */ +} + static inline unsigned long get_state_synchronize_rcu(void) { return 0; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 50fee7689e71..0efad311ded4 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -274,9 +274,19 @@ void rcu_bh_qs(void) static DEFINE_PER_CPU(int, rcu_sched_qs_mask); +/* + * Steal a bit from the bottom of ->dynticks for idle entry/exit + * control. Initially this is for TLB flushing. + */ +#define RCU_DYNTICK_CTRL_MASK 0x1 +#define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1) +#ifndef rcu_eqs_special_exit +#define rcu_eqs_special_exit() do { } while (0) +#endif + static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, - .dynticks = ATOMIC_INIT(1), + .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR), #ifdef CONFIG_NO_HZ_FULL_SYSIDLE .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE, .dynticks_idle = ATOMIC_INIT(1), @@ -290,15 +300,20 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { static void rcu_dynticks_eqs_enter(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); - int special; + int seq; /* - * CPUs seeing atomic_inc_return() must see prior RCU read-side + * CPUs seeing atomic_add_return() must see prior RCU read-side * critical sections, and we also must force ordering with the * next idle sojourn. */ - special = atomic_inc_return(&rdtp->dynticks); - WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && special & 0x1); + seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); + /* Better be in an extended quiescent state! */ + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + (seq & RCU_DYNTICK_CTRL_CTR)); + /* Better not have special action (TLB flush) pending! 
*/ + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + (seq & RCU_DYNTICK_CTRL_MASK)); } /* @@ -308,15 +323,22 @@ static void rcu_dynticks_eqs_enter(void) static void rcu_dynticks_eqs_exit(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); - int special; + int seq; /* - * CPUs seeing atomic_inc_return() must see prior idle sojourns, + * CPUs seeing atomic_add_return() must see prior idle sojourns, * and we also must force ordering with the next RCU read-side * critical section. */ - special = atomic_inc_return(&rdtp->dynticks); - WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(special & 0x1)); + seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + !(seq & RCU_DYNTICK_CTRL_CTR)); + if (seq & RCU_DYNTICK_CTRL_MASK) { + atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks); + smp_mb__after_atomic(); /* _exit after clearing mask. */ + /* Prefer duplicate flushes to losing a flush. */ + rcu_eqs_special_exit(); + } } /* @@ -333,9 +355,9 @@ static void rcu_dynticks_eqs_online(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); - if (atomic_read(&rdtp->dynticks) & 0x1) + if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR) return; - atomic_add(0x1, &rdtp->dynticks); + atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); } /* @@ -347,7 +369,7 @@ bool rcu_dynticks_curr_cpu_in_eqs(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); - return !(atomic_read(&rdtp->dynticks) & 0x1); + return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR); } /* @@ -358,7 +380,7 @@ int rcu_dynticks_snap(struct rcu_dynticks *rdtp) { int snap = atomic_add_return(0, &rdtp->dynticks); - return snap; + return snap & ~RCU_DYNTICK_CTRL_MASK; } /* @@ -367,7 +389,7 @@ int rcu_dynticks_snap(struct rcu_dynticks *rdtp) */ static bool rcu_dynticks_in_eqs(int snap) { - return !(snap & 0x1); + return !(snap & RCU_DYNTICK_CTRL_CTR); } /* @@ -387,10 +409,33 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap) static void rcu_dynticks_momentary_idle(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); - int special = atomic_add_return(2, &rdtp->dynticks); + int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, + &rdtp->dynticks); /* It is illegal to call this from idle state. */ - WARN_ON_ONCE(!(special & 0x1)); + WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); +} + +/* + * Set the special (bottom) bit of the specified CPU so that it + * will take special action (such as flushing its TLB) on the + * next exit from an extended quiescent state. Returns true if + * the bit was successfully set, or false if the CPU was not in + * an extended quiescent state. 
+ */ +bool rcu_eqs_special_set(int cpu) +{ + int old; + int new; + struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); + + do { + old = atomic_read(&rdtp->dynticks); + if (old & RCU_DYNTICK_CTRL_CTR) + return false; + new = old | RCU_DYNTICK_CTRL_MASK; + } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old); + return true; } DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index ec62a05bfdb3..7468b4de7e0c 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -596,6 +596,7 @@ extern struct rcu_state rcu_preempt_state; #endif /* #ifdef CONFIG_PREEMPT_RCU */ int rcu_dynticks_snap(struct rcu_dynticks *rdtp); +bool rcu_eqs_special_set(int cpu); #ifdef CONFIG_RCU_BOOST DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); -- cgit v1.2.3 From 77e5849688670280b173bb9e0544e9da7b2acc36 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 14 Jan 2017 13:32:50 -0800 Subject: rcu: Make arch select smp_mb__after_unlock_lock() strength The definition of smp_mb__after_unlock_lock() is currently smp_mb() for CONFIG_PPC and a no-op otherwise. It would be better to instead provide an architecture-selectable Kconfig option, and select the strength of smp_mb__after_unlock_lock() based on that option. This commit therefore creates ARCH_WEAK_RELEASE_ACQUIRE, has PPC select it, and bases the definition of smp_mb__after_unlock_lock() on this new ARCH_WEAK_RELEASE_ACQUIRE Kconfig option. Reported-by: Ingo Molnar Signed-off-by: Paul E. McKenney Cc: Peter Zijlstra Cc: Will Deacon Cc: Boqun Feng Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Acked-by: Michael Ellerman Cc: Reviewed-by: Josh Triplett --- arch/Kconfig | 3 +++ arch/powerpc/Kconfig | 1 + include/linux/rcupdate.h | 6 +++--- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/arch/Kconfig b/arch/Kconfig index cd211a14a88f..adefaf344239 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -320,6 +320,9 @@ config HAVE_CMPXCHG_LOCAL config HAVE_CMPXCHG_DOUBLE bool +config ARCH_WEAK_RELEASE_ACQUIRE + bool + config ARCH_WANT_IPC_PARSE_VERSION bool diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 97a8bc8a095c..7a5c9b764cd2 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -99,6 +99,7 @@ config PPC select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index de88b33c0974..e6146d0074f8 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -1127,11 +1127,11 @@ do { \ * if the UNLOCK and LOCK are executed by the same CPU or if the * UNLOCK and LOCK operate on the same lock variable. */ -#ifdef CONFIG_PPC +#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE #define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ -#else /* #ifdef CONFIG_PPC */ +#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ #define smp_mb__after_unlock_lock() do { } while (0) -#endif /* #else #ifdef CONFIG_PPC */ +#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ #endif /* __LINUX_RCUPDATE_H */ -- cgit v1.2.3 From 900b1028ec388e50c98200641ae4274794c807cf Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 10 Feb 2017 14:32:54 -0800 Subject: srcu: Allow SRCU to access rcu_scheduler_active This is primarily a code-movement commit in preparation for allowing SRCU to handle early-boot SRCU grace periods. 
Signed-off-by: Paul E. McKenney --- include/linux/rcutiny.h | 6 +++--- kernel/rcu/tiny_plugin.h | 9 +++++---- kernel/rcu/tree.c | 2 +- kernel/rcu/tree_exp.h | 12 ----------- kernel/rcu/update.c | 52 +++++++++++++++++++++++++++++++----------------- 5 files changed, 43 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 6c9d941e3962..5219be250f00 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -217,14 +217,14 @@ static inline void exit_rcu(void) { } -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) extern int rcu_scheduler_active __read_mostly; void rcu_scheduler_starting(void); -#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */ static inline void rcu_scheduler_starting(void) { } -#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */ #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h index df3a60e19f07..371034e77f87 100644 --- a/kernel/rcu/tiny_plugin.h +++ b/kernel/rcu/tiny_plugin.h @@ -52,7 +52,7 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = { RCU_TRACE(.name = "rcu_bh") }; -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) #include int rcu_scheduler_active __read_mostly; @@ -65,15 +65,16 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage. * The reason for this is that Tiny RCU does not need kthreads, so does * not have to care about the fact that the scheduler is half-initialized - * at a certain phase of the boot process. + * at a certain phase of the boot process. Unless SRCU is in the mix. */ void __init rcu_scheduler_starting(void) { WARN_ON(nr_context_switches() > 0); - rcu_scheduler_active = RCU_SCHEDULER_RUNNING; + rcu_scheduler_active = IS_ENABLED(CONFIG_SRCU) + ? RCU_SCHEDULER_INIT : RCU_SCHEDULER_RUNNING; } -#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */ #ifdef CONFIG_RCU_TRACE diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8cc9d40b41ea..2380d1e3dfb8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3974,7 +3974,7 @@ early_initcall(rcu_spawn_gp_kthread); * task is booting the system, and such primitives are no-ops). After this * function is called, any synchronous grace-period primitives are run as * expedited, with the requesting task driving the grace period forward. - * A later core_initcall() rcu_exp_runtime_mode() will switch to full + * A later core_initcall() rcu_set_runtime_mode() will switch to full * runtime RCU functionality. */ void rcu_scheduler_starting(void) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index a1f52bbe9db6..51ca287828a2 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -737,15 +737,3 @@ void synchronize_rcu_expedited(void) EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -/* - * Switch to run-time mode once Tree RCU has fully initialized. 
- */ -static int __init rcu_exp_runtime_mode(void) -{ - rcu_test_sync_prims(); - rcu_scheduler_active = RCU_SCHEDULER_RUNNING; - rcu_test_sync_prims(); - return 0; -} -core_initcall(rcu_exp_runtime_mode); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 55c8530316c7..c5df0d756900 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -124,7 +124,7 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held); * non-expedited counterparts? Intended for use within RCU. Note * that if the user specifies both rcu_expedited and rcu_normal, then * rcu_normal wins. (Except during the time period during boot from - * when the first task is spawned until the rcu_exp_runtime_mode() + * when the first task is spawned until the rcu_set_runtime_mode() * core_initcall() is invoked, at which point everything is expedited.) */ bool rcu_gp_is_normal(void) @@ -190,6 +190,39 @@ void rcu_end_inkernel_boot(void) #endif /* #ifndef CONFIG_TINY_RCU */ +/* + * Test each non-SRCU synchronous grace-period wait API. This is + * useful just after a change in mode for these primitives, and + * during early boot. + */ +void rcu_test_sync_prims(void) +{ + if (!IS_ENABLED(CONFIG_PROVE_RCU)) + return; + synchronize_rcu(); + synchronize_rcu_bh(); + synchronize_sched(); + synchronize_rcu_expedited(); + synchronize_rcu_bh_expedited(); + synchronize_sched_expedited(); +} + +#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) + +/* + * Switch to run-time mode once RCU has fully initialized. + */ +static int __init rcu_set_runtime_mode(void) +{ + rcu_test_sync_prims(); + rcu_scheduler_active = RCU_SCHEDULER_RUNNING; + rcu_test_sync_prims(); + return 0; +} +core_initcall(rcu_set_runtime_mode); + +#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */ + #ifdef CONFIG_PREEMPT_RCU /* @@ -817,23 +850,6 @@ static void rcu_spawn_tasks_kthread(void) #endif /* #ifdef CONFIG_TASKS_RCU */ -/* - * Test each non-SRCU synchronous grace-period wait API. This is - * useful just after a change in mode for these primitives, and - * during early boot. - */ -void rcu_test_sync_prims(void) -{ - if (!IS_ENABLED(CONFIG_PROVE_RCU)) - return; - synchronize_rcu(); - synchronize_rcu_bh(); - synchronize_sched(); - synchronize_rcu_expedited(); - synchronize_rcu_bh_expedited(); - synchronize_sched_expedited(); -} - #ifdef CONFIG_PROVE_RCU /* -- cgit v1.2.3 From c2a8ec0778b2ca0d360ba9b5cac7fcd5ddfe798f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 10 Mar 2017 15:31:55 -0800 Subject: srcu: Move to state-based grace-period sequencing The current SRCU grace-period processing might never reach the last portion of srcu_advance_batches(). This is OK given the current implementation, as the first portion, up to the try_check_zero() following the srcu_flip() is sufficient to drive grace periods forward. However, it has the unfortunate side-effect of making it impossible to determine when a given grace period has ended, and it will be necessary to efficiently trace ends of grace periods in order to efficiently handle per-CPU SRCU callback lists. This commit therefore adds states to the SRCU grace-period processing, so that the end of a given SRCU grace period is marked by the transition to the SRCU_STATE_DONE state. Signed-off-by: Paul E. 
McKenney --- include/linux/srcu.h | 10 ++++- kernel/rcu/srcu.c | 111 ++++++++++++++++++++++++++++----------------------- 2 files changed, 69 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/include/linux/srcu.h b/include/linux/srcu.h index a598cf3ac70c..f149a685896c 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -48,7 +48,7 @@ struct srcu_struct { unsigned long completed; struct srcu_array __percpu *per_cpu_ref; spinlock_t queue_lock; /* protect ->batch_queue, ->running */ - bool running; + int srcu_state; /* callbacks just queued */ struct rcu_batch batch_queue; /* callbacks try to do the first check_zero */ @@ -62,6 +62,12 @@ struct srcu_struct { #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ }; +/* Values for -> state variable. */ +#define SRCU_STATE_IDLE 0 +#define SRCU_STATE_SCAN1 1 +#define SRCU_STATE_SCAN2 2 +#define SRCU_STATE_DONE 3 + #ifdef CONFIG_DEBUG_LOCK_ALLOC int __init_srcu_struct(struct srcu_struct *sp, const char *name, @@ -89,7 +95,7 @@ void process_srcu(struct work_struct *work); .completed = -300, \ .per_cpu_ref = &name##_srcu_array, \ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ - .running = false, \ + .srcu_state = SRCU_STATE_IDLE, \ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \ .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \ diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 821ecda873f2..70286f68f3a1 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -111,7 +111,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp) { sp->completed = 0; spin_lock_init(&sp->queue_lock); - sp->running = false; + sp->srcu_state = SRCU_STATE_IDLE; rcu_batch_init(&sp->batch_queue); rcu_batch_init(&sp->batch_check0); rcu_batch_init(&sp->batch_check1); @@ -270,7 +270,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp) if (WARN_ON(!rcu_all_batches_empty(sp))) return; /* Leakage unless caller handles error. */ flush_delayed_work(&sp->work); - if (WARN_ON(sp->running)) + if (WARN_ON(READ_ONCE(sp->srcu_state) != SRCU_STATE_IDLE)) return; /* Caller forgot to stop doing call_srcu()? */ free_percpu(sp->per_cpu_ref); sp->per_cpu_ref = NULL; @@ -391,8 +391,8 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head, spin_lock_irqsave(&sp->queue_lock, flags); smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ rcu_batch_queue(&sp->batch_queue, head); - if (!sp->running) { - sp->running = true; + if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) { + WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); queue_delayed_work(system_power_efficient_wq, &sp->work, 0); } spin_unlock_irqrestore(&sp->queue_lock, flags); @@ -424,9 +424,9 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) head->func = wakeme_after_rcu; spin_lock_irq(&sp->queue_lock); smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. 
*/ - if (!sp->running) { + if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) { /* steal the processing owner */ - sp->running = true; + WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); rcu_batch_queue(&sp->batch_check0, head); spin_unlock_irq(&sp->queue_lock); /* give the processing owner to work_struct */ @@ -548,7 +548,9 @@ static void srcu_collect_new(struct srcu_struct *sp) */ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) { - int idx = 1 ^ (sp->completed & 1); + int idx; + + WARN_ON_ONCE(sp->srcu_state == SRCU_STATE_IDLE); /* * Because readers might be delayed for an extended period after @@ -558,48 +560,56 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) * invoking a callback. */ - if (rcu_batch_empty(&sp->batch_check0) && - rcu_batch_empty(&sp->batch_check1)) - return; /* no callbacks need to be advanced */ - - if (!try_check_zero(sp, idx, trycount)) - return; /* failed to advance, will try after SRCU_INTERVAL */ - - /* - * The callbacks in ->batch_check1 have already done with their - * first zero check and flip back when they were enqueued on - * ->batch_check0 in a previous invocation of srcu_advance_batches(). - * (Presumably try_check_zero() returned false during that - * invocation, leaving the callbacks stranded on ->batch_check1.) - * They are therefore ready to invoke, so move them to ->batch_done. - */ - rcu_batch_move(&sp->batch_done, &sp->batch_check1); - - if (rcu_batch_empty(&sp->batch_check0)) - return; /* no callbacks need to be advanced */ - srcu_flip(sp); - - /* - * The callbacks in ->batch_check0 just finished their - * first check zero and flip, so move them to ->batch_check1 - * for future checking on the other idx. - */ - rcu_batch_move(&sp->batch_check1, &sp->batch_check0); - - /* - * SRCU read-side critical sections are normally short, so check - * at least twice in quick succession after a flip. - */ - trycount = trycount < 2 ? 2 : trycount; - if (!try_check_zero(sp, idx^1, trycount)) - return; /* failed to advance, will try after SRCU_INTERVAL */ + if (sp->srcu_state == SRCU_STATE_DONE) + WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); + + if (sp->srcu_state == SRCU_STATE_SCAN1) { + idx = 1 ^ (sp->completed & 1); + if (!try_check_zero(sp, idx, trycount)) + return; /* readers present, retry after SRCU_INTERVAL */ + + /* + * The callbacks in ->batch_check1 have already done + * with their first zero check and flip back when they were + * enqueued on ->batch_check0 in a previous invocation of + * srcu_advance_batches(). (Presumably try_check_zero() + * returned false during that invocation, leaving the + * callbacks stranded on ->batch_check1.) They are therefore + * ready to invoke, so move them to ->batch_done. + */ + rcu_batch_move(&sp->batch_done, &sp->batch_check1); + srcu_flip(sp); + + /* + * The callbacks in ->batch_check0 just finished their + * first check zero and flip, so move them to ->batch_check1 + * for future checking on the other idx. + */ + rcu_batch_move(&sp->batch_check1, &sp->batch_check0); + + WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN2); + } - /* - * The callbacks in ->batch_check1 have now waited for all - * pre-existing readers using both idx values. They are therefore - * ready to invoke, so move them to ->batch_done. - */ - rcu_batch_move(&sp->batch_done, &sp->batch_check1); + if (sp->srcu_state == SRCU_STATE_SCAN2) { + + /* + * SRCU read-side critical sections are normally short, + * so check at least twice in quick succession after a flip. 
+ */ + idx = 1 ^ (sp->completed & 1); + trycount = trycount < 2 ? 2 : trycount; + if (!try_check_zero(sp, idx, trycount)) + return; /* readers present, retry after SRCU_INTERVAL */ + + /* + * The callbacks in ->batch_check1 have now waited for + * all pre-existing readers using both idx values. They are + * therefore ready to invoke, so move them to ->batch_done. + */ + rcu_batch_move(&sp->batch_done, &sp->batch_check1); + + WRITE_ONCE(sp->srcu_state, SRCU_STATE_DONE); + } } /* @@ -633,8 +643,9 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) if (rcu_all_batches_empty(sp)) { spin_lock_irq(&sp->queue_lock); - if (rcu_all_batches_empty(sp)) { - sp->running = false; + if (rcu_all_batches_empty(sp) && + READ_ONCE(sp->srcu_state) == SRCU_STATE_DONE) { + WRITE_ONCE(sp->srcu_state, SRCU_STATE_IDLE); pending = false; } spin_unlock_irq(&sp->queue_lock); -- cgit v1.2.3 From ac367c1c621b75689f6d5cd8301d364ba2c9f292 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 11 Mar 2017 07:14:06 -0800 Subject: srcu: Add grace-period sequence numbers This commit adds grace-period sequence numbers, which will be used to handle mid-boot grace periods and per-CPU callback lists. Signed-off-by: Paul E. McKenney --- include/linux/srcu.h | 1 + kernel/rcu/srcu.c | 27 +++++++++++++++++++++++---- 2 files changed, 24 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/srcu.h b/include/linux/srcu.h index f149a685896c..047ac8c28a4e 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -46,6 +46,7 @@ struct rcu_batch { struct srcu_struct { unsigned long completed; + unsigned long srcu_gp_seq; struct srcu_array __percpu *per_cpu_ref; spinlock_t queue_lock; /* protect ->batch_queue, ->running */ int srcu_state; diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 70286f68f3a1..d464986d82b6 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -110,6 +110,7 @@ static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from) static int init_srcu_struct_fields(struct srcu_struct *sp) { sp->completed = 0; + sp->srcu_gp_seq = 0; spin_lock_init(&sp->queue_lock); sp->srcu_state = SRCU_STATE_IDLE; rcu_batch_init(&sp->batch_queue); @@ -318,6 +319,15 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock); #define SYNCHRONIZE_SRCU_TRYCOUNT 2 #define SYNCHRONIZE_SRCU_EXP_TRYCOUNT 12 +/* + * Start an SRCU grace period. + */ +static void srcu_gp_start(struct srcu_struct *sp) +{ + WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); + rcu_seq_start(&sp->srcu_gp_seq); +} + /* * @@@ Wait until all pre-existing readers complete. Such readers * will have used the index specified by "idx". @@ -354,6 +364,15 @@ static void srcu_flip(struct srcu_struct *sp) smp_mb(); /* D */ /* Pairs with C. */ } +/* + * End an SRCU grace period. + */ +static void srcu_gp_end(struct srcu_struct *sp) +{ + rcu_seq_end(&sp->srcu_gp_seq); + WRITE_ONCE(sp->srcu_state, SRCU_STATE_DONE); +} + /* * Enqueue an SRCU callback on the specified srcu_struct structure, * initiating grace-period processing if it is not already running. @@ -392,7 +411,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head, smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. 
*/ rcu_batch_queue(&sp->batch_queue, head); if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) { - WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); + srcu_gp_start(sp); queue_delayed_work(system_power_efficient_wq, &sp->work, 0); } spin_unlock_irqrestore(&sp->queue_lock, flags); @@ -426,7 +445,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) { /* steal the processing owner */ - WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); + srcu_gp_start(sp); rcu_batch_queue(&sp->batch_check0, head); spin_unlock_irq(&sp->queue_lock); /* give the processing owner to work_struct */ @@ -561,7 +580,7 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) */ if (sp->srcu_state == SRCU_STATE_DONE) - WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); + srcu_gp_start(sp); if (sp->srcu_state == SRCU_STATE_SCAN1) { idx = 1 ^ (sp->completed & 1); @@ -608,7 +627,7 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) */ rcu_batch_move(&sp->batch_done, &sp->batch_check1); - WRITE_ONCE(sp->srcu_state, SRCU_STATE_DONE); + srcu_gp_end(sp); } } -- cgit v1.2.3 From 8660b7d8a545227fd9ee80508aa82528ea9947d7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 13 Mar 2017 16:48:18 -0700 Subject: srcu: Use rcu_segcblist to track SRCU callbacks This commit switches SRCU from custom-built callback queues to the new rcu_segcblist structure. This change associates grace-period sequence numbers with groups of callbacks, which will be needed for efficient processing of per-CPU callbacks. Signed-off-by: Paul E. McKenney --- include/linux/rcu_segcblist.h | 678 ++++++++++++++++++++++++++++++++++++++++++ include/linux/srcu.h | 24 +- kernel/rcu/rcu.h | 6 + kernel/rcu/rcu_segcblist.h | 670 ----------------------------------------- kernel/rcu/srcu.c | 159 ++-------- kernel/rcu/tree.h | 2 +- 6 files changed, 721 insertions(+), 818 deletions(-) create mode 100644 include/linux/rcu_segcblist.h delete mode 100644 kernel/rcu/rcu_segcblist.h (limited to 'include') diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h new file mode 100644 index 000000000000..74b1e7243955 --- /dev/null +++ b/include/linux/rcu_segcblist.h @@ -0,0 +1,678 @@ +/* + * RCU segmented callback lists + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright IBM Corporation, 2017 + * + * Authors: Paul E. McKenney + */ + +#ifndef __KERNEL_RCU_SEGCBLIST_H +#define __KERNEL_RCU_SEGCBLIST_H + +/* Simple unsegmented callback lists. */ +struct rcu_cblist { + struct rcu_head *head; + struct rcu_head **tail; + long len; + long len_lazy; +}; + +#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head } + +/* Initialize simple callback list. 
*/ +static inline void rcu_cblist_init(struct rcu_cblist *rclp) +{ + rclp->head = NULL; + rclp->tail = &rclp->head; + rclp->len = 0; + rclp->len_lazy = 0; +} + +/* Is simple callback list empty? */ +static inline bool rcu_cblist_empty(struct rcu_cblist *rclp) +{ + return !rclp->head; +} + +/* Return number of callbacks in simple callback list. */ +static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp) +{ + return rclp->len; +} + +/* Return number of lazy callbacks in simple callback list. */ +static inline long rcu_cblist_n_lazy_cbs(struct rcu_cblist *rclp) +{ + return rclp->len_lazy; +} + +/* + * Debug function to actually count the number of callbacks. + * If the number exceeds the limit specified, return -1. + */ +static inline long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim) +{ + int cnt = 0; + struct rcu_head **rhpp = &rclp->head; + + for (;;) { + if (!*rhpp) + return cnt; + if (++cnt > lim) + return -1; + rhpp = &(*rhpp)->next; + } +} + +/* + * Dequeue the oldest rcu_head structure from the specified callback + * list. This function assumes that the callback is non-lazy, but + * the caller can later invoke rcu_cblist_dequeued_lazy() if it + * finds otherwise (and if it cares about laziness). This allows + * different users to have different ways of determining laziness. + */ +static inline struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp) +{ + struct rcu_head *rhp; + + rhp = rclp->head; + if (!rhp) + return NULL; + rclp->len--; + rclp->head = rhp->next; + if (!rclp->head) + rclp->tail = &rclp->head; + return rhp; +} + +/* + * Account for the fact that a previously dequeued callback turned out + * to be marked as lazy. + */ +static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp) +{ + rclp->len_lazy--; +} + +/* + * Interim function to return rcu_cblist head pointer. Longer term, the + * rcu_cblist will be used more pervasively, removing the need for this + * function. + */ +static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp) +{ + return rclp->head; +} + +/* + * Interim function to return rcu_cblist head pointer. Longer term, the + * rcu_cblist will be used more pervasively, removing the need for this + * function. + */ +static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp) +{ + WARN_ON_ONCE(rcu_cblist_empty(rclp)); + return rclp->tail; +} + +/* Complicated segmented callback lists. ;-) */ + +/* + * Index values for segments in rcu_segcblist structure. + * + * The segments are as follows: + * + * [head, *tails[RCU_DONE_TAIL]): + * Callbacks whose grace period has elapsed, and thus can be invoked. + * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]): + * Callbacks waiting for the current GP from the current CPU's viewpoint. + * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]): + * Callbacks that arrived before the next GP started, again from + * the current CPU's viewpoint. These can be handled by the next GP. + * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]): + * Callbacks that might have arrived after the next GP started. + * There is some uncertainty as to when a given GP starts and + * ends, but a CPU knows the exact times if it is the one starting + * or ending the GP. Other CPUs know that the previous GP ends + * before the next one starts. + * + * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also + * empty. + * + * The ->gp_seq[] array contains the grace-period number at which the + * corresponding segment of callbacks will be ready to invoke. 
A given + * element of this array is meaningful only when the corresponding segment + * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks + * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have + * not yet been assigned a grace-period number). + */ +#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ +#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ +#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ +#define RCU_NEXT_TAIL 3 +#define RCU_CBLIST_NSEGS 4 + +struct rcu_segcblist { + struct rcu_head *head; + struct rcu_head **tails[RCU_CBLIST_NSEGS]; + unsigned long gp_seq[RCU_CBLIST_NSEGS]; + long len; + long len_lazy; +}; + +#define RCU_SEGCBLIST_INITIALIZER(n) \ +{ \ + .head = NULL, \ + .tails[RCU_DONE_TAIL] = &n.head, \ + .tails[RCU_WAIT_TAIL] = &n.head, \ + .tails[RCU_NEXT_READY_TAIL] = &n.head, \ + .tails[RCU_NEXT_TAIL] = &n.head, \ +} + +/* + * Initialize an rcu_segcblist structure. + */ +static inline void rcu_segcblist_init(struct rcu_segcblist *rsclp) +{ + int i; + + BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq)); + BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq)); + rsclp->head = NULL; + for (i = 0; i < RCU_CBLIST_NSEGS; i++) + rsclp->tails[i] = &rsclp->head; + rsclp->len = 0; + rsclp->len_lazy = 0; +} + +/* + * Is the specified rcu_segcblist structure empty? + * + * But careful! The fact that the ->head field is NULL does not + * necessarily imply that there are no callbacks associated with + * this structure. When callbacks are being invoked, they are + * removed as a group. If callback invocation must be preempted, + * the remaining callbacks will be added back to the list. Either + * way, the counts are updated later. + * + * So it is often the case that rcu_segcblist_n_cbs() should be used + * instead. + */ +static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp) +{ + return !rsclp->head; +} + +/* Return number of callbacks in segmented callback list. */ +static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp) +{ + return READ_ONCE(rsclp->len); +} + +/* Return number of lazy callbacks in segmented callback list. */ +static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp) +{ + return rsclp->len_lazy; +} + +/* Return number of lazy callbacks in segmented callback list. */ +static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp) +{ + return rsclp->len - rsclp->len_lazy; +} + +/* + * Is the specified rcu_segcblist enabled, for example, not corresponding + * to an offline or callback-offloaded CPU? + */ +static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) +{ + return !!rsclp->tails[RCU_NEXT_TAIL]; +} + +/* + * Disable the specified rcu_segcblist structure, so that callbacks can + * no longer be posted to it. This structure must be empty. + */ +static inline void rcu_segcblist_disable(struct rcu_segcblist *rsclp) +{ + WARN_ON_ONCE(!rcu_segcblist_empty(rsclp)); + WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp)); + WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp)); + rsclp->tails[RCU_NEXT_TAIL] = NULL; +} + +/* + * Is the specified segment of the specified rcu_segcblist structure + * empty of callbacks? 
+ */ +static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg) +{ + if (seg == RCU_DONE_TAIL) + return &rsclp->head == rsclp->tails[RCU_DONE_TAIL]; + return rsclp->tails[seg - 1] == rsclp->tails[seg]; +} + +/* + * Are all segments following the specified segment of the specified + * rcu_segcblist structure empty of callbacks? (The specified + * segment might well contain callbacks.) + */ +static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg) +{ + return !*rsclp->tails[seg]; +} + +/* + * Does the specified rcu_segcblist structure contain callbacks that + * are ready to be invoked? + */ +static inline bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp) +{ + return rcu_segcblist_is_enabled(rsclp) && + &rsclp->head != rsclp->tails[RCU_DONE_TAIL]; +} + +/* + * Does the specified rcu_segcblist structure contain callbacks that + * are still pending, that is, not yet ready to be invoked? + */ +static inline bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp) +{ + return rcu_segcblist_is_enabled(rsclp) && + !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL); +} + +/* + * Dequeue and return the first ready-to-invoke callback. If there + * are no ready-to-invoke callbacks, return NULL. Disables interrupts + * to avoid interference. Does not protect from interference from other + * CPUs or tasks. + */ +static inline struct rcu_head * +rcu_segcblist_dequeue(struct rcu_segcblist *rsclp) +{ + unsigned long flags; + int i; + struct rcu_head *rhp; + + local_irq_save(flags); + if (!rcu_segcblist_ready_cbs(rsclp)) { + local_irq_restore(flags); + return NULL; + } + rhp = rsclp->head; + BUG_ON(!rhp); + rsclp->head = rhp->next; + for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) { + if (rsclp->tails[i] != &rhp->next) + break; + rsclp->tails[i] = &rsclp->head; + } + smp_mb(); /* Dequeue before decrement for rcu_barrier(). */ + WRITE_ONCE(rsclp->len, rsclp->len - 1); + local_irq_restore(flags); + return rhp; +} + +/* + * Account for the fact that a previously dequeued callback turned out + * to be marked as lazy. + */ +static inline void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp) +{ + unsigned long flags; + + local_irq_save(flags); + rsclp->len_lazy--; + local_irq_restore(flags); +} + +/* + * Return a pointer to the first callback in the specified rcu_segcblist + * structure. This is useful for diagnostics. + */ +static inline struct rcu_head * +rcu_segcblist_first_cb(struct rcu_segcblist *rsclp) +{ + if (rcu_segcblist_is_enabled(rsclp)) + return rsclp->head; + return NULL; +} + +/* + * Return a pointer to the first pending callback in the specified + * rcu_segcblist structure. This is useful just after posting a given + * callback -- if that callback is the first pending callback, then + * you cannot rely on someone else having already started up the required + * grace period. + */ +static inline struct rcu_head * +rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp) +{ + if (rcu_segcblist_is_enabled(rsclp)) + return *rsclp->tails[RCU_DONE_TAIL]; + return NULL; +} + +/* + * Does the specified rcu_segcblist structure contain callbacks that + * have not yet been processed beyond having been posted, that is, + * does it contain callbacks in its last segment? 
+ */ +static inline bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp) +{ + return rcu_segcblist_is_enabled(rsclp) && + !rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL); +} + +/* + * Enqueue the specified callback onto the specified rcu_segcblist + * structure, updating accounting as needed. Note that the ->len + * field may be accessed locklessly, hence the WRITE_ONCE(). + * The ->len field is used by rcu_barrier() and friends to determine + * if it must post a callback on this structure, and it is OK + * for rcu_barrier() to sometimes post callbacks needlessly, but + * absolutely not OK for it to ever miss posting a callback. + */ +static inline void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, + struct rcu_head *rhp, bool lazy) +{ + WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */ + if (lazy) + rsclp->len_lazy++; + smp_mb(); /* Ensure counts are updated before callback is enqueued. */ + rhp->next = NULL; + *rsclp->tails[RCU_NEXT_TAIL] = rhp; + rsclp->tails[RCU_NEXT_TAIL] = &rhp->next; +} + +/* + * Extract only the counts from the specified rcu_segcblist structure, + * and place them in the specified rcu_cblist structure. This function + * supports both callback orphaning and invocation, hence the separation + * of counts and callbacks. (Callbacks ready for invocation must be + * orphaned and adopted separately from pending callbacks, but counts + * apply to all callbacks. Locking must be used to make sure that + * both orphaned-callbacks lists are consistent.) + */ +static inline void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + rclp->len_lazy += rsclp->len_lazy; + rclp->len += rsclp->len; + rsclp->len_lazy = 0; + WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */ +} + +/* + * Extract only those callbacks ready to be invoked from the specified + * rcu_segcblist structure and place them in the specified rcu_cblist + * structure. + */ +static inline void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + int i; + + if (!rcu_segcblist_ready_cbs(rsclp)) + return; /* Nothing to do. */ + *rclp->tail = rsclp->head; + rsclp->head = *rsclp->tails[RCU_DONE_TAIL]; + *rsclp->tails[RCU_DONE_TAIL] = NULL; + rclp->tail = rsclp->tails[RCU_DONE_TAIL]; + for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--) + if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL]) + rsclp->tails[i] = &rsclp->head; +} + +/* + * Extract only those callbacks still pending (not yet ready to be + * invoked) from the specified rcu_segcblist structure and place them in + * the specified rcu_cblist structure. Note that this loses information + * about any callbacks that might have been partway done waiting for + * their grace period. Too bad! They will have to start over. + */ +static inline void +rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + int i; + + if (!rcu_segcblist_pend_cbs(rsclp)) + return; /* Nothing to do. */ + *rclp->tail = *rsclp->tails[RCU_DONE_TAIL]; + rclp->tail = rsclp->tails[RCU_NEXT_TAIL]; + *rsclp->tails[RCU_DONE_TAIL] = NULL; + for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) + rsclp->tails[i] = rsclp->tails[RCU_DONE_TAIL]; +} + +/* + * Move the entire contents of the specified rcu_segcblist structure, + * counts, callbacks, and all, to the specified rcu_cblist structure. + * @@@ Why do we need this??? Moving early-boot CBs to NOCB lists? + * @@@ Memory barrier needed? (Not if only used at boot time...) 
+ */ +static inline void rcu_segcblist_extract_all(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + rcu_segcblist_extract_done_cbs(rsclp, rclp); + rcu_segcblist_extract_pend_cbs(rsclp, rclp); + rcu_segcblist_extract_count(rsclp, rclp); +} + +/* + * Insert counts from the specified rcu_cblist structure in the + * specified rcu_segcblist structure. + */ +static inline void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + rsclp->len_lazy += rclp->len_lazy; + /* ->len sampled locklessly. */ + WRITE_ONCE(rsclp->len, rsclp->len + rclp->len); + rclp->len_lazy = 0; + rclp->len = 0; +} + +/* + * Move callbacks from the specified rcu_cblist to the beginning of the + * done-callbacks segment of the specified rcu_segcblist. + */ +static inline void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + int i; + + if (!rclp->head) + return; /* No callbacks to move. */ + *rclp->tail = rsclp->head; + rsclp->head = rclp->head; + for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) + if (&rsclp->head == rsclp->tails[i]) + rsclp->tails[i] = rclp->tail; + else + break; + rclp->head = NULL; + rclp->tail = &rclp->head; +} + +/* + * Move callbacks from the specified rcu_cblist to the end of the + * new-callbacks segment of the specified rcu_segcblist. + */ +static inline void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + if (!rclp->head) + return; /* Nothing to do. */ + *rsclp->tails[RCU_NEXT_TAIL] = rclp->head; + rsclp->tails[RCU_NEXT_TAIL] = rclp->tail; + rclp->head = NULL; + rclp->tail = &rclp->head; +} + +/* + * Advance the callbacks in the specified rcu_segcblist structure based + * on the current value passed in for the grace-period counter. + */ +static inline void rcu_segcblist_advance(struct rcu_segcblist *rsclp, + unsigned long seq) +{ + int i, j; + + WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); + WARN_ON_ONCE(rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)); + + /* + * Find all callbacks whose ->gp_seq numbers indicate that they + * are ready to invoke, and put them into the RCU_DONE_TAIL segment. + */ + for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { + if (ULONG_CMP_LT(seq, rsclp->gp_seq[i])) + break; + rsclp->tails[RCU_DONE_TAIL] = rsclp->tails[i]; + } + + /* If no callbacks moved, nothing more need be done. */ + if (i == RCU_WAIT_TAIL) + return; + + /* Clean up tail pointers that might have been misordered above. */ + for (j = RCU_WAIT_TAIL; j < i; j++) + rsclp->tails[j] = rsclp->tails[RCU_DONE_TAIL]; + + /* + * Callbacks moved, so clean up the misordered ->tails[] pointers + * that now point into the middle of the list of ready-to-invoke + * callbacks. The overall effect is to copy down the later pointers + * into the gap that was created by the now-ready segments. + */ + for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { + if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL]) + break; /* No more callbacks. */ + rsclp->tails[j] = rsclp->tails[i]; + rsclp->gp_seq[j] = rsclp->gp_seq[i]; + } +} + +/* + * "Accelerate" callbacks based on more-accurate grace-period information. + * The reason for this is that RCU does not synchronize the beginnings and + * ends of grace periods, and that callbacks are posted locally. This in + * turn means that the callbacks must be labelled conservatively early + * on, as getting exact information would degrade both performance and + * scalability. 
When more accurate grace-period information becomes + * available, previously posted callbacks can be "accelerated", marking + * them to complete at the end of the earlier grace period. + * + * This function operates on an rcu_segcblist structure, and also the + * grace-period sequence number at which new callbacks would become + * ready to invoke. + */ +static inline bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, + unsigned long seq) +{ + int i; + + WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); + WARN_ON_ONCE(rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)); + + /* + * Find the segment preceding the oldest segment of callbacks + * whose ->gp_seq[] completion is at or after that passed in via + * "seq", skipping any empty segments. This oldest segment, along + * with any later segments, can be merged in with any newly arrived + * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq" + * as their ->gp_seq[] grace-period completion sequence number. + */ + for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--) + if (rsclp->tails[i] != rsclp->tails[i - 1] && + ULONG_CMP_LT(rsclp->gp_seq[i], seq)) + break; + + /* + * If all the segments contain callbacks that correspond to + * earlier grace-period sequence numbers than "seq", leave. + * Assuming that the rcu_segcblist structure has enough + * segments in its arrays, this can only happen if some of + * the non-done segments contain callbacks that really are + * ready to invoke. This situation will get straightened + * out by the next call to rcu_segcblist_advance(). + * + * Also advance to the oldest segment of callbacks whose + * ->gp_seq[] completion is at or after that passed in via "seq", + * skipping any empty segments. + */ + if (++i >= RCU_NEXT_TAIL) + return false; + + /* + * Merge all later callbacks, including newly arrived callbacks, + * into the segment located by the for-loop above. Assign "seq" + * as the ->gp_seq[] value in order to correctly handle the case + * where there were no pending callbacks in the rcu_segcblist + * structure other than in the RCU_NEXT_TAIL segment. + */ + for (; i < RCU_NEXT_TAIL; i++) { + rsclp->tails[i] = rsclp->tails[RCU_NEXT_TAIL]; + rsclp->gp_seq[i] = seq; + } + return true; +} + +/* + * Scan the specified rcu_segcblist structure for callbacks that need + * a grace period later than the one specified by "seq". We don't look + * at the RCU_DONE_TAIL or RCU_NEXT_TAIL segments because they don't + * have a grace-period sequence number. + */ +static inline bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp, + unsigned long seq) +{ + int i; + + for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) + if (rsclp->tails[i - 1] != rsclp->tails[i] && + ULONG_CMP_LT(seq, rsclp->gp_seq[i])) + return true; + return false; +} + +/* + * Interim function to return rcu_segcblist head pointer. Longer term, the + * rcu_segcblist will be used more pervasively, removing the need for this + * function. + */ +static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp) +{ + return rsclp->head; +} + +/* + * Interim function to return rcu_segcblist head pointer. Longer term, the + * rcu_segcblist will be used more pervasively, removing the need for this + * function. 
+ */ +static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp) +{ + WARN_ON_ONCE(rcu_segcblist_empty(rsclp)); + return rsclp->tails[RCU_NEXT_TAIL]; +} + +#endif /* __KERNEL_RCU_SEGCBLIST_H */ diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 047ac8c28a4e..ad154a7bc114 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -22,7 +22,7 @@ * Lai Jiangshan * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU/ *.txt + * Documentation/RCU/ *.txt * */ @@ -32,31 +32,20 @@ #include #include #include +#include struct srcu_array { unsigned long lock_count[2]; unsigned long unlock_count[2]; }; -struct rcu_batch { - struct rcu_head *head, **tail; -}; - -#define RCU_BATCH_INIT(name) { NULL, &(name.head) } - struct srcu_struct { unsigned long completed; unsigned long srcu_gp_seq; struct srcu_array __percpu *per_cpu_ref; - spinlock_t queue_lock; /* protect ->batch_queue, ->running */ + spinlock_t queue_lock; /* protect ->srcu_cblist, ->srcu_state */ int srcu_state; - /* callbacks just queued */ - struct rcu_batch batch_queue; - /* callbacks try to do the first check_zero */ - struct rcu_batch batch_check0; - /* callbacks done with the first check_zero and the flip */ - struct rcu_batch batch_check1; - struct rcu_batch batch_done; + struct rcu_segcblist srcu_cblist; struct delayed_work work; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -97,10 +86,7 @@ void process_srcu(struct work_struct *work); .per_cpu_ref = &name##_srcu_array, \ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ .srcu_state = SRCU_STATE_IDLE, \ - .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ - .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \ - .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \ - .batch_done = RCU_BATCH_INIT(name.batch_done), \ + .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist),\ .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\ __SRCU_DEP_MAP_INIT(name) \ } diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 0bc1313c49e2..a943b42a9cf7 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -87,6 +87,12 @@ static inline unsigned long rcu_seq_snap(unsigned long *sp) return s; } +/* Return the current value the update side's sequence number, no ordering. */ +static inline unsigned long rcu_seq_current(unsigned long *sp) +{ + return READ_ONCE(*sp); +} + /* * Given a snapshot from rcu_seq_snap(), determine whether or not a * full update-side operation has occurred. diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h deleted file mode 100644 index 982e3e05b22a..000000000000 --- a/kernel/rcu/rcu_segcblist.h +++ /dev/null @@ -1,670 +0,0 @@ -/* - * RCU segmented callback lists - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * - * Copyright IBM Corporation, 2017 - * - * Authors: Paul E. 
McKenney - */ - -#ifndef __KERNEL_RCU_SEGCBLIST_H -#define __KERNEL_RCU_SEGCBLIST_H - -/* Simple unsegmented callback lists. */ -struct rcu_cblist { - struct rcu_head *head; - struct rcu_head **tail; - long len; - long len_lazy; -}; - -#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head } - -/* Initialize simple callback list. */ -static inline void rcu_cblist_init(struct rcu_cblist *rclp) -{ - rclp->head = NULL; - rclp->tail = &rclp->head; - rclp->len = 0; - rclp->len_lazy = 0; -} - -/* Is simple callback list empty? */ -static inline bool rcu_cblist_empty(struct rcu_cblist *rclp) -{ - return !rclp->head; -} - -/* Return number of callbacks in simple callback list. */ -static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp) -{ - return rclp->len; -} - -/* Return number of lazy callbacks in simple callback list. */ -static inline long rcu_cblist_n_lazy_cbs(struct rcu_cblist *rclp) -{ - return rclp->len_lazy; -} - -/* - * Debug function to actually count the number of callbacks. - * If the number exceeds the limit specified, return -1. - */ -static inline long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim) -{ - int cnt = 0; - struct rcu_head **rhpp = &rclp->head; - - for (;;) { - if (!*rhpp) - return cnt; - if (++cnt > lim) - return -1; - rhpp = &(*rhpp)->next; - } -} - -/* - * Dequeue the oldest rcu_head structure from the specified callback - * list. This function assumes that the callback is non-lazy, but - * the caller can later invoke rcu_cblist_dequeued_lazy() if it - * finds otherwise (and if it cares about laziness). This allows - * different users to have different ways of determining laziness. - */ -static inline struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp) -{ - struct rcu_head *rhp; - - rhp = rclp->head; - if (!rhp) - return NULL; - prefetch(rhp); - rclp->len--; - rclp->head = rhp->next; - if (!rclp->head) - rclp->tail = &rclp->head; - return rhp; -} - -/* - * Account for the fact that a previously dequeued callback turned out - * to be marked as lazy. - */ -static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp) -{ - rclp->len_lazy--; -} - -/* - * Interim function to return rcu_cblist head pointer. Longer term, the - * rcu_cblist will be used more pervasively, removing the need for this - * function. - */ -static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp) -{ - return rclp->head; -} - -/* - * Interim function to return rcu_cblist head pointer. Longer term, the - * rcu_cblist will be used more pervasively, removing the need for this - * function. - */ -static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp) -{ - WARN_ON_ONCE(rcu_cblist_empty(rclp)); - return rclp->tail; -} - -/* Complicated segmented callback lists. ;-) */ - -/* - * Index values for segments in rcu_segcblist structure. - * - * The segments are as follows: - * - * [head, *tails[RCU_DONE_TAIL]): - * Callbacks whose grace period has elapsed, and thus can be invoked. - * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]): - * Callbacks waiting for the current GP from the current CPU's viewpoint. - * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]): - * Callbacks that arrived before the next GP started, again from - * the current CPU's viewpoint. These can be handled by the next GP. - * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]): - * Callbacks that might have arrived after the next GP started. 
- * There is some uncertainty as to when a given GP starts and - * ends, but a CPU knows the exact times if it is the one starting - * or ending the GP. Other CPUs know that the previous GP ends - * before the next one starts. - * - * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also - * empty. - * - * The ->gp_seq[] array contains the grace-period number at which the - * corresponding segment of callbacks will be ready to invoke. A given - * element of this array is meaningful only when the corresponding segment - * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks - * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have - * not yet been assigned a grace-period number). - */ -#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ -#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ -#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ -#define RCU_NEXT_TAIL 3 -#define RCU_CBLIST_NSEGS 4 - -struct rcu_segcblist { - struct rcu_head *head; - struct rcu_head **tails[RCU_CBLIST_NSEGS]; - unsigned long gp_seq[RCU_CBLIST_NSEGS]; - long len; - long len_lazy; -}; - -/* - * Initialize an rcu_segcblist structure. - */ -static inline void rcu_segcblist_init(struct rcu_segcblist *rsclp) -{ - int i; - - BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq)); - BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq)); - rsclp->head = NULL; - for (i = 0; i < RCU_CBLIST_NSEGS; i++) - rsclp->tails[i] = &rsclp->head; - rsclp->len = 0; - rsclp->len_lazy = 0; -} - -/* - * Is the specified rcu_segcblist structure empty? - * - * But careful! The fact that the ->head field is NULL does not - * necessarily imply that there are no callbacks associated with - * this structure. When callbacks are being invoked, they are - * removed as a group. If callback invocation must be preempted, - * the remaining callbacks will be added back to the list. Either - * way, the counts are updated later. - * - * So it is often the case that rcu_segcblist_n_cbs() should be used - * instead. - */ -static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp) -{ - return !rsclp->head; -} - -/* Return number of callbacks in segmented callback list. */ -static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp) -{ - return READ_ONCE(rsclp->len); -} - -/* Return number of lazy callbacks in segmented callback list. */ -static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp) -{ - return rsclp->len_lazy; -} - -/* Return number of lazy callbacks in segmented callback list. */ -static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp) -{ - return rsclp->len - rsclp->len_lazy; -} - -/* - * Is the specified rcu_segcblist enabled, for example, not corresponding - * to an offline or callback-offloaded CPU? - */ -static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) -{ - return !!rsclp->tails[RCU_NEXT_TAIL]; -} - -/* - * Disable the specified rcu_segcblist structure, so that callbacks can - * no longer be posted to it. This structure must be empty. - */ -static inline void rcu_segcblist_disable(struct rcu_segcblist *rsclp) -{ - WARN_ON_ONCE(!rcu_segcblist_empty(rsclp)); - WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp)); - WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp)); - rsclp->tails[RCU_NEXT_TAIL] = NULL; -} - -/* - * Is the specified segment of the specified rcu_segcblist structure - * empty of callbacks? 
- */ -static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg) -{ - if (seg == RCU_DONE_TAIL) - return &rsclp->head == rsclp->tails[RCU_DONE_TAIL]; - return rsclp->tails[seg - 1] == rsclp->tails[seg]; -} - -/* - * Are all segments following the specified segment of the specified - * rcu_segcblist structure empty of callbacks? (The specified - * segment might well contain callbacks.) - */ -static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg) -{ - return !*rsclp->tails[seg]; -} - -/* - * Does the specified rcu_segcblist structure contain callbacks that - * are ready to be invoked? - */ -static inline bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp) -{ - return rcu_segcblist_is_enabled(rsclp) && - &rsclp->head != rsclp->tails[RCU_DONE_TAIL]; -} - -/* - * Does the specified rcu_segcblist structure contain callbacks that - * are still pending, that is, not yet ready to be invoked? - */ -static inline bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp) -{ - return rcu_segcblist_is_enabled(rsclp) && - !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL); -} - -/* - * Dequeue and return the first ready-to-invoke callback. If there - * are no ready-to-invoke callbacks, return NULL. Disables interrupts - * to avoid interference. Does not protect from interference from other - * CPUs or tasks. - */ -static inline struct rcu_head * -rcu_segcblist_dequeue(struct rcu_segcblist *rsclp) -{ - unsigned long flags; - int i; - struct rcu_head *rhp; - - local_irq_save(flags); - if (!rcu_segcblist_ready_cbs(rsclp)) { - local_irq_restore(flags); - return NULL; - } - rhp = rsclp->head; - BUG_ON(!rhp); - rsclp->head = rhp->next; - for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) { - if (rsclp->tails[i] != &rhp->next) - break; - rsclp->tails[i] = &rsclp->head; - } - smp_mb(); /* Dequeue before decrement for rcu_barrier(). */ - WRITE_ONCE(rsclp->len, rsclp->len - 1); - local_irq_restore(flags); - return rhp; -} - -/* - * Account for the fact that a previously dequeued callback turned out - * to be marked as lazy. - */ -static inline void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp) -{ - unsigned long flags; - - local_irq_save(flags); - rsclp->len_lazy--; - local_irq_restore(flags); -} - -/* - * Return a pointer to the first callback in the specified rcu_segcblist - * structure. This is useful for diagnostics. - */ -static inline struct rcu_head * -rcu_segcblist_first_cb(struct rcu_segcblist *rsclp) -{ - if (rcu_segcblist_is_enabled(rsclp)) - return rsclp->head; - return NULL; -} - -/* - * Return a pointer to the first pending callback in the specified - * rcu_segcblist structure. This is useful just after posting a given - * callback -- if that callback is the first pending callback, then - * you cannot rely on someone else having already started up the required - * grace period. - */ -static inline struct rcu_head * -rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp) -{ - if (rcu_segcblist_is_enabled(rsclp)) - return *rsclp->tails[RCU_DONE_TAIL]; - return NULL; -} - -/* - * Does the specified rcu_segcblist structure contain callbacks that - * have not yet been processed beyond having been posted, that is, - * does it contain callbacks in its last segment? 
- */ -static inline bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp) -{ - return rcu_segcblist_is_enabled(rsclp) && - !rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL); -} - -/* - * Enqueue the specified callback onto the specified rcu_segcblist - * structure, updating accounting as needed. Note that the ->len - * field may be accessed locklessly, hence the WRITE_ONCE(). - * The ->len field is used by rcu_barrier() and friends to determine - * if it must post a callback on this structure, and it is OK - * for rcu_barrier() to sometimes post callbacks needlessly, but - * absolutely not OK for it to ever miss posting a callback. - */ -static inline void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, - struct rcu_head *rhp, bool lazy) -{ - WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */ - if (lazy) - rsclp->len_lazy++; - smp_mb(); /* Ensure counts are updated before callback is enqueued. */ - rhp->next = NULL; - *rsclp->tails[RCU_NEXT_TAIL] = rhp; - rsclp->tails[RCU_NEXT_TAIL] = &rhp->next; -} - -/* - * Extract only the counts from the specified rcu_segcblist structure, - * and place them in the specified rcu_cblist structure. This function - * supports both callback orphaning and invocation, hence the separation - * of counts and callbacks. (Callbacks ready for invocation must be - * orphaned and adopted separately from pending callbacks, but counts - * apply to all callbacks. Locking must be used to make sure that - * both orphaned-callbacks lists are consistent.) - */ -static inline void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - rclp->len_lazy += rsclp->len_lazy; - rclp->len += rsclp->len; - rsclp->len_lazy = 0; - WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */ -} - -/* - * Extract only those callbacks ready to be invoked from the specified - * rcu_segcblist structure and place them in the specified rcu_cblist - * structure. - */ -static inline void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - int i; - - if (!rcu_segcblist_ready_cbs(rsclp)) - return; /* Nothing to do. */ - *rclp->tail = rsclp->head; - rsclp->head = *rsclp->tails[RCU_DONE_TAIL]; - *rsclp->tails[RCU_DONE_TAIL] = NULL; - rclp->tail = rsclp->tails[RCU_DONE_TAIL]; - for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--) - if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL]) - rsclp->tails[i] = &rsclp->head; -} - -/* - * Extract only those callbacks still pending (not yet ready to be - * invoked) from the specified rcu_segcblist structure and place them in - * the specified rcu_cblist structure. Note that this loses information - * about any callbacks that might have been partway done waiting for - * their grace period. Too bad! They will have to start over. - */ -static inline void -rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - int i; - - if (!rcu_segcblist_pend_cbs(rsclp)) - return; /* Nothing to do. */ - *rclp->tail = *rsclp->tails[RCU_DONE_TAIL]; - rclp->tail = rsclp->tails[RCU_NEXT_TAIL]; - *rsclp->tails[RCU_DONE_TAIL] = NULL; - for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) - rsclp->tails[i] = rsclp->tails[RCU_DONE_TAIL]; -} - -/* - * Move the entire contents of the specified rcu_segcblist structure, - * counts, callbacks, and all, to the specified rcu_cblist structure. - * @@@ Why do we need this??? Moving early-boot CBs to NOCB lists? - * @@@ Memory barrier needed? (Not if only used at boot time...) 
- */ -static inline void rcu_segcblist_extract_all(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - rcu_segcblist_extract_done_cbs(rsclp, rclp); - rcu_segcblist_extract_pend_cbs(rsclp, rclp); - rcu_segcblist_extract_count(rsclp, rclp); -} - -/* - * Insert counts from the specified rcu_cblist structure in the - * specified rcu_segcblist structure. - */ -static inline void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - rsclp->len_lazy += rclp->len_lazy; - /* ->len sampled locklessly. */ - WRITE_ONCE(rsclp->len, rsclp->len + rclp->len); - rclp->len_lazy = 0; - rclp->len = 0; -} - -/* - * Move callbacks from the specified rcu_cblist to the beginning of the - * done-callbacks segment of the specified rcu_segcblist. - */ -static inline void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - int i; - - if (!rclp->head) - return; /* No callbacks to move. */ - *rclp->tail = rsclp->head; - rsclp->head = rclp->head; - for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) - if (&rsclp->head == rsclp->tails[i]) - rsclp->tails[i] = rclp->tail; - else - break; - rclp->head = NULL; - rclp->tail = &rclp->head; -} - -/* - * Move callbacks from the specified rcu_cblist to the end of the - * new-callbacks segment of the specified rcu_segcblist. - */ -static inline void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - if (!rclp->head) - return; /* Nothing to do. */ - *rsclp->tails[RCU_NEXT_TAIL] = rclp->head; - rsclp->tails[RCU_NEXT_TAIL] = rclp->tail; - rclp->head = NULL; - rclp->tail = &rclp->head; -} - -/* - * Advance the callbacks in the specified rcu_segcblist structure based - * on the current value passed in for the grace-period counter. - */ -static inline void rcu_segcblist_advance(struct rcu_segcblist *rsclp, - unsigned long seq) -{ - int i, j; - - WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); - WARN_ON_ONCE(rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)); - - /* - * Find all callbacks whose ->gp_seq numbers indicate that they - * are ready to invoke, and put them into the RCU_DONE_TAIL segment. - */ - for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { - if (ULONG_CMP_LT(seq, rsclp->gp_seq[i])) - break; - rsclp->tails[RCU_DONE_TAIL] = rsclp->tails[i]; - } - - /* If no callbacks moved, nothing more need be done. */ - if (i == RCU_WAIT_TAIL) - return; - - /* Clean up tail pointers that might have been misordered above. */ - for (j = RCU_WAIT_TAIL; j < i; j++) - rsclp->tails[j] = rsclp->tails[RCU_DONE_TAIL]; - - /* - * Callbacks moved, so clean up the misordered ->tails[] pointers - * that now point into the middle of the list of ready-to-invoke - * callbacks. The overall effect is to copy down the later pointers - * into the gap that was created by the now-ready segments. - */ - for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { - if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL]) - break; /* No more callbacks. */ - rsclp->tails[j] = rsclp->tails[i]; - rsclp->gp_seq[j] = rsclp->gp_seq[i]; - } -} - -/* - * "Accelerate" callbacks based on more-accurate grace-period information. - * The reason for this is that RCU does not synchronize the beginnings and - * ends of grace periods, and that callbacks are posted locally. This in - * turn means that the callbacks must be labelled conservatively early - * on, as getting exact information would degrade both performance and - * scalability. 
When more accurate grace-period information becomes - * available, previously posted callbacks can be "accelerated", marking - * them to complete at the end of the earlier grace period. - * - * This function operates on an rcu_segcblist structure, and also the - * grace-period sequence number at which new callbacks would become - * ready to invoke. - */ -static inline bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, - unsigned long seq) -{ - int i; - - WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); - WARN_ON_ONCE(rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)); - - /* - * Find the segment preceding the oldest segment of callbacks - * whose ->gp_seq[] completion is at or after that passed in via - * "seq", skipping any empty segments. This oldest segment, along - * with any later segments, can be merged in with any newly arrived - * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq" - * as their ->gp_seq[] grace-period completion sequence number. - */ - for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--) - if (rsclp->tails[i] != rsclp->tails[i - 1] && - ULONG_CMP_LT(rsclp->gp_seq[i], seq)) - break; - - /* - * If all the segments contain callbacks that correspond to - * earlier grace-period sequence numbers than "seq", leave. - * Assuming that the rcu_segcblist structure has enough - * segments in its arrays, this can only happen if some of - * the non-done segments contain callbacks that really are - * ready to invoke. This situation will get straightened - * out by the next call to rcu_segcblist_advance(). - * - * Also advance to the oldest segment of callbacks whose - * ->gp_seq[] completion is at or after that passed in via "seq", - * skipping any empty segments. - */ - if (++i >= RCU_NEXT_TAIL) - return false; - - /* - * Merge all later callbacks, including newly arrived callbacks, - * into the segment located by the for-loop above. Assign "seq" - * as the ->gp_seq[] value in order to correctly handle the case - * where there were no pending callbacks in the rcu_segcblist - * structure other than in the RCU_NEXT_TAIL segment. - */ - for (; i < RCU_NEXT_TAIL; i++) { - rsclp->tails[i] = rsclp->tails[RCU_NEXT_TAIL]; - rsclp->gp_seq[i] = seq; - } - return true; -} - -/* - * Scan the specified rcu_segcblist structure for callbacks that need - * a grace period later than the one specified by "seq". We don't look - * at the RCU_DONE_TAIL or RCU_NEXT_TAIL segments because they don't - * have a grace-period sequence number. - */ -static inline bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp, - unsigned long seq) -{ - int i; - - for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) - if (rsclp->tails[i - 1] != rsclp->tails[i] && - ULONG_CMP_LT(seq, rsclp->gp_seq[i])) - return true; - return false; -} - -/* - * Interim function to return rcu_segcblist head pointer. Longer term, the - * rcu_segcblist will be used more pervasively, removing the need for this - * function. - */ -static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp) -{ - return rsclp->head; -} - -/* - * Interim function to return rcu_segcblist head pointer. Longer term, the - * rcu_segcblist will be used more pervasively, removing the need for this - * function. 
- */ -static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp) -{ - WARN_ON_ONCE(rcu_segcblist_empty(rsclp)); - return rsclp->tails[RCU_NEXT_TAIL]; -} - -#endif /* __KERNEL_RCU_SEGCBLIST_H */ diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index d464986d82b6..56fd30862122 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -22,7 +22,7 @@ * Lai Jiangshan * * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU/ *.txt + * Documentation/RCU/ *.txt * */ @@ -38,85 +38,13 @@ #include "rcu.h" -/* - * Initialize an rcu_batch structure to empty. - */ -static inline void rcu_batch_init(struct rcu_batch *b) -{ - b->head = NULL; - b->tail = &b->head; -} - -/* - * Enqueue a callback onto the tail of the specified rcu_batch structure. - */ -static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head) -{ - *b->tail = head; - b->tail = &head->next; -} - -/* - * Is the specified rcu_batch structure empty? - */ -static inline bool rcu_batch_empty(struct rcu_batch *b) -{ - return b->tail == &b->head; -} - -/* - * Are all batches empty for the specified srcu_struct? - */ -static inline bool rcu_all_batches_empty(struct srcu_struct *sp) -{ - return rcu_batch_empty(&sp->batch_done) && - rcu_batch_empty(&sp->batch_check1) && - rcu_batch_empty(&sp->batch_check0) && - rcu_batch_empty(&sp->batch_queue); -} - -/* - * Remove the callback at the head of the specified rcu_batch structure - * and return a pointer to it, or return NULL if the structure is empty. - */ -static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b) -{ - struct rcu_head *head; - - if (rcu_batch_empty(b)) - return NULL; - - head = b->head; - b->head = head->next; - if (b->tail == &head->next) - rcu_batch_init(b); - - return head; -} - -/* - * Move all callbacks from the rcu_batch structure specified by "from" to - * the structure specified by "to". - */ -static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from) -{ - if (!rcu_batch_empty(from)) { - *to->tail = from->head; - to->tail = from->tail; - rcu_batch_init(from); - } -} - static int init_srcu_struct_fields(struct srcu_struct *sp) { sp->completed = 0; sp->srcu_gp_seq = 0; spin_lock_init(&sp->queue_lock); sp->srcu_state = SRCU_STATE_IDLE; - rcu_batch_init(&sp->batch_queue); - rcu_batch_init(&sp->batch_check0); - rcu_batch_init(&sp->batch_check1); - rcu_batch_init(&sp->batch_done); + rcu_segcblist_init(&sp->srcu_cblist); INIT_DELAYED_WORK(&sp->work, process_srcu); sp->per_cpu_ref = alloc_percpu(struct srcu_array); return sp->per_cpu_ref ? 0 : -ENOMEM; @@ -268,7 +196,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp) { if (WARN_ON(srcu_readers_active(sp))) return; /* Leakage unless caller handles error. */ - if (WARN_ON(!rcu_all_batches_empty(sp))) + if (WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist))) return; /* Leakage unless caller handles error. 
*/ flush_delayed_work(&sp->work); if (WARN_ON(READ_ONCE(sp->srcu_state) != SRCU_STATE_IDLE)) @@ -324,6 +252,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock); */ static void srcu_gp_start(struct srcu_struct *sp) { + rcu_segcblist_accelerate(&sp->srcu_cblist, + rcu_seq_snap(&sp->srcu_gp_seq)); WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); rcu_seq_start(&sp->srcu_gp_seq); } @@ -371,6 +301,11 @@ static void srcu_gp_end(struct srcu_struct *sp) { rcu_seq_end(&sp->srcu_gp_seq); WRITE_ONCE(sp->srcu_state, SRCU_STATE_DONE); + + spin_lock_irq(&sp->queue_lock); + rcu_segcblist_advance(&sp->srcu_cblist, + rcu_seq_current(&sp->srcu_gp_seq)); + spin_unlock_irq(&sp->queue_lock); } /* @@ -409,7 +344,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head, head->func = func; spin_lock_irqsave(&sp->queue_lock, flags); smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ - rcu_batch_queue(&sp->batch_queue, head); + rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) { srcu_gp_start(sp); queue_delayed_work(system_power_efficient_wq, &sp->work, 0); @@ -445,13 +380,13 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) { /* steal the processing owner */ + rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); srcu_gp_start(sp); - rcu_batch_queue(&sp->batch_check0, head); spin_unlock_irq(&sp->queue_lock); /* give the processing owner to work_struct */ srcu_reschedule(sp, 0); } else { - rcu_batch_queue(&sp->batch_queue, head); + rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); spin_unlock_irq(&sp->queue_lock); } @@ -548,19 +483,6 @@ EXPORT_SYMBOL_GPL(srcu_batches_completed); #define SRCU_CALLBACK_BATCH 10 #define SRCU_INTERVAL 1 -/* - * Move any new SRCU callbacks to the first stage of the SRCU grace - * period pipeline. - */ -static void srcu_collect_new(struct srcu_struct *sp) -{ - if (!rcu_batch_empty(&sp->batch_queue)) { - spin_lock_irq(&sp->queue_lock); - rcu_batch_move(&sp->batch_check0, &sp->batch_queue); - spin_unlock_irq(&sp->queue_lock); - } -} - /* * Core SRCU state machine. Advance callbacks from ->batch_check0 to * ->batch_check1 and then to ->batch_done as readers drain. @@ -586,26 +508,7 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) idx = 1 ^ (sp->completed & 1); if (!try_check_zero(sp, idx, trycount)) return; /* readers present, retry after SRCU_INTERVAL */ - - /* - * The callbacks in ->batch_check1 have already done - * with their first zero check and flip back when they were - * enqueued on ->batch_check0 in a previous invocation of - * srcu_advance_batches(). (Presumably try_check_zero() - * returned false during that invocation, leaving the - * callbacks stranded on ->batch_check1.) They are therefore - * ready to invoke, so move them to ->batch_done. - */ - rcu_batch_move(&sp->batch_done, &sp->batch_check1); srcu_flip(sp); - - /* - * The callbacks in ->batch_check0 just finished their - * first check zero and flip, so move them to ->batch_check1 - * for future checking on the other idx. - */ - rcu_batch_move(&sp->batch_check1, &sp->batch_check0); - WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN2); } @@ -619,14 +522,6 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) trycount = trycount < 2 ? 
2 : trycount; if (!try_check_zero(sp, idx, trycount)) return; /* readers present, retry after SRCU_INTERVAL */ - - /* - * The callbacks in ->batch_check1 have now waited for - * all pre-existing readers using both idx values. They are - * therefore ready to invoke, so move them to ->batch_done. - */ - rcu_batch_move(&sp->batch_done, &sp->batch_check1); - srcu_gp_end(sp); } } @@ -639,17 +534,26 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) */ static void srcu_invoke_callbacks(struct srcu_struct *sp) { - int i; - struct rcu_head *head; + struct rcu_cblist ready_cbs; + struct rcu_head *rhp; - for (i = 0; i < SRCU_CALLBACK_BATCH; i++) { - head = rcu_batch_dequeue(&sp->batch_done); - if (!head) - break; + spin_lock_irq(&sp->queue_lock); + if (!rcu_segcblist_ready_cbs(&sp->srcu_cblist)) { + spin_unlock_irq(&sp->queue_lock); + return; + } + rcu_cblist_init(&ready_cbs); + rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs); + spin_unlock_irq(&sp->queue_lock); + rhp = rcu_cblist_dequeue(&ready_cbs); + for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { local_bh_disable(); - head->func(head); + rhp->func(rhp); local_bh_enable(); } + spin_lock_irq(&sp->queue_lock); + rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs); + spin_unlock_irq(&sp->queue_lock); } /* @@ -660,9 +564,9 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) { bool pending = true; - if (rcu_all_batches_empty(sp)) { + if (rcu_segcblist_empty(&sp->srcu_cblist)) { spin_lock_irq(&sp->queue_lock); - if (rcu_all_batches_empty(sp) && + if (rcu_segcblist_empty(&sp->srcu_cblist) && READ_ONCE(sp->srcu_state) == SRCU_STATE_DONE) { WRITE_ONCE(sp->srcu_state, SRCU_STATE_IDLE); pending = false; @@ -683,7 +587,6 @@ void process_srcu(struct work_struct *work) sp = container_of(work, struct srcu_struct, work.work); - srcu_collect_new(sp); srcu_advance_batches(sp, 1); srcu_invoke_callbacks(sp); srcu_reschedule(sp, SRCU_INTERVAL); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 93889ff21dbb..4f62651588ea 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -30,7 +30,7 @@ #include #include #include -#include "rcu_segcblist.h" +#include /* * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and -- cgit v1.2.3 From f2425b4efb0c69e77c0b9666b605ae4a1ecaae47 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 14 Mar 2017 12:42:30 -0700 Subject: srcu: Move combining-tree definitions for SRCU's benefit This commit moves the C preprocessor code that defines the default shape of the rcu_node combining tree to a new include/linux/rcu_node_tree.h file as a first step towards enabling SRCU to create its own combining tree, which in turn enables SRCU to implement per-CPU callback handling, thus avoiding contention on the lock currently guarding the single list of callbacks. Note that users of SRCU still need to know the size of the srcu_struct structure, hence include/linux rather than kernel/rcu. This commit is code-movement only. Signed-off-by: Paul E. McKenney --- include/linux/rcu_node_tree.h | 102 ++++++++++++++++++++++++++++++++++++++++++ kernel/rcu/tree.h | 71 +---------------------------- 2 files changed, 103 insertions(+), 70 deletions(-) create mode 100644 include/linux/rcu_node_tree.h (limited to 'include') diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h new file mode 100644 index 000000000000..b7eb97096b1c --- /dev/null +++ b/include/linux/rcu_node_tree.h @@ -0,0 +1,102 @@ +/* + * RCU node combining tree definitions. 
These are used to compute + * global attributes while avoiding common-case global contention. A key + * property that these computations rely on is a tournament-style approach + * where only one of the tasks contending a lower level in the tree need + * advance to the next higher level. If properly configured, this allows + * unlimited scalability while maintaining a constant level of contention + * on the root node. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright IBM Corporation, 2017 + * + * Author: Paul E. McKenney + */ + +#ifndef __LINUX_RCU_NODE_TREE_H +#define __LINUX_RCU_NODE_TREE_H + +/* + * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and + * CONFIG_RCU_FANOUT_LEAF. + * In theory, it should be possible to add more levels straightforwardly. + * In practice, this did work well going from three levels to four. + * Of course, your mileage may vary. + */ + +#ifdef CONFIG_RCU_FANOUT +#define RCU_FANOUT CONFIG_RCU_FANOUT +#else /* #ifdef CONFIG_RCU_FANOUT */ +# ifdef CONFIG_64BIT +# define RCU_FANOUT 64 +# else +# define RCU_FANOUT 32 +# endif +#endif /* #else #ifdef CONFIG_RCU_FANOUT */ + +#ifdef CONFIG_RCU_FANOUT_LEAF +#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF +#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */ +#define RCU_FANOUT_LEAF 16 +#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */ + +#define RCU_FANOUT_1 (RCU_FANOUT_LEAF) +#define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT) +#define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT) +#define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT) + +#if NR_CPUS <= RCU_FANOUT_1 +# define RCU_NUM_LVLS 1 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_NODES NUM_RCU_LVL_0 +# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 } +# define RCU_NODE_NAME_INIT { "rcu_node_0" } +# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" } +#elif NR_CPUS <= RCU_FANOUT_2 +# define RCU_NUM_LVLS 2 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) +# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1) +# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 } +# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" } +# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" } +#elif NR_CPUS <= RCU_FANOUT_3 +# define RCU_NUM_LVLS 3 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) +# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) +# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2) +# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 } +# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" } +# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" } +#elif NR_CPUS <= RCU_FANOUT_4 +# define RCU_NUM_LVLS 4 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3) +# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) +# define 
NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) +# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) +# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 } +# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" } +# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" } +#else +# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" +#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */ + +extern int rcu_num_lvls; +extern int rcu_num_nodes; + +#endif /* __LINUX_RCU_NODE_TREE_H */ diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 4f62651588ea..1bec3958d44f 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -31,76 +31,7 @@ #include #include #include - -/* - * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and - * CONFIG_RCU_FANOUT_LEAF. - * In theory, it should be possible to add more levels straightforwardly. - * In practice, this did work well going from three levels to four. - * Of course, your mileage may vary. - */ - -#ifdef CONFIG_RCU_FANOUT -#define RCU_FANOUT CONFIG_RCU_FANOUT -#else /* #ifdef CONFIG_RCU_FANOUT */ -# ifdef CONFIG_64BIT -# define RCU_FANOUT 64 -# else -# define RCU_FANOUT 32 -# endif -#endif /* #else #ifdef CONFIG_RCU_FANOUT */ - -#ifdef CONFIG_RCU_FANOUT_LEAF -#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF -#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */ -#define RCU_FANOUT_LEAF 16 -#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */ - -#define RCU_FANOUT_1 (RCU_FANOUT_LEAF) -#define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT) -#define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT) -#define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT) - -#if NR_CPUS <= RCU_FANOUT_1 -# define RCU_NUM_LVLS 1 -# define NUM_RCU_LVL_0 1 -# define NUM_RCU_NODES NUM_RCU_LVL_0 -# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 } -# define RCU_NODE_NAME_INIT { "rcu_node_0" } -# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" } -#elif NR_CPUS <= RCU_FANOUT_2 -# define RCU_NUM_LVLS 2 -# define NUM_RCU_LVL_0 1 -# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) -# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1) -# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 } -# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" } -# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" } -#elif NR_CPUS <= RCU_FANOUT_3 -# define RCU_NUM_LVLS 3 -# define NUM_RCU_LVL_0 1 -# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) -# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) -# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2) -# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 } -# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" } -# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" } -#elif NR_CPUS <= RCU_FANOUT_4 -# define RCU_NUM_LVLS 4 -# define NUM_RCU_LVL_0 1 -# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3) -# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) -# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) -# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) -# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 } -# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" } -# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" } -#else -# error "CONFIG_RCU_FANOUT 
insufficient for NR_CPUS" -#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */ - -extern int rcu_num_lvls; -extern int rcu_num_nodes; +#include /* * Dynticks per-CPU state. -- cgit v1.2.3 From 2b34c43cc1671c59bad6dd1682ae3ee4f0919eb7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 14 Mar 2017 14:29:53 -0700 Subject: srcu: Move rcu_init_levelspread() to rcu_tree_node.h This commit moves the rcu_init_levelspread() function from kernel/rcu/tree.c to kernel/rcu/rcu.h so that SRCU can access it. This is another step towards enabling SRCU to create its own combining tree. This commit is code-movement only, give or take knock-on adjustments. Signed-off-by: Paul E. McKenney --- include/linux/rcu_node_tree.h | 3 --- kernel/rcu/rcu.h | 36 ++++++++++++++++++++++++++++++++++++ kernel/rcu/srcu.c | 1 + kernel/rcu/tree.c | 25 ------------------------- kernel/rcu/tree_trace.c | 1 + 5 files changed, 38 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h index b7eb97096b1c..4b766b61e1a0 100644 --- a/include/linux/rcu_node_tree.h +++ b/include/linux/rcu_node_tree.h @@ -96,7 +96,4 @@ # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */ -extern int rcu_num_lvls; -extern int rcu_num_nodes; - #endif /* __LINUX_RCU_NODE_TREE_H */ diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index a943b42a9cf7..87326479b39a 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -190,4 +190,40 @@ void rcu_test_sync_prims(void); */ extern void resched_cpu(int cpu); +#if defined(SRCU) || !defined(TINY_RCU) + +#include + +extern int rcu_num_lvls; +extern int rcu_num_nodes; +static bool rcu_fanout_exact; +static int rcu_fanout_leaf; + +/* + * Compute the per-level fanout, either using the exact fanout specified + * or balancing the tree, depending on the rcu_fanout_exact boot parameter. + */ +static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt) +{ + int i; + + if (rcu_fanout_exact) { + levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf; + for (i = rcu_num_lvls - 2; i >= 0; i--) + levelspread[i] = RCU_FANOUT; + } else { + int ccur; + int cprv; + + cprv = nr_cpu_ids; + for (i = rcu_num_lvls - 1; i >= 0; i--) { + ccur = levelcnt[i]; + levelspread[i] = (cprv + ccur - 1) / ccur; + cprv = ccur; + } + } +} + +#endif /* #if defined(SRCU) || !defined(TINY_RCU) */ + #endif /* __LINUX_RCU_H */ diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 56fd30862122..0b511de7ca4d 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -36,6 +36,7 @@ #include #include +#include #include "rcu.h" static int init_srcu_struct_fields(struct srcu_struct *sp) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 844a030c1960..df3527744af8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3951,31 +3951,6 @@ void rcu_scheduler_starting(void) rcu_test_sync_prims(); } -/* - * Compute the per-level fanout, either using the exact fanout specified - * or balancing the tree, depending on the rcu_fanout_exact boot parameter. 
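To make the balancing branch of rcu_init_levelspread() concrete, the same ceiling-division arithmetic is shown below as a stand-alone user-space program. The CPU count and per-level node counts are hard-coded to one plausible configuration (the equivalent of NR_CPUS=96 with RCU_FANOUT_LEAF=16, giving one root node over six leaf nodes); they are example values only.

#include <stdio.h>

int main(void)
{
        int levelcnt[2] = { 1, 6 };     /* root level, leaf level */
        int levelspread[2];
        int nr_cpu_ids = 96;
        int cprv = nr_cpu_ids, ccur, i;

        /* Bottom-up pass, as in the non-exact branch of rcu_init_levelspread(). */
        for (i = 1; i >= 0; i--) {
                ccur = levelcnt[i];
                levelspread[i] = (cprv + ccur - 1) / ccur;      /* ceiling division */
                cprv = ccur;
        }
        printf("%d %d\n", levelspread[0], levelspread[1]);      /* prints: 6 16 */
        return 0;
}

Each leaf node thus fans in ceil(96/6) = 16 CPUs and the root fans in the 6 leaves, so the contention seen at any one node stays bounded regardless of the total CPU count.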
- */ -static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt) -{ - int i; - - if (rcu_fanout_exact) { - levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf; - for (i = rcu_num_lvls - 2; i >= 0; i--) - levelspread[i] = RCU_FANOUT; - } else { - int ccur; - int cprv; - - cprv = nr_cpu_ids; - for (i = rcu_num_lvls - 1; i >= 0; i--) { - ccur = levelcnt[i]; - levelspread[i] = (cprv + ccur - 1) / ccur; - cprv = ccur; - } - } -} - /* * Helper function for rcu_init() that initializes one rcu_state structure. */ diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c index 066c64071a7b..30c5bf89ee58 100644 --- a/kernel/rcu/tree_trace.c +++ b/kernel/rcu/tree_trace.c @@ -45,6 +45,7 @@ #define RCU_TREE_NONCORE #include "tree.h" +#include "rcu.h" static int r_open(struct inode *inode, struct file *file, const struct seq_operations *op) -- cgit v1.2.3 From 80a7956fe36c2ee40c6ff12c77926d267802b7c8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 22 Mar 2017 15:26:18 -0700 Subject: srcu: Merge ->srcu_state into ->srcu_gp_seq Updating ->srcu_state and ->srcu_gp_seq will lead to extremely complex race conditions given multiple callback queues, so this commit takes advantage of the two-bit state now available in rcu_seq counters to store the state in the bottom two bits of ->srcu_gp_seq. Signed-off-by: Paul E. McKenney --- include/linux/srcu.h | 5 +---- kernel/rcu/rcu.h | 10 ++++++++++ kernel/rcu/srcu.c | 55 +++++++++++++++++++++++++++++++++------------------- 3 files changed, 46 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/linux/srcu.h b/include/linux/srcu.h index ad154a7bc114..e7dbc01b61a1 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -43,8 +43,7 @@ struct srcu_struct { unsigned long completed; unsigned long srcu_gp_seq; struct srcu_array __percpu *per_cpu_ref; - spinlock_t queue_lock; /* protect ->srcu_cblist, ->srcu_state */ - int srcu_state; + spinlock_t queue_lock; /* protect ->srcu_cblist */ struct rcu_segcblist srcu_cblist; struct delayed_work work; #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -56,7 +55,6 @@ struct srcu_struct { #define SRCU_STATE_IDLE 0 #define SRCU_STATE_SCAN1 1 #define SRCU_STATE_SCAN2 2 -#define SRCU_STATE_DONE 3 #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -85,7 +83,6 @@ void process_srcu(struct work_struct *work); .completed = -300, \ .per_cpu_ref = &name##_srcu_array, \ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ - .srcu_state = SRCU_STATE_IDLE, \ .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist),\ .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\ __SRCU_DEP_MAP_INIT(name) \ diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 87a0ac95b551..73e16ec4054b 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -82,6 +82,16 @@ static inline int rcu_seq_state(unsigned long s) return s & RCU_SEQ_STATE_MASK; } +/* + * Set the state portion of the pointed-to sequence number. + * The caller is responsible for preventing conflicting updates. + */ +static inline void rcu_seq_set_state(unsigned long *sp, int newstate) +{ + WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK); + WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate); +} + /* Adjust sequence number for start of update-side operation. 
*/ static inline void rcu_seq_start(unsigned long *sp) { diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 1a2dc74bb625..90ffea31b188 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -44,7 +44,6 @@ static int init_srcu_struct_fields(struct srcu_struct *sp) sp->completed = 0; sp->srcu_gp_seq = 0; spin_lock_init(&sp->queue_lock); - sp->srcu_state = SRCU_STATE_IDLE; rcu_segcblist_init(&sp->srcu_cblist); INIT_DELAYED_WORK(&sp->work, process_srcu); sp->per_cpu_ref = alloc_percpu(struct srcu_array); @@ -180,6 +179,9 @@ static bool srcu_readers_active(struct srcu_struct *sp) return sum; } +#define SRCU_CALLBACK_BATCH 10 +#define SRCU_INTERVAL 1 + /** * cleanup_srcu_struct - deconstruct a sleep-RCU structure * @sp: structure to clean up. @@ -200,8 +202,10 @@ void cleanup_srcu_struct(struct srcu_struct *sp) if (WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist))) return; /* Leakage unless caller handles error. */ flush_delayed_work(&sp->work); - if (WARN_ON(READ_ONCE(sp->srcu_state) != SRCU_STATE_IDLE)) + if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE)) { + pr_info("cleanup_srcu_struct: Active srcu_struct %lu CBs %c state: %d\n", rcu_segcblist_n_cbs(&sp->srcu_cblist), ".E"[rcu_segcblist_empty(&sp->srcu_cblist)], rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); return; /* Caller forgot to stop doing call_srcu()? */ + } free_percpu(sp->per_cpu_ref); sp->per_cpu_ref = NULL; } @@ -253,10 +257,13 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock); */ static void srcu_gp_start(struct srcu_struct *sp) { + int state; + rcu_segcblist_accelerate(&sp->srcu_cblist, rcu_seq_snap(&sp->srcu_gp_seq)); - WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1); rcu_seq_start(&sp->srcu_gp_seq); + state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); + WARN_ON_ONCE(state != SRCU_STATE_SCAN1); } /* @@ -300,7 +307,6 @@ static void srcu_flip(struct srcu_struct *sp) static void srcu_gp_end(struct srcu_struct *sp) { rcu_seq_end(&sp->srcu_gp_seq); - WRITE_ONCE(sp->srcu_state, SRCU_STATE_DONE); spin_lock_irq(&sp->queue_lock); rcu_segcblist_advance(&sp->srcu_cblist, @@ -345,7 +351,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head, spin_lock_irqsave(&sp->queue_lock, flags); smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); - if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) { + if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) { srcu_gp_start(sp); queue_delayed_work(system_power_efficient_wq, &sp->work, 0); } @@ -378,7 +384,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) head->func = wakeme_after_rcu; spin_lock_irq(&sp->queue_lock); smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ - if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) { + if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) { /* steal the processing owner */ rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); srcu_gp_start(sp); @@ -480,9 +486,6 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp) } EXPORT_SYMBOL_GPL(srcu_batches_completed); -#define SRCU_CALLBACK_BATCH 10 -#define SRCU_INTERVAL 1 - /* * Core SRCU state machine. Advance callbacks from ->batch_check0 to * ->batch_check1 and then to ->batch_done as readers drain. 
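The state machine below now keys off the bottom bits of ->srcu_gp_seq instead of a separate ->srcu_state field. A minimal user-space sketch of that packing follows; it assumes the two-bit mask and shift implied by rcu_seq_state() and rcu_seq_set_state() above, and the SEQ_* names are local to the example rather than the kernel's.

#include <stdio.h>

#define SEQ_STATE_MASK  3UL             /* two state bits, per the commit message */
#define SEQ_CTR_SHIFT   2               /* grace-period count lives above them */

static unsigned long seq_state(unsigned long s)
{
        return s & SEQ_STATE_MASK;
}

static void seq_set_state(unsigned long *sp, unsigned long newstate)
{
        *sp = (*sp & ~SEQ_STATE_MASK) + newstate;
}

int main(void)
{
        unsigned long gp_seq = 5UL << SEQ_CTR_SHIFT;    /* GP #5, state 0 (IDLE) */

        seq_set_state(&gp_seq, 1);                      /* SRCU_STATE_SCAN1 */
        printf("gp %lu state %lu\n", gp_seq >> SEQ_CTR_SHIFT, seq_state(gp_seq));
        seq_set_state(&gp_seq, 2);                      /* SRCU_STATE_SCAN2 */
        printf("gp %lu state %lu\n", gp_seq >> SEQ_CTR_SHIFT, seq_state(gp_seq));
        return 0;
}

Changing the state never disturbs the count, which is what lets the IDLE/SCAN1/SCAN2 progression share one word with the grace-period sequence number and be sampled with a single load.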
@@ -491,28 +494,40 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) { int idx; - WARN_ON_ONCE(sp->srcu_state == SRCU_STATE_IDLE); - /* * Because readers might be delayed for an extended period after * fetching ->completed for their index, at any point in time there * might well be readers using both idx=0 and idx=1. We therefore * need to wait for readers to clear from both index values before * invoking a callback. + * + * The load-acquire ensures that we see the accesses performed + * by the prior grace period. */ + idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ + if (idx == SRCU_STATE_IDLE) { + spin_lock_irq(&sp->queue_lock); + if (rcu_segcblist_empty(&sp->srcu_cblist)) { + spin_unlock_irq(&sp->queue_lock); + return; + } + idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); + if (idx == SRCU_STATE_IDLE) + srcu_gp_start(sp); + spin_unlock_irq(&sp->queue_lock); + if (idx != SRCU_STATE_IDLE) + return; /* Someone else started the grace period. */ + } - if (sp->srcu_state == SRCU_STATE_DONE) - srcu_gp_start(sp); - - if (sp->srcu_state == SRCU_STATE_SCAN1) { + if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { idx = 1 ^ (sp->completed & 1); if (!try_check_zero(sp, idx, trycount)) return; /* readers present, retry after SRCU_INTERVAL */ srcu_flip(sp); - WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN2); + rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); } - if (sp->srcu_state == SRCU_STATE_SCAN2) { + if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { /* * SRCU read-side critical sections are normally short, @@ -563,14 +578,14 @@ static void srcu_invoke_callbacks(struct srcu_struct *sp) static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) { bool pending = true; + int state; if (rcu_segcblist_empty(&sp->srcu_cblist)) { spin_lock_irq(&sp->queue_lock); + state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); if (rcu_segcblist_empty(&sp->srcu_cblist) && - READ_ONCE(sp->srcu_state) == SRCU_STATE_DONE) { - WRITE_ONCE(sp->srcu_state, SRCU_STATE_IDLE); + state == SRCU_STATE_IDLE) pending = false; - } spin_unlock_irq(&sp->queue_lock); } -- cgit v1.2.3 From f60d231a87c5c9f23f10e69996f396d46f5bf901 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 24 Mar 2017 13:46:33 -0700 Subject: srcu: Crude control of expedited grace periods SRCU's implementation of expedited grace periods has always assumed that the SRCU instance is idle when the expedited request arrives. This commit improves this a bit by maintaining a count of the number of outstanding expedited requests, thus allowing prior non-expedited grace periods accommodate these requests by shifting to expedited mode. However, any non-expedited wait already in progress will still wait for the full duration. Improved control of expedited grace periods is planned, but one step at a time. Signed-off-by: Paul E. 
McKenney --- include/linux/srcu.h | 1 + kernel/rcu/srcu.c | 84 ++++++++++++++++++++++++++++------------------------ 2 files changed, 47 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/include/linux/srcu.h b/include/linux/srcu.h index e7dbc01b61a1..73a1b6296224 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -42,6 +42,7 @@ struct srcu_array { struct srcu_struct { unsigned long completed; unsigned long srcu_gp_seq; + atomic_t srcu_exp_cnt; struct srcu_array __percpu *per_cpu_ref; spinlock_t queue_lock; /* protect ->srcu_cblist */ struct rcu_segcblist srcu_cblist; diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 90ffea31b188..3cfcc59bddf3 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -43,6 +43,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp) { sp->completed = 0; sp->srcu_gp_seq = 0; + atomic_set(&sp->srcu_exp_cnt, 0); spin_lock_init(&sp->queue_lock); rcu_segcblist_init(&sp->srcu_cblist); INIT_DELAYED_WORK(&sp->work, process_srcu); @@ -179,7 +180,6 @@ static bool srcu_readers_active(struct srcu_struct *sp) return sum; } -#define SRCU_CALLBACK_BATCH 10 #define SRCU_INTERVAL 1 /** @@ -197,6 +197,7 @@ static bool srcu_readers_active(struct srcu_struct *sp) */ void cleanup_srcu_struct(struct srcu_struct *sp) { + WARN_ON_ONCE(atomic_read(&sp->srcu_exp_cnt)); if (WARN_ON(srcu_readers_active(sp))) return; /* Leakage unless caller handles error. */ if (WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist))) @@ -244,13 +245,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock); * We use an adaptive strategy for synchronize_srcu() and especially for * synchronize_srcu_expedited(). We spin for a fixed time period * (defined below) to allow SRCU readers to exit their read-side critical - * sections. If there are still some readers after 10 microseconds, - * we repeatedly block for 1-millisecond time periods. This approach - * has done well in testing, so there is no need for a config parameter. + * sections. If there are still some readers after a few microseconds, + * we repeatedly block for 1-millisecond time periods. */ #define SRCU_RETRY_CHECK_DELAY 5 -#define SYNCHRONIZE_SRCU_TRYCOUNT 2 -#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT 12 /* * Start an SRCU grace period. @@ -267,16 +265,16 @@ static void srcu_gp_start(struct srcu_struct *sp) } /* - * Wait until all readers counted by array index idx complete, but loop - * a maximum of trycount times. The caller must ensure that ->completed - * is not changed while checking. + * Wait until all readers counted by array index idx complete, but + * loop an additional time if there is an expedited grace period pending. + * The caller must ensure that ->completed is not changed while checking. */ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) { for (;;) { if (srcu_readers_active_idx_check(sp, idx)) return true; - if (--trycount <= 0) + if (--trycount + !!atomic_read(&sp->srcu_exp_cnt) <= 0) return false; udelay(SRCU_RETRY_CHECK_DELAY); } @@ -364,7 +362,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); /* * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). */ -static void __synchronize_srcu(struct srcu_struct *sp, int trycount) +static void __synchronize_srcu(struct srcu_struct *sp) { struct rcu_synchronize rcu; struct rcu_head *head = &rcu.head; @@ -400,6 +398,32 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) smp_mb(); /* Caller's later accesses after GP. 
*/ } +/** + * synchronize_srcu_expedited - Brute-force SRCU grace period + * @sp: srcu_struct with which to synchronize. + * + * Wait for an SRCU grace period to elapse, but be more aggressive about + * spinning rather than blocking when waiting. + * + * Note that synchronize_srcu_expedited() has the same deadlock and + * memory-ordering properties as does synchronize_srcu(). + */ +void synchronize_srcu_expedited(struct srcu_struct *sp) +{ + bool do_norm = rcu_gp_is_normal(); + + if (!do_norm) { + atomic_inc(&sp->srcu_exp_cnt); + smp_mb__after_atomic(); /* increment before GP. */ + } + __synchronize_srcu(sp); + if (!do_norm) { + smp_mb__before_atomic(); /* GP before decrement. */ + atomic_dec(&sp->srcu_exp_cnt); + } +} +EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); + /** * synchronize_srcu - wait for prior SRCU read-side critical-section completion * @sp: srcu_struct with which to synchronize. @@ -441,28 +465,13 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) */ void synchronize_srcu(struct srcu_struct *sp) { - __synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal()) - ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT - : SYNCHRONIZE_SRCU_TRYCOUNT); + if (rcu_gp_is_expedited()) + synchronize_srcu_expedited(sp); + else + __synchronize_srcu(sp); } EXPORT_SYMBOL_GPL(synchronize_srcu); -/** - * synchronize_srcu_expedited - Brute-force SRCU grace period - * @sp: srcu_struct with which to synchronize. - * - * Wait for an SRCU grace period to elapse, but be more aggressive about - * spinning rather than blocking when waiting. - * - * Note that synchronize_srcu_expedited() has the same deadlock and - * memory-ordering properties as does synchronize_srcu(). - */ -void synchronize_srcu_expedited(struct srcu_struct *sp) -{ - __synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT); -} -EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); - /** * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. * @sp: srcu_struct on which to wait for in-flight callbacks. @@ -490,7 +499,7 @@ EXPORT_SYMBOL_GPL(srcu_batches_completed); * Core SRCU state machine. Advance callbacks from ->batch_check0 to * ->batch_check1 and then to ->batch_done as readers drain. */ -static void srcu_advance_batches(struct srcu_struct *sp, int trycount) +static void srcu_advance_batches(struct srcu_struct *sp) { int idx; @@ -521,8 +530,8 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { idx = 1 ^ (sp->completed & 1); - if (!try_check_zero(sp, idx, trycount)) - return; /* readers present, retry after SRCU_INTERVAL */ + if (!try_check_zero(sp, idx, 1)) + return; /* readers present, retry later. */ srcu_flip(sp); rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); } @@ -534,9 +543,8 @@ static void srcu_advance_batches(struct srcu_struct *sp, int trycount) * so check at least twice in quick succession after a flip. */ idx = 1 ^ (sp->completed & 1); - trycount = trycount < 2 ? 2 : trycount; - if (!try_check_zero(sp, idx, trycount)) - return; /* readers present, retry after SRCU_INTERVAL */ + if (!try_check_zero(sp, idx, 2)) + return; /* readers present, retry after later. */ srcu_gp_end(sp); } } @@ -602,8 +610,8 @@ void process_srcu(struct work_struct *work) sp = container_of(work, struct srcu_struct, work.work); - srcu_advance_batches(sp, 1); + srcu_advance_batches(sp); srcu_invoke_callbacks(sp); - srcu_reschedule(sp, SRCU_INTERVAL); + srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) ? 
0 : SRCU_INTERVAL); } EXPORT_SYMBOL_GPL(process_srcu); -- cgit v1.2.3 From d8be81735aa89413b333de488251f0e64e2be591 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 25 Mar 2017 09:59:38 -0700 Subject: srcu: Create a tiny SRCU In response to automated complaints about modifications to SRCU increasing its size, this commit creates a tiny SRCU that is used in SMP=n && PREEMPT=n builds. Signed-off-by: Paul E. McKenney --- include/linux/srcu.h | 69 ++------------- include/linux/srcutiny.h | 81 ++++++++++++++++++ include/linux/srcutree.h | 91 ++++++++++++++++++++ init/Kconfig | 12 +++ kernel/rcu/Makefile | 3 +- kernel/rcu/rcutorture.c | 2 + kernel/rcu/srcutiny.c | 215 +++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 411 insertions(+), 62 deletions(-) create mode 100644 include/linux/srcutiny.h create mode 100644 include/linux/srcutree.h create mode 100644 kernel/rcu/srcutiny.c (limited to 'include') diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 73a1b6296224..907f09b14eda 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -34,28 +34,7 @@ #include #include -struct srcu_array { - unsigned long lock_count[2]; - unsigned long unlock_count[2]; -}; - -struct srcu_struct { - unsigned long completed; - unsigned long srcu_gp_seq; - atomic_t srcu_exp_cnt; - struct srcu_array __percpu *per_cpu_ref; - spinlock_t queue_lock; /* protect ->srcu_cblist */ - struct rcu_segcblist srcu_cblist; - struct delayed_work work; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -}; - -/* Values for -> state variable. */ -#define SRCU_STATE_IDLE 0 -#define SRCU_STATE_SCAN1 1 -#define SRCU_STATE_SCAN2 2 +struct srcu_struct; #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -77,42 +56,13 @@ int init_srcu_struct(struct srcu_struct *sp); #define __SRCU_DEP_MAP_INIT(srcu_name) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -void process_srcu(struct work_struct *work); - -#define __SRCU_STRUCT_INIT(name) \ - { \ - .completed = -300, \ - .per_cpu_ref = &name##_srcu_array, \ - .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ - .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist),\ - .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\ - __SRCU_DEP_MAP_INIT(name) \ - } - -/* - * Define and initialize a srcu struct at build time. - * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it. - * - * Note that although DEFINE_STATIC_SRCU() hides the name from other - * files, the per-CPU variable rules nevertheless require that the - * chosen name be globally unique. These rules also prohibit use of - * DEFINE_STATIC_SRCU() within a function. If these rules are too - * restrictive, declare the srcu_struct manually. For example, in - * each file: - * - * static struct srcu_struct my_srcu; - * - * Then, before the first use of each my_srcu, manually initialize it: - * - * init_srcu_struct(&my_srcu); - * - * See include/linux/percpu-defs.h for the rules on per-CPU variables. 
- */ -#define __DEFINE_SRCU(name, is_static) \ - static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\ - is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) -#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) -#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) +#ifdef CONFIG_TINY_SRCU +#include +#elif defined(CONFIG_TREE_SRCU) +#include +#else +#error "Unknown SRCU implementation specified to kernel configuration" +#endif /** * call_srcu() - Queue a callback for invocation after an SRCU grace period @@ -138,9 +88,6 @@ void cleanup_srcu_struct(struct srcu_struct *sp); int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); void synchronize_srcu(struct srcu_struct *sp); -void synchronize_srcu_expedited(struct srcu_struct *sp); -unsigned long srcu_batches_completed(struct srcu_struct *sp); -void srcu_barrier(struct srcu_struct *sp); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h new file mode 100644 index 000000000000..4f284e4f4d8c --- /dev/null +++ b/include/linux/srcutiny.h @@ -0,0 +1,81 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion, + * tiny variant. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (C) IBM Corporation, 2017 + * + * Author: Paul McKenney + */ + +#ifndef _LINUX_SRCU_TINY_H +#define _LINUX_SRCU_TINY_H + +#include + +struct srcu_struct { + int srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */ + struct swait_queue_head srcu_wq; + /* Last srcu_read_unlock() wakes GP. */ + unsigned long srcu_gp_seq; /* GP seq # for callback tagging. */ + struct rcu_segcblist srcu_cblist; + /* Pending SRCU callbacks. */ + int srcu_idx; /* Current reader array element. */ + bool srcu_gp_running; /* GP workqueue running? */ + bool srcu_gp_waiting; /* GP waiting for readers? */ + struct work_struct srcu_work; /* For driving grace periods. */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +}; + +void srcu_drive_gp(struct work_struct *wp); + +#define __SRCU_STRUCT_INIT(name) \ +{ \ + .srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \ + .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist), \ + .srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \ + __SRCU_DEP_MAP_INIT(name) \ +} + +/* + * This odd _STATIC_ arrangement is needed for API compatibility with + * Tree SRCU, which needs some per-CPU data. 
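Whichever implementation Kconfig ends up selecting, callers keep using the same reader and updater API. A kernel-style sketch of that common usage follows; the demo_* names are invented for illustration and do not appear in these patches.

#include <linux/slab.h>
#include <linux/srcu.h>

struct demo { int value; };

static struct demo __rcu *demo_ptr;
DEFINE_STATIC_SRCU(demo_srcu);          /* same macro for Tiny and Tree SRCU */

static int demo_read(void)
{
        struct demo *p;
        int idx, v = -1;

        idx = srcu_read_lock(&demo_srcu);
        p = srcu_dereference(demo_ptr, &demo_srcu);
        if (p)
                v = p->value;
        srcu_read_unlock(&demo_srcu, idx);
        return v;
}

static void demo_update(struct demo *newp)
{
        struct demo *oldp;

        oldp = rcu_dereference_protected(demo_ptr, 1);  /* updates serialized by caller */
        rcu_assign_pointer(demo_ptr, newp);
        synchronize_srcu(&demo_srcu);   /* wait for readers still using oldp */
        kfree(oldp);
}

Readers may sleep inside the srcu_read_lock()/srcu_read_unlock() window, which is the point of SRCU; the Tiny variant merely tracks them with the srcu_lock_nesting[] counters above instead of per-CPU counts.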
+ */ +#define DEFINE_SRCU(name) \ + struct srcu_struct name = __SRCU_STRUCT_INIT(name) +#define DEFINE_STATIC_SRCU(name) \ + static struct srcu_struct name = __SRCU_STRUCT_INIT(name) + +void synchronize_srcu(struct srcu_struct *sp); + +static inline void synchronize_srcu_expedited(struct srcu_struct *sp) +{ + synchronize_srcu(sp); +} + +static inline void srcu_barrier(struct srcu_struct *sp) +{ + synchronize_srcu(sp); +} + +static inline unsigned long srcu_batches_completed(struct srcu_struct *sp) +{ + return 0; +} + +#endif diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h new file mode 100644 index 000000000000..f2b3bd6c6bc2 --- /dev/null +++ b/include/linux/srcutree.h @@ -0,0 +1,91 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion, + * tree variant. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (C) IBM Corporation, 2017 + * + * Author: Paul McKenney + */ + +#ifndef _LINUX_SRCU_TREE_H +#define _LINUX_SRCU_TREE_H + +struct srcu_array { + unsigned long lock_count[2]; + unsigned long unlock_count[2]; +}; + +struct srcu_struct { + unsigned long completed; + unsigned long srcu_gp_seq; + atomic_t srcu_exp_cnt; + struct srcu_array __percpu *per_cpu_ref; + spinlock_t queue_lock; /* protect ->srcu_cblist */ + struct rcu_segcblist srcu_cblist; + struct delayed_work work; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +}; + +/* Values for -> state variable. */ +#define SRCU_STATE_IDLE 0 +#define SRCU_STATE_SCAN1 1 +#define SRCU_STATE_SCAN2 2 + +void process_srcu(struct work_struct *work); + +#define __SRCU_STRUCT_INIT(name) \ + { \ + .completed = -300, \ + .per_cpu_ref = &name##_srcu_array, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist),\ + .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\ + __SRCU_DEP_MAP_INIT(name) \ + } + +/* + * Define and initialize a srcu struct at build time. + * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it. + * + * Note that although DEFINE_STATIC_SRCU() hides the name from other + * files, the per-CPU variable rules nevertheless require that the + * chosen name be globally unique. These rules also prohibit use of + * DEFINE_STATIC_SRCU() within a function. If these rules are too + * restrictive, declare the srcu_struct manually. For example, in + * each file: + * + * static struct srcu_struct my_srcu; + * + * Then, before the first use of each my_srcu, manually initialize it: + * + * init_srcu_struct(&my_srcu); + * + * See include/linux/percpu-defs.h for the rules on per-CPU variables. 
+ */ +#define __DEFINE_SRCU(name, is_static) \ + static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\ + is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) +#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) +#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) + +void synchronize_srcu_expedited(struct srcu_struct *sp); +void srcu_barrier(struct srcu_struct *sp); +unsigned long srcu_batches_completed(struct srcu_struct *sp); + +#endif diff --git a/init/Kconfig b/init/Kconfig index a92f27da4a27..d269f2ca17b8 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -526,6 +526,18 @@ config SRCU permits arbitrary sleeping or blocking within RCU read-side critical sections. +config TINY_SRCU + bool + default y if TINY_RCU + help + This option selects the single-CPU non-preemptible version of SRCU. + +config TREE_SRCU + bool + default y if !TINY_RCU + help + This option selects the full-fledged version of SRCU. + config TASKS_RCU bool default n diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile index 18dfc485225c..b853214a2b99 100644 --- a/kernel/rcu/Makefile +++ b/kernel/rcu/Makefile @@ -3,7 +3,8 @@ KCOV_INSTRUMENT := n obj-y += update.o sync.o -obj-$(CONFIG_SRCU) += srcu.o +obj-$(CONFIG_TREE_SRCU) += srcu.o +obj-$(CONFIG_TINY_SRCU) += srcutiny.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o obj-$(CONFIG_TREE_RCU) += tree.o diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index cccc417a8135..98591e16db1a 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -559,6 +559,7 @@ static void srcu_torture_barrier(void) static void srcu_torture_stats(void) { +#ifdef CONFIG_TREE_SRCU int cpu; int idx = srcu_ctlp->completed & 0x1; @@ -587,6 +588,7 @@ static void srcu_torture_stats(void) pr_cont(" %d(%ld,%ld)", cpu, c0, c1); } pr_cont("\n"); +#endif } static void srcu_torture_synchronize_expedited(void) diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c new file mode 100644 index 000000000000..b8293527ee18 --- /dev/null +++ b/kernel/rcu/srcutiny.c @@ -0,0 +1,215 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion, + * tiny version for non-preemptible single-CPU use. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + * Copyright (C) IBM Corporation, 2017 + * + * Author: Paul McKenney + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include "rcu.h" + +static int init_srcu_struct_fields(struct srcu_struct *sp) +{ + sp->srcu_lock_nesting[0] = 0; + sp->srcu_lock_nesting[1] = 0; + init_swait_queue_head(&sp->srcu_wq); + sp->srcu_gp_seq = 0; + rcu_segcblist_init(&sp->srcu_cblist); + sp->srcu_gp_running = false; + sp->srcu_gp_waiting = false; + sp->srcu_idx = 0; + INIT_WORK(&sp->srcu_work, srcu_drive_gp); + return 0; +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +int __init_srcu_struct(struct srcu_struct *sp, const char *name, + struct lock_class_key *key) +{ + /* Don't re-initialize a lock while it is held. */ + debug_check_no_locks_freed((void *)sp, sizeof(*sp)); + lockdep_init_map(&sp->dep_map, name, key, 0); + return init_srcu_struct_fields(sp); +} +EXPORT_SYMBOL_GPL(__init_srcu_struct); + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +/* + * init_srcu_struct - initialize a sleep-RCU structure + * @sp: structure to initialize. + * + * Must invoke this on a given srcu_struct before passing that srcu_struct + * to any other function. Each srcu_struct represents a separate domain + * of SRCU protection. + */ +int init_srcu_struct(struct srcu_struct *sp) +{ + return init_srcu_struct_fields(sp); +} +EXPORT_SYMBOL_GPL(init_srcu_struct); + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +/* + * cleanup_srcu_struct - deconstruct a sleep-RCU structure + * @sp: structure to clean up. + * + * Must invoke this after you are finished using a given srcu_struct that + * was initialized via init_srcu_struct(), else you leak memory. + */ +void cleanup_srcu_struct(struct srcu_struct *sp) +{ + WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]); + flush_work(&sp->srcu_work); + WARN_ON(rcu_seq_state(sp->srcu_gp_seq)); + WARN_ON(sp->srcu_gp_running); + WARN_ON(sp->srcu_gp_waiting); + WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist)); +} +EXPORT_SYMBOL_GPL(cleanup_srcu_struct); + +/* + * Counts the new reader in the appropriate per-CPU element of the + * srcu_struct. Must be called from process context. + * Returns an index that must be passed to the matching srcu_read_unlock(). + */ +int __srcu_read_lock(struct srcu_struct *sp) +{ + int idx; + + idx = READ_ONCE(sp->srcu_idx); + WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1); + return idx; +} +EXPORT_SYMBOL_GPL(__srcu_read_lock); + +/* + * Removes the count for the old reader from the appropriate element of + * the srcu_struct. Must be called from process context. + */ +void __srcu_read_unlock(struct srcu_struct *sp, int idx) +{ + int newval = sp->srcu_lock_nesting[idx] - 1; + + WRITE_ONCE(sp->srcu_lock_nesting[idx], newval); + if (!newval && READ_ONCE(sp->srcu_gp_waiting)) + swake_up(&sp->srcu_wq); +} +EXPORT_SYMBOL_GPL(__srcu_read_unlock); + +/* + * Workqueue handler to drive one grace period and invoke any callbacks + * that become ready as a result. Single-CPU and !PREEMPT operation + * means that we get away with murder on synchronization. ;-) + */ +void srcu_drive_gp(struct work_struct *wp) +{ + int idx; + struct rcu_cblist ready_cbs; + struct srcu_struct *sp; + struct rcu_head *rhp; + + sp = container_of(wp, struct srcu_struct, srcu_work); + if (sp->srcu_gp_running || rcu_segcblist_empty(&sp->srcu_cblist)) + return; /* Already running or nothing to do. */ + + /* Tag recently arrived callbacks and wait for readers. 
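
For orientation, the reader-side primitives above back the usual SRCU pattern of a sleepable read-side critical section paired with synchronize_srcu() on the update side. A minimal sketch follows; the my_srcu, my_ptr, and my_data names, and the kfree()-after-grace-period reclamation policy, are assumptions made for illustration and are not part of this patch.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct my_data {
	int val;			/* illustrative payload */
};

DEFINE_STATIC_SRCU(my_srcu);
static struct my_data __rcu *my_ptr;

/* Reader: may block inside the SRCU read-side critical section. */
static int my_reader(void)
{
	struct my_data *p;
	int idx, val = -1;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(my_ptr, &my_srcu);
	if (p)
		val = p->val;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}

/* Updater: publish a new version, wait for pre-existing readers, reclaim. */
static void my_update(struct my_data *newp)
{
	struct my_data *oldp;

	oldp = rcu_dereference_protected(my_ptr, 1);	/* single updater assumed */
	rcu_assign_pointer(my_ptr, newp);
	synchronize_srcu(&my_srcu);	/* all prior readers have finished */
	kfree(oldp);
}
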
*/ + WRITE_ONCE(sp->srcu_gp_running, true); + rcu_segcblist_accelerate(&sp->srcu_cblist, + rcu_seq_snap(&sp->srcu_gp_seq)); + rcu_seq_start(&sp->srcu_gp_seq); + idx = sp->srcu_idx; + WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx); + WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */ + swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx])); + WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */ + rcu_seq_end(&sp->srcu_gp_seq); + + /* Update callback list based on GP, and invoke ready callbacks. */ + rcu_segcblist_advance(&sp->srcu_cblist, + rcu_seq_current(&sp->srcu_gp_seq)); + if (rcu_segcblist_ready_cbs(&sp->srcu_cblist)) { + rcu_cblist_init(&ready_cbs); + local_irq_disable(); + rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs); + local_irq_enable(); + rhp = rcu_cblist_dequeue(&ready_cbs); + for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { + local_bh_disable(); + rhp->func(rhp); + local_bh_enable(); + } + local_irq_disable(); + rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs); + local_irq_enable(); + } + WRITE_ONCE(sp->srcu_gp_running, false); + + /* + * If more callbacks, reschedule ourselves. This can race with + * a call_srcu() at interrupt level, but the ->srcu_gp_running + * checks will straighten that out. + */ + if (!rcu_segcblist_empty(&sp->srcu_cblist)) + schedule_work(&sp->srcu_work); +} +EXPORT_SYMBOL_GPL(srcu_drive_gp); + +/* + * Enqueue an SRCU callback on the specified srcu_struct structure, + * initiating grace-period processing if it is not already running. + */ +void call_srcu(struct srcu_struct *sp, struct rcu_head *head, + rcu_callback_t func) +{ + unsigned long flags; + + head->func = func; + local_irq_save(flags); + rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); + local_irq_restore(flags); + if (!READ_ONCE(sp->srcu_gp_running)) + schedule_work(&sp->srcu_work); +} +EXPORT_SYMBOL_GPL(call_srcu); + +/* + * synchronize_srcu - wait for prior SRCU read-side critical-section completion + */ +void synchronize_srcu(struct srcu_struct *sp) +{ + struct rcu_synchronize rs; + + init_rcu_head_on_stack(&rs.head); + init_completion(&rs.completion); + call_srcu(sp, &rs.head, wakeme_after_rcu); + wait_for_completion(&rs.completion); + destroy_rcu_head_on_stack(&rs.head); +} +EXPORT_SYMBOL_GPL(synchronize_srcu); -- cgit v1.2.3 From dad81a2026841b5e2651aab58a7398c13cc05847 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 25 Mar 2017 17:23:44 -0700 Subject: srcu: Introduce CLASSIC_SRCU Kconfig option The TREE_SRCU rewrite is large and a bit on the non-simple side, so this commit helps reduce risk by allowing the old v4.11 SRCU algorithm to be selected using a new CLASSIC_SRCU Kconfig option that depends on RCU_EXPERT. The default is to use the new TREE_SRCU and TINY_SRCU algorithms, in order to help get these the testing that they need. However, if your users do not require the update-side scalability that is to be provided by TREE_SRCU, select RCU_EXPERT and then CLASSIC_SRCU to revert back to the old classic SRCU algorithm. Signed-off-by: Paul E. 
McKenney --- include/linux/srcu.h | 2 + include/linux/srcuclassic.h | 101 ++++++++ init/Kconfig | 21 +- kernel/rcu/Makefile | 3 +- kernel/rcu/rcutorture.c | 2 +- kernel/rcu/srcu.c | 347 ++++++++++++++----------- kernel/rcu/srcutree.c | 613 ++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 934 insertions(+), 155 deletions(-) create mode 100644 include/linux/srcuclassic.h create mode 100644 kernel/rcu/srcutree.c (limited to 'include') diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 907f09b14eda..167ad8831aaf 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -60,6 +60,8 @@ int init_srcu_struct(struct srcu_struct *sp); #include #elif defined(CONFIG_TREE_SRCU) #include +#elif defined(CONFIG_CLASSIC_SRCU) +#include #else #error "Unknown SRCU implementation specified to kernel configuration" #endif diff --git a/include/linux/srcuclassic.h b/include/linux/srcuclassic.h new file mode 100644 index 000000000000..41cf99930f34 --- /dev/null +++ b/include/linux/srcuclassic.h @@ -0,0 +1,101 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion, + * classic v4.11 variant. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (C) IBM Corporation, 2017 + * + * Author: Paul McKenney + */ + +#ifndef _LINUX_SRCU_CLASSIC_H +#define _LINUX_SRCU_CLASSIC_H + +struct srcu_array { + unsigned long lock_count[2]; + unsigned long unlock_count[2]; +}; + +struct rcu_batch { + struct rcu_head *head, **tail; +}; + +#define RCU_BATCH_INIT(name) { NULL, &(name.head) } + +struct srcu_struct { + unsigned long completed; + struct srcu_array __percpu *per_cpu_ref; + spinlock_t queue_lock; /* protect ->batch_queue, ->running */ + bool running; + /* callbacks just queued */ + struct rcu_batch batch_queue; + /* callbacks try to do the first check_zero */ + struct rcu_batch batch_check0; + /* callbacks done with the first check_zero and the flip */ + struct rcu_batch batch_check1; + struct rcu_batch batch_done; + struct delayed_work work; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +}; + +void process_srcu(struct work_struct *work); + +#define __SRCU_STRUCT_INIT(name) \ + { \ + .completed = -300, \ + .per_cpu_ref = &name##_srcu_array, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .running = false, \ + .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ + .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \ + .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \ + .batch_done = RCU_BATCH_INIT(name.batch_done), \ + .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\ + __SRCU_DEP_MAP_INIT(name) \ + } + +/* + * Define and initialize a srcu struct at build time. + * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it. 
+ * + * Note that although DEFINE_STATIC_SRCU() hides the name from other + * files, the per-CPU variable rules nevertheless require that the + * chosen name be globally unique. These rules also prohibit use of + * DEFINE_STATIC_SRCU() within a function. If these rules are too + * restrictive, declare the srcu_struct manually. For example, in + * each file: + * + * static struct srcu_struct my_srcu; + * + * Then, before the first use of each my_srcu, manually initialize it: + * + * init_srcu_struct(&my_srcu); + * + * See include/linux/percpu-defs.h for the rules on per-CPU variables. + */ +#define __DEFINE_SRCU(name, is_static) \ + static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\ + is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) +#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) +#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) + +void synchronize_srcu_expedited(struct srcu_struct *sp); +void srcu_barrier(struct srcu_struct *sp); +unsigned long srcu_batches_completed(struct srcu_struct *sp); + +#endif diff --git a/init/Kconfig b/init/Kconfig index d269f2ca17b8..558cc3638ab9 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -526,15 +526,32 @@ config SRCU permits arbitrary sleeping or blocking within RCU read-side critical sections. +config CLASSIC_SRCU + bool "Use v4.11 classic SRCU implementation" + default n + depends on RCU_EXPERT && SRCU + help + This option selects the traditional well-tested classic SRCU + implementation from v4.11, as might be desired for enterprise + Linux distributions. Without this option, the shiny new + Tiny SRCU and Tree SRCU implementations are used instead. + At some point, it is hoped that Tiny SRCU and Tree SRCU + will accumulate enough test time and confidence to allow + Classic SRCU to be dropped entirely. + + Say Y if you need a rock-solid SRCU. + + Say N if you would like help test Tree SRCU. + config TINY_SRCU bool - default y if TINY_RCU + default y if TINY_RCU && !CLASSIC_SRCU help This option selects the single-CPU non-preemptible version of SRCU. config TREE_SRCU bool - default y if !TINY_RCU + default y if !TINY_RCU && !CLASSIC_SRCU help This option selects the full-fledged version of SRCU. diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile index b853214a2b99..158e6593d58c 100644 --- a/kernel/rcu/Makefile +++ b/kernel/rcu/Makefile @@ -3,7 +3,8 @@ KCOV_INSTRUMENT := n obj-y += update.o sync.o -obj-$(CONFIG_TREE_SRCU) += srcu.o +obj-$(CONFIG_CLASSIC_SRCU) += srcu.o +obj-$(CONFIG_TREE_SRCU) += srcutree.o obj-$(CONFIG_TINY_SRCU) += srcutiny.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 9cbb8a7b909d..6f344b6748a8 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -562,7 +562,7 @@ static void srcu_torture_stats(void) int __maybe_unused cpu; int idx; -#ifdef CONFIG_TREE_SRCU +#if defined(CONFIG_TREE_SRCU) || defined(CONFIG_CLASSIC_SRCU) idx = srcu_ctlp->completed & 0x1; pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", torture_type, TORTURE_FLAG, idx); diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index 3cfcc59bddf3..584d8a983883 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -36,16 +36,75 @@ #include #include -#include #include "rcu.h" +/* + * Initialize an rcu_batch structure to empty. 
+ */ +static inline void rcu_batch_init(struct rcu_batch *b) +{ + b->head = NULL; + b->tail = &b->head; +} + +/* + * Enqueue a callback onto the tail of the specified rcu_batch structure. + */ +static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head) +{ + *b->tail = head; + b->tail = &head->next; +} + +/* + * Is the specified rcu_batch structure empty? + */ +static inline bool rcu_batch_empty(struct rcu_batch *b) +{ + return b->tail == &b->head; +} + +/* + * Remove the callback at the head of the specified rcu_batch structure + * and return a pointer to it, or return NULL if the structure is empty. + */ +static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b) +{ + struct rcu_head *head; + + if (rcu_batch_empty(b)) + return NULL; + + head = b->head; + b->head = head->next; + if (b->tail == &head->next) + rcu_batch_init(b); + + return head; +} + +/* + * Move all callbacks from the rcu_batch structure specified by "from" to + * the structure specified by "to". + */ +static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from) +{ + if (!rcu_batch_empty(from)) { + *to->tail = from->head; + to->tail = from->tail; + rcu_batch_init(from); + } +} + static int init_srcu_struct_fields(struct srcu_struct *sp) { sp->completed = 0; - sp->srcu_gp_seq = 0; - atomic_set(&sp->srcu_exp_cnt, 0); spin_lock_init(&sp->queue_lock); - rcu_segcblist_init(&sp->srcu_cblist); + sp->running = false; + rcu_batch_init(&sp->batch_queue); + rcu_batch_init(&sp->batch_check0); + rcu_batch_init(&sp->batch_check1); + rcu_batch_init(&sp->batch_done); INIT_DELAYED_WORK(&sp->work, process_srcu); sp->per_cpu_ref = alloc_percpu(struct srcu_array); return sp->per_cpu_ref ? 0 : -ENOMEM; @@ -180,8 +239,6 @@ static bool srcu_readers_active(struct srcu_struct *sp) return sum; } -#define SRCU_INTERVAL 1 - /** * cleanup_srcu_struct - deconstruct a sleep-RCU structure * @sp: structure to clean up. @@ -197,16 +254,8 @@ static bool srcu_readers_active(struct srcu_struct *sp) */ void cleanup_srcu_struct(struct srcu_struct *sp) { - WARN_ON_ONCE(atomic_read(&sp->srcu_exp_cnt)); if (WARN_ON(srcu_readers_active(sp))) return; /* Leakage unless caller handles error. */ - if (WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist))) - return; /* Leakage unless caller handles error. */ - flush_delayed_work(&sp->work); - if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE)) { - pr_info("cleanup_srcu_struct: Active srcu_struct %lu CBs %c state: %d\n", rcu_segcblist_n_cbs(&sp->srcu_cblist), ".E"[rcu_segcblist_empty(&sp->srcu_cblist)], rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); - return; /* Caller forgot to stop doing call_srcu()? */ - } free_percpu(sp->per_cpu_ref); sp->per_cpu_ref = NULL; } @@ -245,36 +294,26 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock); * We use an adaptive strategy for synchronize_srcu() and especially for * synchronize_srcu_expedited(). We spin for a fixed time period * (defined below) to allow SRCU readers to exit their read-side critical - * sections. If there are still some readers after a few microseconds, - * we repeatedly block for 1-millisecond time periods. + * sections. If there are still some readers after 10 microseconds, + * we repeatedly block for 1-millisecond time periods. This approach + * has done well in testing, so there is no need for a config parameter. */ #define SRCU_RETRY_CHECK_DELAY 5 +#define SYNCHRONIZE_SRCU_TRYCOUNT 2 +#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT 12 /* - * Start an SRCU grace period. 
- */ -static void srcu_gp_start(struct srcu_struct *sp) -{ - int state; - - rcu_segcblist_accelerate(&sp->srcu_cblist, - rcu_seq_snap(&sp->srcu_gp_seq)); - rcu_seq_start(&sp->srcu_gp_seq); - state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); - WARN_ON_ONCE(state != SRCU_STATE_SCAN1); -} - -/* - * Wait until all readers counted by array index idx complete, but - * loop an additional time if there is an expedited grace period pending. - * The caller must ensure that ->completed is not changed while checking. + * @@@ Wait until all pre-existing readers complete. Such readers + * will have used the index specified by "idx". + * the caller should ensures the ->completed is not changed while checking + * and idx = (->completed & 1) ^ 1 */ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) { for (;;) { if (srcu_readers_active_idx_check(sp, idx)) return true; - if (--trycount + !!atomic_read(&sp->srcu_exp_cnt) <= 0) + if (--trycount <= 0) return false; udelay(SRCU_RETRY_CHECK_DELAY); } @@ -299,19 +338,6 @@ static void srcu_flip(struct srcu_struct *sp) smp_mb(); /* D */ /* Pairs with C. */ } -/* - * End an SRCU grace period. - */ -static void srcu_gp_end(struct srcu_struct *sp) -{ - rcu_seq_end(&sp->srcu_gp_seq); - - spin_lock_irq(&sp->queue_lock); - rcu_segcblist_advance(&sp->srcu_cblist, - rcu_seq_current(&sp->srcu_gp_seq)); - spin_unlock_irq(&sp->queue_lock); -} - /* * Enqueue an SRCU callback on the specified srcu_struct structure, * initiating grace-period processing if it is not already running. @@ -348,24 +374,26 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head, head->func = func; spin_lock_irqsave(&sp->queue_lock, flags); smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ - rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); - if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) { - srcu_gp_start(sp); + rcu_batch_queue(&sp->batch_queue, head); + if (!sp->running) { + sp->running = true; queue_delayed_work(system_power_efficient_wq, &sp->work, 0); } spin_unlock_irqrestore(&sp->queue_lock, flags); } EXPORT_SYMBOL_GPL(call_srcu); -static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); +static void srcu_advance_batches(struct srcu_struct *sp, int trycount); +static void srcu_reschedule(struct srcu_struct *sp); /* * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). */ -static void __synchronize_srcu(struct srcu_struct *sp) +static void __synchronize_srcu(struct srcu_struct *sp, int trycount) { struct rcu_synchronize rcu; struct rcu_head *head = &rcu.head; + bool done = false; RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || lock_is_held(&rcu_bh_lock_map) || @@ -373,8 +401,6 @@ static void __synchronize_srcu(struct srcu_struct *sp) lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); - if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) - return; might_sleep(); init_completion(&rcu.completion); @@ -382,47 +408,31 @@ static void __synchronize_srcu(struct srcu_struct *sp) head->func = wakeme_after_rcu; spin_lock_irq(&sp->queue_lock); smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. 
*/ - if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) { + if (!sp->running) { /* steal the processing owner */ - rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); - srcu_gp_start(sp); + sp->running = true; + rcu_batch_queue(&sp->batch_check0, head); spin_unlock_irq(&sp->queue_lock); + + srcu_advance_batches(sp, trycount); + if (!rcu_batch_empty(&sp->batch_done)) { + BUG_ON(sp->batch_done.head != head); + rcu_batch_dequeue(&sp->batch_done); + done = true; + } /* give the processing owner to work_struct */ - srcu_reschedule(sp, 0); + srcu_reschedule(sp); } else { - rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); + rcu_batch_queue(&sp->batch_queue, head); spin_unlock_irq(&sp->queue_lock); } - wait_for_completion(&rcu.completion); - smp_mb(); /* Caller's later accesses after GP. */ -} - -/** - * synchronize_srcu_expedited - Brute-force SRCU grace period - * @sp: srcu_struct with which to synchronize. - * - * Wait for an SRCU grace period to elapse, but be more aggressive about - * spinning rather than blocking when waiting. - * - * Note that synchronize_srcu_expedited() has the same deadlock and - * memory-ordering properties as does synchronize_srcu(). - */ -void synchronize_srcu_expedited(struct srcu_struct *sp) -{ - bool do_norm = rcu_gp_is_normal(); - - if (!do_norm) { - atomic_inc(&sp->srcu_exp_cnt); - smp_mb__after_atomic(); /* increment before GP. */ - } - __synchronize_srcu(sp); - if (!do_norm) { - smp_mb__before_atomic(); /* GP before decrement. */ - atomic_dec(&sp->srcu_exp_cnt); + if (!done) { + wait_for_completion(&rcu.completion); + smp_mb(); /* Caller's later accesses after GP. */ } + } -EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); /** * synchronize_srcu - wait for prior SRCU read-side critical-section completion @@ -465,13 +475,28 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); */ void synchronize_srcu(struct srcu_struct *sp) { - if (rcu_gp_is_expedited()) - synchronize_srcu_expedited(sp); - else - __synchronize_srcu(sp); + __synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal()) + ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT + : SYNCHRONIZE_SRCU_TRYCOUNT); } EXPORT_SYMBOL_GPL(synchronize_srcu); +/** + * synchronize_srcu_expedited - Brute-force SRCU grace period + * @sp: srcu_struct with which to synchronize. + * + * Wait for an SRCU grace period to elapse, but be more aggressive about + * spinning rather than blocking when waiting. + * + * Note that synchronize_srcu_expedited() has the same deadlock and + * memory-ordering properties as does synchronize_srcu(). + */ +void synchronize_srcu_expedited(struct srcu_struct *sp) +{ + __synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT); +} +EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); + /** * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. * @sp: srcu_struct on which to wait for in-flight callbacks. @@ -495,13 +520,29 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp) } EXPORT_SYMBOL_GPL(srcu_batches_completed); +#define SRCU_CALLBACK_BATCH 10 +#define SRCU_INTERVAL 1 + +/* + * Move any new SRCU callbacks to the first stage of the SRCU grace + * period pipeline. + */ +static void srcu_collect_new(struct srcu_struct *sp) +{ + if (!rcu_batch_empty(&sp->batch_queue)) { + spin_lock_irq(&sp->queue_lock); + rcu_batch_move(&sp->batch_check0, &sp->batch_queue); + spin_unlock_irq(&sp->queue_lock); + } +} + /* * Core SRCU state machine. Advance callbacks from ->batch_check0 to * ->batch_check1 and then to ->batch_done as readers drain. 
*/ -static void srcu_advance_batches(struct srcu_struct *sp) +static void srcu_advance_batches(struct srcu_struct *sp, int trycount) { - int idx; + int idx = 1 ^ (sp->completed & 1); /* * Because readers might be delayed for an extended period after @@ -509,44 +550,50 @@ static void srcu_advance_batches(struct srcu_struct *sp) * might well be readers using both idx=0 and idx=1. We therefore * need to wait for readers to clear from both index values before * invoking a callback. - * - * The load-acquire ensures that we see the accesses performed - * by the prior grace period. */ - idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ - if (idx == SRCU_STATE_IDLE) { - spin_lock_irq(&sp->queue_lock); - if (rcu_segcblist_empty(&sp->srcu_cblist)) { - spin_unlock_irq(&sp->queue_lock); - return; - } - idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); - if (idx == SRCU_STATE_IDLE) - srcu_gp_start(sp); - spin_unlock_irq(&sp->queue_lock); - if (idx != SRCU_STATE_IDLE) - return; /* Someone else started the grace period. */ - } - if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { - idx = 1 ^ (sp->completed & 1); - if (!try_check_zero(sp, idx, 1)) - return; /* readers present, retry later. */ - srcu_flip(sp); - rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); - } + if (rcu_batch_empty(&sp->batch_check0) && + rcu_batch_empty(&sp->batch_check1)) + return; /* no callbacks need to be advanced */ - if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { + if (!try_check_zero(sp, idx, trycount)) + return; /* failed to advance, will try after SRCU_INTERVAL */ - /* - * SRCU read-side critical sections are normally short, - * so check at least twice in quick succession after a flip. - */ - idx = 1 ^ (sp->completed & 1); - if (!try_check_zero(sp, idx, 2)) - return; /* readers present, retry after later. */ - srcu_gp_end(sp); - } + /* + * The callbacks in ->batch_check1 have already done with their + * first zero check and flip back when they were enqueued on + * ->batch_check0 in a previous invocation of srcu_advance_batches(). + * (Presumably try_check_zero() returned false during that + * invocation, leaving the callbacks stranded on ->batch_check1.) + * They are therefore ready to invoke, so move them to ->batch_done. + */ + rcu_batch_move(&sp->batch_done, &sp->batch_check1); + + if (rcu_batch_empty(&sp->batch_check0)) + return; /* no callbacks need to be advanced */ + srcu_flip(sp); + + /* + * The callbacks in ->batch_check0 just finished their + * first check zero and flip, so move them to ->batch_check1 + * for future checking on the other idx. + */ + rcu_batch_move(&sp->batch_check1, &sp->batch_check0); + + /* + * SRCU read-side critical sections are normally short, so check + * at least twice in quick succession after a flip. + */ + trycount = trycount < 2 ? 2 : trycount; + if (!try_check_zero(sp, idx^1, trycount)) + return; /* failed to advance, will try after SRCU_INTERVAL */ + + /* + * The callbacks in ->batch_check1 have now waited for all + * pre-existing readers using both idx values. They are therefore + * ready to invoke, so move them to ->batch_done. 
+ */ + rcu_batch_move(&sp->batch_done, &sp->batch_check1); } /* @@ -557,48 +604,45 @@ static void srcu_advance_batches(struct srcu_struct *sp) */ static void srcu_invoke_callbacks(struct srcu_struct *sp) { - struct rcu_cblist ready_cbs; - struct rcu_head *rhp; + int i; + struct rcu_head *head; - spin_lock_irq(&sp->queue_lock); - if (!rcu_segcblist_ready_cbs(&sp->srcu_cblist)) { - spin_unlock_irq(&sp->queue_lock); - return; - } - rcu_cblist_init(&ready_cbs); - rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs); - spin_unlock_irq(&sp->queue_lock); - rhp = rcu_cblist_dequeue(&ready_cbs); - for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { + for (i = 0; i < SRCU_CALLBACK_BATCH; i++) { + head = rcu_batch_dequeue(&sp->batch_done); + if (!head) + break; local_bh_disable(); - rhp->func(rhp); + head->func(head); local_bh_enable(); } - spin_lock_irq(&sp->queue_lock); - rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs); - spin_unlock_irq(&sp->queue_lock); } /* * Finished one round of SRCU grace period. Start another if there are * more SRCU callbacks queued, otherwise put SRCU into not-running state. */ -static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) +static void srcu_reschedule(struct srcu_struct *sp) { bool pending = true; - int state; - if (rcu_segcblist_empty(&sp->srcu_cblist)) { + if (rcu_batch_empty(&sp->batch_done) && + rcu_batch_empty(&sp->batch_check1) && + rcu_batch_empty(&sp->batch_check0) && + rcu_batch_empty(&sp->batch_queue)) { spin_lock_irq(&sp->queue_lock); - state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); - if (rcu_segcblist_empty(&sp->srcu_cblist) && - state == SRCU_STATE_IDLE) + if (rcu_batch_empty(&sp->batch_done) && + rcu_batch_empty(&sp->batch_check1) && + rcu_batch_empty(&sp->batch_check0) && + rcu_batch_empty(&sp->batch_queue)) { + sp->running = false; pending = false; + } spin_unlock_irq(&sp->queue_lock); } if (pending) - queue_delayed_work(system_power_efficient_wq, &sp->work, delay); + queue_delayed_work(system_power_efficient_wq, + &sp->work, SRCU_INTERVAL); } /* @@ -610,8 +654,9 @@ void process_srcu(struct work_struct *work) sp = container_of(work, struct srcu_struct, work.work); - srcu_advance_batches(sp); + srcu_collect_new(sp); + srcu_advance_batches(sp, 1); srcu_invoke_callbacks(sp); - srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL); + srcu_reschedule(sp); } EXPORT_SYMBOL_GPL(process_srcu); diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c new file mode 100644 index 000000000000..da676b0d016b --- /dev/null +++ b/kernel/rcu/srcutree.c @@ -0,0 +1,613 @@ +/* + * Sleepable Read-Copy Update mechanism for mutual exclusion. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. 
+ * + * Copyright (C) IBM Corporation, 2006 + * Copyright (C) Fujitsu, 2012 + * + * Author: Paul McKenney + * Lai Jiangshan + * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU/ *.txt + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "rcu.h" + +static int init_srcu_struct_fields(struct srcu_struct *sp) +{ + sp->completed = 0; + sp->srcu_gp_seq = 0; + atomic_set(&sp->srcu_exp_cnt, 0); + spin_lock_init(&sp->queue_lock); + rcu_segcblist_init(&sp->srcu_cblist); + INIT_DELAYED_WORK(&sp->work, process_srcu); + sp->per_cpu_ref = alloc_percpu(struct srcu_array); + return sp->per_cpu_ref ? 0 : -ENOMEM; +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +int __init_srcu_struct(struct srcu_struct *sp, const char *name, + struct lock_class_key *key) +{ + /* Don't re-initialize a lock while it is held. */ + debug_check_no_locks_freed((void *)sp, sizeof(*sp)); + lockdep_init_map(&sp->dep_map, name, key, 0); + return init_srcu_struct_fields(sp); +} +EXPORT_SYMBOL_GPL(__init_srcu_struct); + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +/** + * init_srcu_struct - initialize a sleep-RCU structure + * @sp: structure to initialize. + * + * Must invoke this on a given srcu_struct before passing that srcu_struct + * to any other function. Each srcu_struct represents a separate domain + * of SRCU protection. + */ +int init_srcu_struct(struct srcu_struct *sp) +{ + return init_srcu_struct_fields(sp); +} +EXPORT_SYMBOL_GPL(init_srcu_struct); + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +/* + * Returns approximate total of the readers' ->lock_count[] values for the + * rank of per-CPU counters specified by idx. + */ +static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) +{ + int cpu; + unsigned long sum = 0; + + for_each_possible_cpu(cpu) { + struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu); + + sum += READ_ONCE(cpuc->lock_count[idx]); + } + return sum; +} + +/* + * Returns approximate total of the readers' ->unlock_count[] values for the + * rank of per-CPU counters specified by idx. + */ +static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) +{ + int cpu; + unsigned long sum = 0; + + for_each_possible_cpu(cpu) { + struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu); + + sum += READ_ONCE(cpuc->unlock_count[idx]); + } + return sum; +} + +/* + * Return true if the number of pre-existing readers is determined to + * be zero. + */ +static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) +{ + unsigned long unlocks; + + unlocks = srcu_readers_unlock_idx(sp, idx); + + /* + * Make sure that a lock is always counted if the corresponding + * unlock is counted. Needs to be a smp_mb() as the read side may + * contain a read from a variable that is written to before the + * synchronize_srcu() in the write side. In this case smp_mb()s + * A and B act like the store buffering pattern. + * + * This smp_mb() also pairs with smp_mb() C to prevent accesses + * after the synchronize_srcu() from being executed before the + * grace period ends. + */ + smp_mb(); /* A */ + + /* + * If the locks are the same as the unlocks, then there must have + * been no readers on this index at some time in between. This does + * not mean that there are no more readers, as one could have read + * the current index but not have incremented the lock counter yet. 
+ * + * Possible bug: There is no guarantee that there haven't been + * ULONG_MAX increments of ->lock_count[] since the unlocks were + * counted, meaning that this could return true even if there are + * still active readers. Since there are no memory barriers around + * srcu_flip(), the CPU is not required to increment ->completed + * before running srcu_readers_unlock_idx(), which means that there + * could be an arbitrarily large number of critical sections that + * execute after srcu_readers_unlock_idx() but use the old value + * of ->completed. + */ + return srcu_readers_lock_idx(sp, idx) == unlocks; +} + +/** + * srcu_readers_active - returns true if there are readers. and false + * otherwise + * @sp: which srcu_struct to count active readers (holding srcu_read_lock). + * + * Note that this is not an atomic primitive, and can therefore suffer + * severe errors when invoked on an active srcu_struct. That said, it + * can be useful as an error check at cleanup time. + */ +static bool srcu_readers_active(struct srcu_struct *sp) +{ + int cpu; + unsigned long sum = 0; + + for_each_possible_cpu(cpu) { + struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu); + + sum += READ_ONCE(cpuc->lock_count[0]); + sum += READ_ONCE(cpuc->lock_count[1]); + sum -= READ_ONCE(cpuc->unlock_count[0]); + sum -= READ_ONCE(cpuc->unlock_count[1]); + } + return sum; +} + +#define SRCU_INTERVAL 1 + +/** + * cleanup_srcu_struct - deconstruct a sleep-RCU structure + * @sp: structure to clean up. + * + * Must invoke this after you are finished using a given srcu_struct that + * was initialized via init_srcu_struct(), else you leak memory. + */ +void cleanup_srcu_struct(struct srcu_struct *sp) +{ + WARN_ON_ONCE(atomic_read(&sp->srcu_exp_cnt)); + if (WARN_ON(srcu_readers_active(sp))) + return; /* Leakage unless caller handles error. */ + if (WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist))) + return; /* Leakage unless caller handles error. */ + flush_delayed_work(&sp->work); + if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE)) { + pr_info("cleanup_srcu_struct: Active srcu_struct %lu CBs %c state: %d\n", rcu_segcblist_n_cbs(&sp->srcu_cblist), ".E"[rcu_segcblist_empty(&sp->srcu_cblist)], rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); + return; /* Caller forgot to stop doing call_srcu()? */ + } + free_percpu(sp->per_cpu_ref); + sp->per_cpu_ref = NULL; +} +EXPORT_SYMBOL_GPL(cleanup_srcu_struct); + +/* + * Counts the new reader in the appropriate per-CPU element of the + * srcu_struct. Must be called from process context. + * Returns an index that must be passed to the matching srcu_read_unlock(). + */ +int __srcu_read_lock(struct srcu_struct *sp) +{ + int idx; + + idx = READ_ONCE(sp->completed) & 0x1; + __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); + smp_mb(); /* B */ /* Avoid leaking the critical section. */ + return idx; +} +EXPORT_SYMBOL_GPL(__srcu_read_lock); + +/* + * Removes the count for the old reader from the appropriate per-CPU + * element of the srcu_struct. Note that this may well be a different + * CPU than that which was incremented by the corresponding srcu_read_lock(). + * Must be called from process context. + */ +void __srcu_read_unlock(struct srcu_struct *sp, int idx) +{ + smp_mb(); /* C */ /* Avoid leaking the critical section. */ + this_cpu_inc(sp->per_cpu_ref->unlock_count[idx]); +} +EXPORT_SYMBOL_GPL(__srcu_read_unlock); + +/* + * We use an adaptive strategy for synchronize_srcu() and especially for + * synchronize_srcu_expedited(). 
We spin for a fixed time period + * (defined below) to allow SRCU readers to exit their read-side critical + * sections. If there are still some readers after a few microseconds, + * we repeatedly block for 1-millisecond time periods. + */ +#define SRCU_RETRY_CHECK_DELAY 5 + +/* + * Start an SRCU grace period. + */ +static void srcu_gp_start(struct srcu_struct *sp) +{ + int state; + + rcu_segcblist_accelerate(&sp->srcu_cblist, + rcu_seq_snap(&sp->srcu_gp_seq)); + rcu_seq_start(&sp->srcu_gp_seq); + state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); + WARN_ON_ONCE(state != SRCU_STATE_SCAN1); +} + +/* + * Wait until all readers counted by array index idx complete, but + * loop an additional time if there is an expedited grace period pending. + * The caller must ensure that ->completed is not changed while checking. + */ +static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) +{ + for (;;) { + if (srcu_readers_active_idx_check(sp, idx)) + return true; + if (--trycount + !!atomic_read(&sp->srcu_exp_cnt) <= 0) + return false; + udelay(SRCU_RETRY_CHECK_DELAY); + } +} + +/* + * Increment the ->completed counter so that future SRCU readers will + * use the other rank of the ->(un)lock_count[] arrays. This allows + * us to wait for pre-existing readers in a starvation-free manner. + */ +static void srcu_flip(struct srcu_struct *sp) +{ + WRITE_ONCE(sp->completed, sp->completed + 1); + + /* + * Ensure that if the updater misses an __srcu_read_unlock() + * increment, that task's next __srcu_read_lock() will see the + * above counter update. Note that both this memory barrier + * and the one in srcu_readers_active_idx_check() provide the + * guarantee for __srcu_read_lock(). + */ + smp_mb(); /* D */ /* Pairs with C. */ +} + +/* + * End an SRCU grace period. + */ +static void srcu_gp_end(struct srcu_struct *sp) +{ + rcu_seq_end(&sp->srcu_gp_seq); + + spin_lock_irq(&sp->queue_lock); + rcu_segcblist_advance(&sp->srcu_cblist, + rcu_seq_current(&sp->srcu_gp_seq)); + spin_unlock_irq(&sp->queue_lock); +} + +/* + * Enqueue an SRCU callback on the specified srcu_struct structure, + * initiating grace-period processing if it is not already running. + * + * Note that all CPUs must agree that the grace period extended beyond + * all pre-existing SRCU read-side critical section. On systems with + * more than one CPU, this means that when "func()" is invoked, each CPU + * is guaranteed to have executed a full memory barrier since the end of + * its last corresponding SRCU read-side critical section whose beginning + * preceded the call to call_rcu(). It also means that each CPU executing + * an SRCU read-side critical section that continues beyond the start of + * "func()" must have executed a memory barrier after the call_rcu() + * but before the beginning of that SRCU read-side critical section. + * Note that these guarantees include CPUs that are offline, idle, or + * executing in user mode, as well as CPUs that are executing in the kernel. + * + * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the + * resulting SRCU callback function "func()", then both CPU A and CPU + * B are guaranteed to execute a full memory barrier during the time + * interval between the call to call_rcu() and the invocation of "func()". + * This guarantee applies even if CPU A and CPU B are the same CPU (but + * again only if the system has more than one CPU). 
+ * + * Of course, these guarantees apply only for invocations of call_srcu(), + * srcu_read_lock(), and srcu_read_unlock() that are all passed the same + * srcu_struct structure. + */ +void call_srcu(struct srcu_struct *sp, struct rcu_head *head, + rcu_callback_t func) +{ + unsigned long flags; + + head->next = NULL; + head->func = func; + spin_lock_irqsave(&sp->queue_lock, flags); + smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ + rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); + if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) { + srcu_gp_start(sp); + queue_delayed_work(system_power_efficient_wq, &sp->work, 0); + } + spin_unlock_irqrestore(&sp->queue_lock, flags); +} +EXPORT_SYMBOL_GPL(call_srcu); + +static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); + +/* + * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). + */ +static void __synchronize_srcu(struct srcu_struct *sp) +{ + struct rcu_synchronize rcu; + struct rcu_head *head = &rcu.head; + + RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || + lock_is_held(&rcu_bh_lock_map) || + lock_is_held(&rcu_lock_map) || + lock_is_held(&rcu_sched_lock_map), + "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); + + if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) + return; + might_sleep(); + init_completion(&rcu.completion); + + head->next = NULL; + head->func = wakeme_after_rcu; + spin_lock_irq(&sp->queue_lock); + smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ + if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) { + /* steal the processing owner */ + rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); + srcu_gp_start(sp); + spin_unlock_irq(&sp->queue_lock); + /* give the processing owner to work_struct */ + srcu_reschedule(sp, 0); + } else { + rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); + spin_unlock_irq(&sp->queue_lock); + } + + wait_for_completion(&rcu.completion); + smp_mb(); /* Caller's later accesses after GP. */ +} + +/** + * synchronize_srcu_expedited - Brute-force SRCU grace period + * @sp: srcu_struct with which to synchronize. + * + * Wait for an SRCU grace period to elapse, but be more aggressive about + * spinning rather than blocking when waiting. + * + * Note that synchronize_srcu_expedited() has the same deadlock and + * memory-ordering properties as does synchronize_srcu(). + */ +void synchronize_srcu_expedited(struct srcu_struct *sp) +{ + bool do_norm = rcu_gp_is_normal(); + + if (!do_norm) { + atomic_inc(&sp->srcu_exp_cnt); + smp_mb__after_atomic(); /* increment before GP. */ + } + __synchronize_srcu(sp); + if (!do_norm) { + smp_mb__before_atomic(); /* GP before decrement. */ + atomic_dec(&sp->srcu_exp_cnt); + } +} +EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); + +/** + * synchronize_srcu - wait for prior SRCU read-side critical-section completion + * @sp: srcu_struct with which to synchronize. + * + * Wait for the count to drain to zero of both indexes. To avoid the + * possible starvation of synchronize_srcu(), it waits for the count of + * the index=((->completed & 1) ^ 1) to drain to zero at first, + * and then flip the completed and wait for the count of the other index. + * + * Can block; must be called from process context. + * + * Note that it is illegal to call synchronize_srcu() from the corresponding + * SRCU read-side critical section; doing so will result in deadlock. 
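
The deadlock rule stated above can be made concrete with a small sketch; the ssa and ssb domains and the two functions are invented for illustration and are not part of this patch.

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(ssa);
DEFINE_STATIC_SRCU(ssb);

/* Illegal: waits for a grace period on the very domain being read. */
static void srcu_self_deadlock(void)
{
	int idx = srcu_read_lock(&ssa);

	synchronize_srcu(&ssa);		/* never returns: self-deadlock */
	srcu_read_unlock(&ssa, idx);
}

/* Legal: waits on a different domain; fine while ssa -> ssb stays acyclic. */
static void srcu_cross_domain(void)
{
	int idx = srcu_read_lock(&ssa);

	synchronize_srcu(&ssb);
	srcu_read_unlock(&ssa, idx);
}
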
+ * However, it is perfectly legal to call synchronize_srcu() on one + * srcu_struct from some other srcu_struct's read-side critical section, + * as long as the resulting graph of srcu_structs is acyclic. + * + * There are memory-ordering constraints implied by synchronize_srcu(). + * On systems with more than one CPU, when synchronize_srcu() returns, + * each CPU is guaranteed to have executed a full memory barrier since + * the end of its last corresponding SRCU-sched read-side critical section + * whose beginning preceded the call to synchronize_srcu(). In addition, + * each CPU having an SRCU read-side critical section that extends beyond + * the return from synchronize_srcu() is guaranteed to have executed a + * full memory barrier after the beginning of synchronize_srcu() and before + * the beginning of that SRCU read-side critical section. Note that these + * guarantees include CPUs that are offline, idle, or executing in user mode, + * as well as CPUs that are executing in the kernel. + * + * Furthermore, if CPU A invoked synchronize_srcu(), which returned + * to its caller on CPU B, then both CPU A and CPU B are guaranteed + * to have executed a full memory barrier during the execution of + * synchronize_srcu(). This guarantee applies even if CPU A and CPU B + * are the same CPU, but again only if the system has more than one CPU. + * + * Of course, these memory-ordering guarantees apply only when + * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are + * passed the same srcu_struct structure. + */ +void synchronize_srcu(struct srcu_struct *sp) +{ + if (rcu_gp_is_expedited()) + synchronize_srcu_expedited(sp); + else + __synchronize_srcu(sp); +} +EXPORT_SYMBOL_GPL(synchronize_srcu); + +/** + * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. + * @sp: srcu_struct on which to wait for in-flight callbacks. + */ +void srcu_barrier(struct srcu_struct *sp) +{ + synchronize_srcu(sp); +} +EXPORT_SYMBOL_GPL(srcu_barrier); + +/** + * srcu_batches_completed - return batches completed. + * @sp: srcu_struct on which to report batch completion. + * + * Report the number of batches, correlated with, but not necessarily + * precisely the same as, the number of grace periods that have elapsed. + */ +unsigned long srcu_batches_completed(struct srcu_struct *sp) +{ + return sp->completed; +} +EXPORT_SYMBOL_GPL(srcu_batches_completed); + +/* + * Core SRCU state machine. Advance callbacks from ->batch_check0 to + * ->batch_check1 and then to ->batch_done as readers drain. + */ +static void srcu_advance_batches(struct srcu_struct *sp) +{ + int idx; + + /* + * Because readers might be delayed for an extended period after + * fetching ->completed for their index, at any point in time there + * might well be readers using both idx=0 and idx=1. We therefore + * need to wait for readers to clear from both index values before + * invoking a callback. + * + * The load-acquire ensures that we see the accesses performed + * by the prior grace period. + */ + idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ + if (idx == SRCU_STATE_IDLE) { + spin_lock_irq(&sp->queue_lock); + if (rcu_segcblist_empty(&sp->srcu_cblist)) { + spin_unlock_irq(&sp->queue_lock); + return; + } + idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); + if (idx == SRCU_STATE_IDLE) + srcu_gp_start(sp); + spin_unlock_irq(&sp->queue_lock); + if (idx != SRCU_STATE_IDLE) + return; /* Someone else started the grace period. 
*/ + } + + if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { + idx = 1 ^ (sp->completed & 1); + if (!try_check_zero(sp, idx, 1)) + return; /* readers present, retry later. */ + srcu_flip(sp); + rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); + } + + if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { + + /* + * SRCU read-side critical sections are normally short, + * so check at least twice in quick succession after a flip. + */ + idx = 1 ^ (sp->completed & 1); + if (!try_check_zero(sp, idx, 2)) + return; /* readers present, retry after later. */ + srcu_gp_end(sp); + } +} + +/* + * Invoke a limited number of SRCU callbacks that have passed through + * their grace period. If there are more to do, SRCU will reschedule + * the workqueue. Note that needed memory barriers have been executed + * in this task's context by srcu_readers_active_idx_check(). + */ +static void srcu_invoke_callbacks(struct srcu_struct *sp) +{ + struct rcu_cblist ready_cbs; + struct rcu_head *rhp; + + spin_lock_irq(&sp->queue_lock); + if (!rcu_segcblist_ready_cbs(&sp->srcu_cblist)) { + spin_unlock_irq(&sp->queue_lock); + return; + } + rcu_cblist_init(&ready_cbs); + rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs); + spin_unlock_irq(&sp->queue_lock); + rhp = rcu_cblist_dequeue(&ready_cbs); + for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { + local_bh_disable(); + rhp->func(rhp); + local_bh_enable(); + } + spin_lock_irq(&sp->queue_lock); + rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs); + spin_unlock_irq(&sp->queue_lock); +} + +/* + * Finished one round of SRCU grace period. Start another if there are + * more SRCU callbacks queued, otherwise put SRCU into not-running state. + */ +static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) +{ + bool pending = true; + int state; + + if (rcu_segcblist_empty(&sp->srcu_cblist)) { + spin_lock_irq(&sp->queue_lock); + state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); + if (rcu_segcblist_empty(&sp->srcu_cblist) && + state == SRCU_STATE_IDLE) + pending = false; + spin_unlock_irq(&sp->queue_lock); + } + + if (pending) + queue_delayed_work(system_power_efficient_wq, &sp->work, delay); +} + +/* + * This is the work-queue function that handles SRCU grace periods. + */ +void process_srcu(struct work_struct *work) +{ + struct srcu_struct *sp; + + sp = container_of(work, struct srcu_struct, work.work); + + srcu_advance_batches(sp); + srcu_invoke_callbacks(sp); + srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL); +} +EXPORT_SYMBOL_GPL(process_srcu); -- cgit v1.2.3 From 5f0d5a3ae7cff0d7fa943c199c3a2e44f23e1fac Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 18 Jan 2017 02:53:44 -0800 Subject: mm: Rename SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU A group of Linux kernel hackers reported chasing a bug that resulted from their assumption that SLAB_DESTROY_BY_RCU provided an existence guarantee, that is, that no block from such a slab would be reallocated during an RCU read-side critical section. Of course, that is not the case. Instead, SLAB_DESTROY_BY_RCU only prevents freeing of an entire slab of blocks. However, there is a phrase for this, namely "type safety". This commit therefore renames SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU in order to avoid future instances of this sort of confusion. Signed-off-by: Paul E. 
McKenney Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrew Morton Cc: Acked-by: Johannes Weiner Acked-by: Vlastimil Babka [ paulmck: Add comments mentioning the old name, as requested by Eric Dumazet, in order to help people familiar with the old name find the new one. ] Acked-by: David Rientjes --- Documentation/RCU/00-INDEX | 2 +- Documentation/RCU/rculist_nulls.txt | 6 +++--- Documentation/RCU/whatisRCU.txt | 3 ++- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_request.h | 2 +- drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c | 2 +- fs/jbd2/journal.c | 2 +- fs/signalfd.c | 2 +- include/linux/dma-fence.h | 4 ++-- include/linux/slab.h | 6 ++++-- include/net/sock.h | 2 +- kernel/fork.c | 4 ++-- kernel/signal.c | 2 +- mm/kasan/kasan.c | 6 +++--- mm/kmemcheck.c | 2 +- mm/rmap.c | 4 ++-- mm/slab.c | 6 +++--- mm/slab.h | 4 ++-- mm/slab_common.c | 6 +++--- mm/slob.c | 6 +++--- mm/slub.c | 12 ++++++------ net/dccp/ipv4.c | 2 +- net/dccp/ipv6.c | 2 +- net/ipv4/tcp_ipv4.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- net/llc/af_llc.c | 2 +- net/llc/llc_conn.c | 4 ++-- net/llc/llc_sap.c | 2 +- net/netfilter/nf_conntrack_core.c | 8 ++++---- net/smc/af_smc.c | 2 +- 30 files changed, 57 insertions(+), 54 deletions(-) (limited to 'include') diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX index f773a264ae02..1672573b037a 100644 --- a/Documentation/RCU/00-INDEX +++ b/Documentation/RCU/00-INDEX @@ -17,7 +17,7 @@ rcu_dereference.txt rcubarrier.txt - RCU and Unloadable Modules rculist_nulls.txt - - RCU list primitives for use with SLAB_DESTROY_BY_RCU + - RCU list primitives for use with SLAB_TYPESAFE_BY_RCU rcuref.txt - Reference-count design for elements of lists/arrays protected by RCU rcu.txt diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt index 18f9651ff23d..8151f0195f76 100644 --- a/Documentation/RCU/rculist_nulls.txt +++ b/Documentation/RCU/rculist_nulls.txt @@ -1,5 +1,5 @@ Using hlist_nulls to protect read-mostly linked lists and -objects using SLAB_DESTROY_BY_RCU allocations. +objects using SLAB_TYPESAFE_BY_RCU allocations. Please read the basics in Documentation/RCU/listRCU.txt @@ -7,7 +7,7 @@ Using special makers (called 'nulls') is a convenient way to solve following problem : A typical RCU linked list managing objects which are -allocated with SLAB_DESTROY_BY_RCU kmem_cache can +allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can use following algos : 1) Lookup algo @@ -96,7 +96,7 @@ unlock_chain(); // typically a spin_unlock() 3) Remove algo -------------- Nothing special here, we can use a standard RCU hlist deletion. -But thanks to SLAB_DESTROY_BY_RCU, beware a deleted object can be reused +But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused very very fast (before the end of RCU grace period) if (put_last_reference_on(obj) { diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 5cbd8b2395b8..91c912e86915 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -925,7 +925,8 @@ d. Do you need RCU grace periods to complete even in the face e. Is your workload too update-intensive for normal use of RCU, but inappropriate for other synchronization mechanisms? - If so, consider SLAB_DESTROY_BY_RCU. But please be careful! + If so, consider SLAB_TYPESAFE_BY_RCU (which was originally + named SLAB_DESTROY_BY_RCU). But please be careful! f. 
Do you need read-side critical sections that are respected even though they are in the middle of the idle loop, during diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6908123162d1..3b668895ac24 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4552,7 +4552,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv) dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | - SLAB_DESTROY_BY_RCU); + SLAB_TYPESAFE_BY_RCU); if (!dev_priv->requests) goto err_vmas; diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index ea511f06efaf..9ee2750e1dde 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -493,7 +493,7 @@ static inline struct drm_i915_gem_request * __i915_gem_active_get_rcu(const struct i915_gem_active *active) { /* Performing a lockless retrieval of the active request is super - * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing + * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing * slab of request objects will not be freed whilst we hold the * RCU read lock. It does not guarantee that the request itself * will not be freed and then *reused*. Viz, diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c index 12647af5a336..e7fb47e84a93 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c @@ -1071,7 +1071,7 @@ int ldlm_init(void) ldlm_lock_slab = kmem_cache_create("ldlm_locks", sizeof(struct ldlm_lock), 0, SLAB_HWCACHE_ALIGN | - SLAB_DESTROY_BY_RCU, NULL); + SLAB_TYPESAFE_BY_RCU, NULL); if (!ldlm_lock_slab) { kmem_cache_destroy(ldlm_resource_slab); return -ENOMEM; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a1a359bfcc9c..7f8f962454e5 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2340,7 +2340,7 @@ static int jbd2_journal_init_journal_head_cache(void) jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head", sizeof(struct journal_head), 0, /* offset */ - SLAB_TEMPORARY | SLAB_DESTROY_BY_RCU, + SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU, NULL); /* ctor */ retval = 0; if (!jbd2_journal_head_cache) { diff --git a/fs/signalfd.c b/fs/signalfd.c index 270221fcef42..7e3d71109f51 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -38,7 +38,7 @@ void signalfd_cleanup(struct sighand_struct *sighand) /* * The lockless check can race with remove_wait_queue() in progress, * but in this case its caller should run under rcu_read_lock() and - * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return. + * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return. */ if (likely(!waitqueue_active(wqh))) return; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 6048fa404e57..a5195a7d6f77 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -229,7 +229,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) * * Function returns NULL if no refcount could be obtained, or the fence. * This function handles acquiring a reference to a fence that may be - * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU), + * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU), * so long as the caller is using RCU on the pointer to the fence. 
* * An alternative mechanism is to employ a seqlock to protect a bunch of @@ -257,7 +257,7 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) * have successfully acquire a reference to it. If it no * longer matches, we are holding a reference to some other * reallocated pointer. This is possible if the allocator - * is using a freelist like SLAB_DESTROY_BY_RCU where the + * is using a freelist like SLAB_TYPESAFE_BY_RCU where the * fence remains valid for the RCU grace period, but it * may be reallocated. When using such allocators, we are * responsible for ensuring the reference we get is to diff --git a/include/linux/slab.h b/include/linux/slab.h index 3c37a8c51921..04a7f7993e67 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -28,7 +28,7 @@ #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ /* - * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS! + * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! * * This delays freeing the SLAB page by a grace period, it does _NOT_ * delay object freeing. This means that if you do kmem_cache_free() @@ -61,8 +61,10 @@ * * rcu_read_lock before reading the address, then rcu_read_unlock after * taking the spinlock within the structure expected at that address. + * + * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU. */ -#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ +#define SLAB_TYPESAFE_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ diff --git a/include/net/sock.h b/include/net/sock.h index 5e5997654db6..59cdccaa30e7 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -993,7 +993,7 @@ struct smc_hashinfo; struct module; /* - * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes + * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes * un-modified. Special care is taken when initializing object to zero. */ static inline void sk_prot_clear_nulls(struct sock *sk, int size) diff --git a/kernel/fork.c b/kernel/fork.c index 6c463c80e93d..9330ce24f1bb 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1313,7 +1313,7 @@ void __cleanup_sighand(struct sighand_struct *sighand) if (atomic_dec_and_test(&sighand->count)) { signalfd_cleanup(sighand); /* - * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it + * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it * without an RCU grace period, see __lock_task_sighand(). 
*/ kmem_cache_free(sighand_cachep, sighand); @@ -2144,7 +2144,7 @@ void __init proc_caches_init(void) { sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, diff --git a/kernel/signal.c b/kernel/signal.c index 7e59ebc2c25e..6df5f72158e4 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1237,7 +1237,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, } /* * This sighand can be already freed and even reused, but - * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which + * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which * initializes ->siglock: this slab can't go away, it has * the same object type, ->siglock can't be reinitialized. * diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 98b27195e38b..4b20061102f6 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -413,7 +413,7 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size, *size += sizeof(struct kasan_alloc_meta); /* Add free meta. */ - if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor || + if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || cache->object_size < sizeof(struct kasan_free_meta)) { cache->kasan_info.free_meta_offset = *size; *size += sizeof(struct kasan_free_meta); @@ -561,7 +561,7 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object) unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); /* RCU slabs could be legally used after free within the RCU period */ - if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU)) + if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) return; kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); @@ -572,7 +572,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object) s8 shadow_byte; /* RCU slabs could be legally used after free within the RCU period */ - if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU)) + if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) return false; shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index 5bf191756a4a..2d5959c5f7c5 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c @@ -95,7 +95,7 @@ void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) { /* TODO: RCU freeing is unsupported for now; hide false positives. */ - if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) + if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU)) kmemcheck_mark_freed(object, size); } diff --git a/mm/rmap.c b/mm/rmap.c index 49ed681ccc7b..8ffd59df8a3f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -430,7 +430,7 @@ static void anon_vma_ctor(void *data) void __init anon_vma_init(void) { anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), - 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, + 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, anon_vma_ctor); anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC|SLAB_ACCOUNT); @@ -481,7 +481,7 @@ struct anon_vma *page_get_anon_vma(struct page *page) * If this page is still mapped, then its anon_vma cannot have been * freed. 
But if it has been unmapped, we have no security against the * anon_vma structure being freed and reused (for another anon_vma: - * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero() + * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() * above cannot corrupt). */ if (!page_mapped(page)) { diff --git a/mm/slab.c b/mm/slab.c index 807d86c76908..93c827864862 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1728,7 +1728,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page) freelist = page->freelist; slab_destroy_debugcheck(cachep, page); - if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) + if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU)) call_rcu(&page->rcu_head, kmem_rcu_free); else kmem_freepages(cachep, page); @@ -1924,7 +1924,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, cachep->num = 0; - if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU) + if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU) return false; left = calculate_slab_order(cachep, size, @@ -2030,7 +2030,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 2 * sizeof(unsigned long long))) flags |= SLAB_RED_ZONE | SLAB_STORE_USER; - if (!(flags & SLAB_DESTROY_BY_RCU)) + if (!(flags & SLAB_TYPESAFE_BY_RCU)) flags |= SLAB_POISON; #endif #endif diff --git a/mm/slab.h b/mm/slab.h index 65e7c3fcac72..9cfcf099709c 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -126,7 +126,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size, /* Legal flag mask for kmem_cache_create(), for various configurations */ #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS ) + SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS ) #if defined(CONFIG_DEBUG_SLAB) #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) @@ -415,7 +415,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s) * back there or track user information then we can * only use the space before that information. */ - if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) + if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) return s->inuse; /* * Else we can use all the padding etc for the allocation diff --git a/mm/slab_common.c b/mm/slab_common.c index 09d0e849b07f..01a0fe2eb332 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -39,7 +39,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work, * Set of flags that will prevent slab merging */ #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ - SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ + SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \ SLAB_FAILSLAB | SLAB_KASAN) #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ @@ -500,7 +500,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work) struct kmem_cache *s, *s2; /* - * On destruction, SLAB_DESTROY_BY_RCU kmem_caches are put on the + * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the * @slab_caches_to_rcu_destroy list. 
The slab pages are freed * through RCU and and the associated kmem_cache are dereferenced * while freeing the pages, so the kmem_caches should be freed only @@ -537,7 +537,7 @@ static int shutdown_cache(struct kmem_cache *s) memcg_unlink_cache(s); list_del(&s->list); - if (s->flags & SLAB_DESTROY_BY_RCU) { + if (s->flags & SLAB_TYPESAFE_BY_RCU) { list_add_tail(&s->list, &slab_caches_to_rcu_destroy); schedule_work(&slab_caches_to_rcu_destroy_work); } else { diff --git a/mm/slob.c b/mm/slob.c index eac04d4357ec..1bae78d71096 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -126,7 +126,7 @@ static inline void clear_slob_page_free(struct page *sp) /* * struct slob_rcu is inserted at the tail of allocated slob blocks, which - * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free + * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free * the block using call_rcu. */ struct slob_rcu { @@ -524,7 +524,7 @@ EXPORT_SYMBOL(ksize); int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) { - if (flags & SLAB_DESTROY_BY_RCU) { + if (flags & SLAB_TYPESAFE_BY_RCU) { /* leave room for rcu footer at the end of object */ c->size += sizeof(struct slob_rcu); } @@ -598,7 +598,7 @@ static void kmem_rcu_free(struct rcu_head *head) void kmem_cache_free(struct kmem_cache *c, void *b) { kmemleak_free_recursive(b, c->flags); - if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { + if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) { struct slob_rcu *slob_rcu; slob_rcu = b + (c->size - sizeof(struct slob_rcu)); slob_rcu->size = c->size; diff --git a/mm/slub.c b/mm/slub.c index 7f4bc7027ed5..57e5156f02be 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1687,7 +1687,7 @@ static void rcu_free_slab(struct rcu_head *h) static void free_slab(struct kmem_cache *s, struct page *page) { - if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { + if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { struct rcu_head *head; if (need_reserve_slab_rcu) { @@ -2963,7 +2963,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page, * slab_free_freelist_hook() could have put the items into quarantine. * If so, no need to free them. */ - if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU)) + if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU)) return; do_slab_free(s, page, head, tail, cnt, addr); } @@ -3433,7 +3433,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) * the slab may touch the object after free or before allocation * then we should never poison the object itself. 
*/ - if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && + if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && !s->ctor) s->flags |= __OBJECT_POISON; else @@ -3455,7 +3455,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) */ s->inuse = size; - if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || + if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor)) { /* * Relocate free pointer after the object if it is not @@ -3537,7 +3537,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); s->reserved = 0; - if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) + if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU)) s->reserved = sizeof(struct rcu_head); if (!calculate_sizes(s, -1)) @@ -5042,7 +5042,7 @@ SLAB_ATTR_RO(cache_dma); static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) { - return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); + return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); } SLAB_ATTR_RO(destroy_by_rcu); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 409d0cfd3447..90210a0e3888 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -950,7 +950,7 @@ static struct proto dccp_v4_prot = { .orphan_count = &dccp_orphan_count, .max_header = MAX_DCCP_HEADER, .obj_size = sizeof(struct dccp_sock), - .slab_flags = SLAB_DESTROY_BY_RCU, + .slab_flags = SLAB_TYPESAFE_BY_RCU, .rsk_prot = &dccp_request_sock_ops, .twsk_prot = &dccp_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 233b57367758..b4019a5e4551 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1012,7 +1012,7 @@ static struct proto dccp_v6_prot = { .orphan_count = &dccp_orphan_count, .max_header = MAX_DCCP_HEADER, .obj_size = sizeof(struct dccp6_sock), - .slab_flags = SLAB_DESTROY_BY_RCU, + .slab_flags = SLAB_TYPESAFE_BY_RCU, .rsk_prot = &dccp6_request_sock_ops, .twsk_prot = &dccp6_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9a89b8deafae..82c89abeb989 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2398,7 +2398,7 @@ struct proto tcp_prot = { .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp_sock), - .slab_flags = SLAB_DESTROY_BY_RCU, + .slab_flags = SLAB_TYPESAFE_BY_RCU, .twsk_prot = &tcp_timewait_sock_ops, .rsk_prot = &tcp_request_sock_ops, .h.hashinfo = &tcp_hashinfo, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 60a5295a7de6..bdbc4327ebee 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1919,7 +1919,7 @@ struct proto tcpv6_prot = { .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp6_sock), - .slab_flags = SLAB_DESTROY_BY_RCU, + .slab_flags = SLAB_TYPESAFE_BY_RCU, .twsk_prot = &tcp6_timewait_sock_ops, .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = &tcp_hashinfo, diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 06186d608a27..d096ca563054 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -142,7 +142,7 @@ static struct proto llc_proto = { .name = "LLC", .owner = THIS_MODULE, .obj_size = sizeof(struct llc_sock), - .slab_flags = SLAB_DESTROY_BY_RCU, + .slab_flags = SLAB_TYPESAFE_BY_RCU, }; /** diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 8bc5a1bd2d45..9b02c13d258b 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -506,7 +506,7 @@ static struct sock 
*__llc_lookup_established(struct llc_sap *sap, again: sk_nulls_for_each_rcu(rc, node, laddr_hb) { if (llc_estab_match(sap, daddr, laddr, rc)) { - /* Extra checks required by SLAB_DESTROY_BY_RCU */ + /* Extra checks required by SLAB_TYPESAFE_BY_RCU */ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt))) goto again; if (unlikely(llc_sk(rc)->sap != sap || @@ -565,7 +565,7 @@ static struct sock *__llc_lookup_listener(struct llc_sap *sap, again: sk_nulls_for_each_rcu(rc, node, laddr_hb) { if (llc_listener_match(sap, laddr, rc)) { - /* Extra checks required by SLAB_DESTROY_BY_RCU */ + /* Extra checks required by SLAB_TYPESAFE_BY_RCU */ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt))) goto again; if (unlikely(llc_sk(rc)->sap != sap || diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 5404d0d195cc..63b6ab056370 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -328,7 +328,7 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap, again: sk_nulls_for_each_rcu(rc, node, laddr_hb) { if (llc_dgram_match(sap, laddr, rc)) { - /* Extra checks required by SLAB_DESTROY_BY_RCU */ + /* Extra checks required by SLAB_TYPESAFE_BY_RCU */ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt))) goto again; if (unlikely(llc_sk(rc)->sap != sap || diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 071b97fcbefb..fdcdac7916b2 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -914,7 +914,7 @@ static unsigned int early_drop_list(struct net *net, continue; /* kill only if still in same netns -- might have moved due to - * SLAB_DESTROY_BY_RCU rules. + * SLAB_TYPESAFE_BY_RCU rules. * * We steal the timer reference. If that fails timer has * already fired or someone else deleted it. Just drop ref @@ -1069,7 +1069,7 @@ __nf_conntrack_alloc(struct net *net, /* * Do not use kmem_cache_zalloc(), as this cache uses - * SLAB_DESTROY_BY_RCU. + * SLAB_TYPESAFE_BY_RCU. 
*/ ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); if (ct == NULL) @@ -1114,7 +1114,7 @@ void nf_conntrack_free(struct nf_conn *ct) struct net *net = nf_ct_net(ct); /* A freed object has refcnt == 0, that's - * the golden rule for SLAB_DESTROY_BY_RCU + * the golden rule for SLAB_TYPESAFE_BY_RCU */ NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0); @@ -1878,7 +1878,7 @@ int nf_conntrack_init_start(void) nf_conntrack_cachep = kmem_cache_create("nf_conntrack", sizeof(struct nf_conn), NFCT_INFOMASK + 1, - SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL); + SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL); if (!nf_conntrack_cachep) goto err_cachep; diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 85837ab90e89..d34bbd6d8f38 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -101,7 +101,7 @@ struct proto smc_proto = { .unhash = smc_unhash_sk, .obj_size = sizeof(struct smc_sock), .h.smc_hash = &smc_v4_hashinfo, - .slab_flags = SLAB_DESTROY_BY_RCU, + .slab_flags = SLAB_TYPESAFE_BY_RCU, }; EXPORT_SYMBOL_GPL(smc_proto); -- cgit v1.2.3 From df0225a45a8818396d01a3bbd6d1310c25401de2 Mon Sep 17 00:00:00 2001 From: Mars Cheng Date: Sat, 8 Apr 2017 09:20:29 +0800 Subject: clk: mediatek: add mt6797 clock IDs Signed-off-by: Mars Cheng Signed-off-by: Stephen Boyd --- include/dt-bindings/clock/mt6797-clk.h | 281 +++++++++++++++++++++++++++++++++ 1 file changed, 281 insertions(+) create mode 100644 include/dt-bindings/clock/mt6797-clk.h (limited to 'include') diff --git a/include/dt-bindings/clock/mt6797-clk.h b/include/dt-bindings/clock/mt6797-clk.h new file mode 100644 index 000000000000..2f25a5aca019 --- /dev/null +++ b/include/dt-bindings/clock/mt6797-clk.h @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * Author: Kevin Chen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MT6797_H +#define _DT_BINDINGS_CLK_MT6797_H + +/* TOPCKGEN */ +#define CLK_TOP_MUX_ULPOSC_AXI_CK_MUX_PRE 1 +#define CLK_TOP_MUX_ULPOSC_AXI_CK_MUX 2 +#define CLK_TOP_MUX_AXI 3 +#define CLK_TOP_MUX_MEM 4 +#define CLK_TOP_MUX_DDRPHYCFG 5 +#define CLK_TOP_MUX_MM 6 +#define CLK_TOP_MUX_PWM 7 +#define CLK_TOP_MUX_VDEC 8 +#define CLK_TOP_MUX_VENC 9 +#define CLK_TOP_MUX_MFG 10 +#define CLK_TOP_MUX_CAMTG 11 +#define CLK_TOP_MUX_UART 12 +#define CLK_TOP_MUX_SPI 13 +#define CLK_TOP_MUX_ULPOSC_SPI_CK_MUX 14 +#define CLK_TOP_MUX_USB20 15 +#define CLK_TOP_MUX_MSDC50_0_HCLK 16 +#define CLK_TOP_MUX_MSDC50_0 17 +#define CLK_TOP_MUX_MSDC30_1 18 +#define CLK_TOP_MUX_MSDC30_2 19 +#define CLK_TOP_MUX_AUDIO 20 +#define CLK_TOP_MUX_AUD_INTBUS 21 +#define CLK_TOP_MUX_PMICSPI 22 +#define CLK_TOP_MUX_SCP 23 +#define CLK_TOP_MUX_ATB 24 +#define CLK_TOP_MUX_MJC 25 +#define CLK_TOP_MUX_DPI0 26 +#define CLK_TOP_MUX_AUD_1 27 +#define CLK_TOP_MUX_AUD_2 28 +#define CLK_TOP_MUX_SSUSB_TOP_SYS 29 +#define CLK_TOP_MUX_SPM 30 +#define CLK_TOP_MUX_BSI_SPI 31 +#define CLK_TOP_MUX_AUDIO_H 32 +#define CLK_TOP_MUX_ANC_MD32 33 +#define CLK_TOP_MUX_MFG_52M 34 +#define CLK_TOP_SYSPLL_CK 35 +#define CLK_TOP_SYSPLL_D2 36 +#define CLK_TOP_SYSPLL1_D2 37 +#define CLK_TOP_SYSPLL1_D4 38 +#define CLK_TOP_SYSPLL1_D8 39 +#define CLK_TOP_SYSPLL1_D16 40 +#define CLK_TOP_SYSPLL_D3 41 +#define CLK_TOP_SYSPLL_D3_D3 42 +#define CLK_TOP_SYSPLL2_D2 43 +#define CLK_TOP_SYSPLL2_D4 44 +#define CLK_TOP_SYSPLL2_D8 45 +#define CLK_TOP_SYSPLL_D5 46 +#define CLK_TOP_SYSPLL3_D2 47 +#define CLK_TOP_SYSPLL3_D4 48 +#define CLK_TOP_SYSPLL_D7 49 +#define CLK_TOP_SYSPLL4_D2 50 +#define CLK_TOP_SYSPLL4_D4 51 +#define CLK_TOP_UNIVPLL_CK 52 +#define CLK_TOP_UNIVPLL_D7 53 +#define CLK_TOP_UNIVPLL_D26 54 +#define CLK_TOP_SSUSB_PHY_48M_CK 55 +#define CLK_TOP_USB_PHY48M_CK 56 +#define CLK_TOP_UNIVPLL_D2 57 +#define CLK_TOP_UNIVPLL1_D2 58 +#define CLK_TOP_UNIVPLL1_D4 59 +#define CLK_TOP_UNIVPLL1_D8 60 +#define CLK_TOP_UNIVPLL_D3 61 +#define CLK_TOP_UNIVPLL2_D2 62 +#define CLK_TOP_UNIVPLL2_D4 63 +#define CLK_TOP_UNIVPLL2_D8 64 +#define CLK_TOP_UNIVPLL_D5 65 +#define CLK_TOP_UNIVPLL3_D2 66 +#define CLK_TOP_UNIVPLL3_D4 67 +#define CLK_TOP_UNIVPLL3_D8 68 +#define CLK_TOP_ULPOSC_CK_ORG 69 +#define CLK_TOP_ULPOSC_CK 70 +#define CLK_TOP_ULPOSC_D2 71 +#define CLK_TOP_ULPOSC_D3 72 +#define CLK_TOP_ULPOSC_D4 73 +#define CLK_TOP_ULPOSC_D8 74 +#define CLK_TOP_ULPOSC_D10 75 +#define CLK_TOP_APLL1_CK 76 +#define CLK_TOP_APLL2_CK 77 +#define CLK_TOP_MFGPLL_CK 78 +#define CLK_TOP_MFGPLL_D2 79 +#define CLK_TOP_IMGPLL_CK 80 +#define CLK_TOP_IMGPLL_D2 81 +#define CLK_TOP_IMGPLL_D4 82 +#define CLK_TOP_CODECPLL_CK 83 +#define CLK_TOP_CODECPLL_D2 84 +#define CLK_TOP_VDECPLL_CK 85 +#define CLK_TOP_TVDPLL_CK 86 +#define CLK_TOP_TVDPLL_D2 87 +#define CLK_TOP_TVDPLL_D4 88 +#define CLK_TOP_TVDPLL_D8 89 +#define CLK_TOP_TVDPLL_D16 90 +#define CLK_TOP_MSDCPLL_CK 91 +#define CLK_TOP_MSDCPLL_D2 92 +#define CLK_TOP_MSDCPLL_D4 93 +#define CLK_TOP_MSDCPLL_D8 94 +#define CLK_TOP_NR 95 + +/* APMIXED_SYS */ +#define CLK_APMIXED_MAINPLL 1 +#define CLK_APMIXED_UNIVPLL 2 +#define CLK_APMIXED_MFGPLL 3 +#define CLK_APMIXED_MSDCPLL 4 +#define CLK_APMIXED_IMGPLL 5 +#define CLK_APMIXED_TVDPLL 6 +#define CLK_APMIXED_CODECPLL 7 +#define CLK_APMIXED_VDECPLL 8 +#define CLK_APMIXED_APLL1 9 +#define CLK_APMIXED_APLL2 10 +#define CLK_APMIXED_NR 11 + +/* INFRA_SYS */ +#define CLK_INFRA_PMIC_TMR 1 +#define CLK_INFRA_PMIC_AP 2 +#define CLK_INFRA_PMIC_MD 3 +#define CLK_INFRA_PMIC_CONN 4 +#define 
CLK_INFRA_SCP 5 +#define CLK_INFRA_SEJ 6 +#define CLK_INFRA_APXGPT 7 +#define CLK_INFRA_SEJ_13M 8 +#define CLK_INFRA_ICUSB 9 +#define CLK_INFRA_GCE 10 +#define CLK_INFRA_THERM 11 +#define CLK_INFRA_I2C0 12 +#define CLK_INFRA_I2C1 13 +#define CLK_INFRA_I2C2 14 +#define CLK_INFRA_I2C3 15 +#define CLK_INFRA_PWM_HCLK 16 +#define CLK_INFRA_PWM1 17 +#define CLK_INFRA_PWM2 18 +#define CLK_INFRA_PWM3 19 +#define CLK_INFRA_PWM4 20 +#define CLK_INFRA_PWM 21 +#define CLK_INFRA_UART0 22 +#define CLK_INFRA_UART1 23 +#define CLK_INFRA_UART2 24 +#define CLK_INFRA_UART3 25 +#define CLK_INFRA_MD2MD_CCIF_0 26 +#define CLK_INFRA_MD2MD_CCIF_1 27 +#define CLK_INFRA_MD2MD_CCIF_2 28 +#define CLK_INFRA_FHCTL 29 +#define CLK_INFRA_BTIF 30 +#define CLK_INFRA_MD2MD_CCIF_3 31 +#define CLK_INFRA_SPI 32 +#define CLK_INFRA_MSDC0 33 +#define CLK_INFRA_MD2MD_CCIF_4 34 +#define CLK_INFRA_MSDC1 35 +#define CLK_INFRA_MSDC2 36 +#define CLK_INFRA_MD2MD_CCIF_5 37 +#define CLK_INFRA_GCPU 38 +#define CLK_INFRA_TRNG 39 +#define CLK_INFRA_AUXADC 40 +#define CLK_INFRA_CPUM 41 +#define CLK_INFRA_AP_C2K_CCIF_0 42 +#define CLK_INFRA_AP_C2K_CCIF_1 43 +#define CLK_INFRA_CLDMA 44 +#define CLK_INFRA_DISP_PWM 45 +#define CLK_INFRA_AP_DMA 46 +#define CLK_INFRA_DEVICE_APC 47 +#define CLK_INFRA_L2C_SRAM 48 +#define CLK_INFRA_CCIF_AP 49 +#define CLK_INFRA_AUDIO 50 +#define CLK_INFRA_CCIF_MD 51 +#define CLK_INFRA_DRAMC_F26M 52 +#define CLK_INFRA_I2C4 53 +#define CLK_INFRA_I2C_APPM 54 +#define CLK_INFRA_I2C_GPUPM 55 +#define CLK_INFRA_I2C2_IMM 56 +#define CLK_INFRA_I2C2_ARB 57 +#define CLK_INFRA_I2C3_IMM 58 +#define CLK_INFRA_I2C3_ARB 59 +#define CLK_INFRA_I2C5 60 +#define CLK_INFRA_SYS_CIRQ 61 +#define CLK_INFRA_SPI1 62 +#define CLK_INFRA_DRAMC_B_F26M 63 +#define CLK_INFRA_ANC_MD32 64 +#define CLK_INFRA_ANC_MD32_32K 65 +#define CLK_INFRA_DVFS_SPM1 66 +#define CLK_INFRA_AES_TOP0 67 +#define CLK_INFRA_AES_TOP1 68 +#define CLK_INFRA_SSUSB_BUS 69 +#define CLK_INFRA_SPI2 70 +#define CLK_INFRA_SPI3 71 +#define CLK_INFRA_SPI4 72 +#define CLK_INFRA_SPI5 73 +#define CLK_INFRA_IRTX 74 +#define CLK_INFRA_SSUSB_SYS 75 +#define CLK_INFRA_SSUSB_REF 76 +#define CLK_INFRA_AUDIO_26M 77 +#define CLK_INFRA_AUDIO_26M_PAD_TOP 78 +#define CLK_INFRA_MODEM_TEMP_SHARE 79 +#define CLK_INFRA_VAD_WRAP_SOC 80 +#define CLK_INFRA_DRAMC_CONF 81 +#define CLK_INFRA_DRAMC_B_CONF 82 +#define CLK_INFRA_MFG_VCG 83 +#define CLK_INFRA_13M 84 +#define CLK_INFRA_NR 85 + +/* IMG_SYS */ +#define CLK_IMG_FDVT 1 +#define CLK_IMG_DPE 2 +#define CLK_IMG_DIP 3 +#define CLK_IMG_LARB6 4 +#define CLK_IMG_NR 5 + +/* MM_SYS */ +#define CLK_MM_SMI_COMMON 1 +#define CLK_MM_SMI_LARB0 2 +#define CLK_MM_SMI_LARB5 3 +#define CLK_MM_CAM_MDP 4 +#define CLK_MM_MDP_RDMA0 5 +#define CLK_MM_MDP_RDMA1 6 +#define CLK_MM_MDP_RSZ0 7 +#define CLK_MM_MDP_RSZ1 8 +#define CLK_MM_MDP_RSZ2 9 +#define CLK_MM_MDP_TDSHP 10 +#define CLK_MM_MDP_COLOR 11 +#define CLK_MM_MDP_WDMA 12 +#define CLK_MM_MDP_WROT0 13 +#define CLK_MM_MDP_WROT1 14 +#define CLK_MM_FAKE_ENG 15 +#define CLK_MM_DISP_OVL0 16 +#define CLK_MM_DISP_OVL1 17 +#define CLK_MM_DISP_OVL0_2L 18 +#define CLK_MM_DISP_OVL1_2L 19 +#define CLK_MM_DISP_RDMA0 20 +#define CLK_MM_DISP_RDMA1 21 +#define CLK_MM_DISP_WDMA0 22 +#define CLK_MM_DISP_WDMA1 23 +#define CLK_MM_DISP_COLOR 24 +#define CLK_MM_DISP_CCORR 25 +#define CLK_MM_DISP_AAL 26 +#define CLK_MM_DISP_GAMMA 27 +#define CLK_MM_DISP_OD 28 +#define CLK_MM_DISP_DITHER 29 +#define CLK_MM_DISP_UFOE 30 +#define CLK_MM_DISP_DSC 31 +#define CLK_MM_DISP_SPLIT 32 +#define CLK_MM_DSI0_MM_CLOCK 33 +#define CLK_MM_DSI1_MM_CLOCK 34 
+#define CLK_MM_DPI_MM_CLOCK 35 +#define CLK_MM_DPI_INTERFACE_CLOCK 36 +#define CLK_MM_LARB4_AXI_ASIF_MM_CLOCK 37 +#define CLK_MM_LARB4_AXI_ASIF_MJC_CLOCK 38 +#define CLK_MM_DISP_OVL0_MOUT_CLOCK 39 +#define CLK_MM_FAKE_ENG2 40 +#define CLK_MM_DSI0_INTERFACE_CLOCK 41 +#define CLK_MM_DSI1_INTERFACE_CLOCK 42 +#define CLK_MM_NR 43 + +/* VDEC_SYS */ +#define CLK_VDEC_CKEN_ENG 1 +#define CLK_VDEC_ACTIVE 2 +#define CLK_VDEC_CKEN 3 +#define CLK_VDEC_LARB1_CKEN 4 +#define CLK_VDEC_NR 5 + +/* VENC_SYS */ +#define CLK_VENC_0 1 +#define CLK_VENC_1 2 +#define CLK_VENC_2 3 +#define CLK_VENC_3 4 +#define CLK_VENC_NR 5 + +#endif /* _DT_BINDINGS_CLK_MT6797_H */ -- cgit v1.2.3 From 468d01bec544286bb5283f012b95b5b84636565b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 2 Feb 2017 11:40:15 -0800 Subject: types: Update obsolete callback_head comment The comment header for callback_head (and thus for rcu_head) states that the bottom two bits of a pointer to these structures must be zero. This is obsolete: The new requirement is that only the bottom bit need be zero. This commit therefore updates this comment. Signed-off-by: Paul E. McKenney --- include/linux/types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/types.h b/include/linux/types.h index 1e7bd24848fc..258099a4ed82 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -209,7 +209,7 @@ struct ustat { * naturally due ABI requirements, but some architectures (like CRIS) have * weird ABI and we need to ask it explicitly. * - * The alignment is required to guarantee that bits 0 and 1 of @next will be + * The alignment is required to guarantee that bit 0 of @next will be * clear under normal conditions -- as long as we use call_rcu(), * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. * -- cgit v1.2.3 From 48ac34666ff76843d8743db1cc78b303759916f1 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 27 Feb 2017 21:14:19 +0200 Subject: hlist_add_tail_rcu disable sparse warning sparse is unhappy about this code in hlist_add_tail_rcu: struct hlist_node *i, *last = NULL; for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i)) last = i; This is because hlist_next_rcu and hlist_next_rcu return __rcu pointers. It's a false positive - it's a write side primitive and so does not need to be called in a read side critical section. The following trivial patch disables the warning without changing the behaviour in any way. Note: __hlist_for_each_rcu would also remove the warning but it would be confusing since it calls rcu_derefence and is designed to run in the rcu read side critical section. Signed-off-by: Michael S. Tsirkin Reviewed-by: Steven Rostedt (VMware) Signed-off-by: Paul E. McKenney --- include/linux/rculist.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 4f7a9561b8c4..b1fd8bf85fdc 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -509,7 +509,8 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n, { struct hlist_node *i, *last = NULL; - for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i)) + /* Note: write side code, so rcu accessors are not needed. 
*/ + for (i = h->first; i; i = i->next) last = i; if (last) { -- cgit v1.2.3 From 4a67c9fde04fc1b6752fa68c495310ca3ed29eeb Mon Sep 17 00:00:00 2001 From: RafaÅ‚ MiÅ‚ecki Date: Fri, 31 Mar 2017 11:11:48 +0200 Subject: mtd: use dev_of_node helper in mtd_get_of_node MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows better compile-time optimizations with CONFIG_OF disabled. Signed-off-by: RafaÅ‚ MiÅ‚ecki Acked-by: Boris Brezillon Signed-off-by: Brian Norris --- include/linux/mtd/mtd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index eebdc63cf6af..f8db5b2e4028 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -393,7 +393,7 @@ static inline void mtd_set_of_node(struct mtd_info *mtd, static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) { - return mtd->dev.of_node; + return dev_of_node(&mtd->dev); } static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) -- cgit v1.2.3 From 3f1866779cf8338e1c8bd32e5f6f5424795ef191 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 10 Apr 2017 16:50:58 +0530 Subject: of: dma: Make of_dma_deconfigure() public As part of moving DMA initializing to probe time the of_dma_deconfigure() function will need to be called from different source files. Make it public and move it to drivers/of/device.c where the of_dma_configure() function is. Tested-by: Marek Szyprowski Reviewed-by: Robin Murphy Acked-by: Rob Herring Signed-off-by: Laurent Pinchart Signed-off-by: Joerg Roedel --- drivers/of/device.c | 12 ++++++++++++ drivers/of/platform.c | 5 ----- include/linux/of_device.h | 3 +++ 3 files changed, 15 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/of/device.c b/drivers/of/device.c index b1e6bebda3f3..0d378c03e1a4 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -151,6 +151,18 @@ void of_dma_configure(struct device *dev, struct device_node *np) } EXPORT_SYMBOL_GPL(of_dma_configure); +/** + * of_dma_deconfigure - Clean up DMA configuration + * @dev: Device for which to clean up DMA configuration + * + * Clean up all configuration performed by of_dma_configure_ops() and free all + * resources that have been allocated. 
+ */ +void of_dma_deconfigure(struct device *dev) +{ + arch_teardown_dma_ops(dev); +} + int of_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 5dfcc967dd05..5344db50aa65 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -158,11 +158,6 @@ struct platform_device *of_device_alloc(struct device_node *np, } EXPORT_SYMBOL(of_device_alloc); -static void of_dma_deconfigure(struct device *dev) -{ - arch_teardown_dma_ops(dev); -} - /** * of_platform_device_create_pdata - Alloc, initialize and register an of_device * @np: pointer to node to create device for diff --git a/include/linux/of_device.h b/include/linux/of_device.h index c12dace043f3..af984551cc2b 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -56,6 +56,7 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) } void of_dma_configure(struct device *dev, struct device_node *np); +void of_dma_deconfigure(struct device *dev); #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -105,6 +106,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) } static inline void of_dma_configure(struct device *dev, struct device_node *np) {} +static inline void of_dma_deconfigure(struct device *dev) +{} #endif /* CONFIG_OF */ #endif /* _LINUX_OF_DEVICE_H */ -- cgit v1.2.3 From 09515ef5ddad71c7820e5e428da418b709feeb26 Mon Sep 17 00:00:00 2001 From: Sricharan R Date: Mon, 10 Apr 2017 16:51:01 +0530 Subject: of/acpi: Configure dma operations at probe time for platform/amba/pci bus devices Configuring DMA ops at probe time will allow deferring device probe when the IOMMU isn't available yet. The dma_configure for the device is now called from the generic device_attach callback just before the bus/driver probe is called. This way, configuring the DMA ops for the device would be called at the same place for all bus_types, hence the deferred probing mechanism should work for all buses as well. pci_bus_add_devices (platform/amba)(_device_create/driver_register) | | pci_bus_add_device (device_add/driver_register) | | device_attach device_initial_probe | | __device_attach_driver __device_attach_driver | driver_probe_device | really_probe | dma_configure Similarly on the device/driver_unregister path __device_release_driver is called which inturn calls dma_deconfigure. This patch changes the dma ops configuration to probe time for both OF and ACPI based platform/amba/pci bus devices. Tested-by: Marek Szyprowski Tested-by: Hanjun Guo Reviewed-by: Robin Murphy Acked-by: Rob Herring Acked-by: Bjorn Helgaas (drivers/pci part) Acked-by: Rafael J. 
Wysocki Signed-off-by: Sricharan R Signed-off-by: Joerg Roedel --- drivers/acpi/glue.c | 5 ----- drivers/base/dd.c | 9 +++++++++ drivers/base/dma-mapping.c | 40 ++++++++++++++++++++++++++++++++++++++++ drivers/of/platform.c | 5 +---- drivers/pci/probe.c | 28 ---------------------------- include/linux/dma-mapping.h | 12 ++++++++++++ 6 files changed, 62 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index fb19e1cdb641..c05f24107bfc 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -176,7 +176,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; - enum dev_dma_attr attr; if (has_acpi_companion(dev)) { if (acpi_dev) { @@ -233,10 +232,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); - attr = acpi_get_dma_attr(acpi_dev); - if (attr != DEV_DMA_NOT_SUPPORTED) - acpi_dma_configure(dev, attr); - acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index a1fbf55c4d3a..4882f06d12df 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -356,6 +357,10 @@ re_probe: if (ret) goto pinctrl_bind_failed; + ret = dma_configure(dev); + if (ret) + goto dma_failed; + if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", __func__, dev_name(dev)); @@ -417,6 +422,8 @@ re_probe: goto done; probe_failed: + dma_deconfigure(dev); +dma_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); @@ -826,6 +833,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv->remove(dev); device_links_driver_cleanup(dev); + dma_deconfigure(dev); + devres_release_all(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index efd71cf4fdea..449b948c7427 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -7,9 +7,11 @@ * This file is released under the GPLv2. 
*/ +#include #include #include #include +#include #include #include @@ -341,3 +343,41 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) vunmap(cpu_addr); } #endif + +/* + * Common configuration to enable DMA API use for a device + */ +#include + +int dma_configure(struct device *dev) +{ + struct device *bridge = NULL, *dma_dev = dev; + enum dev_dma_attr attr; + + if (dev_is_pci(dev)) { + bridge = pci_get_host_bridge_device(to_pci_dev(dev)); + dma_dev = bridge; + if (IS_ENABLED(CONFIG_OF) && dma_dev->parent && + dma_dev->parent->of_node) + dma_dev = dma_dev->parent; + } + + if (dma_dev->of_node) { + of_dma_configure(dev, dma_dev->of_node); + } else if (has_acpi_companion(dma_dev)) { + attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode)); + if (attr != DEV_DMA_NOT_SUPPORTED) + acpi_dma_configure(dev, attr); + } + + if (bridge) + pci_put_host_bridge_device(bridge); + + return 0; +} + +void dma_deconfigure(struct device *dev) +{ + of_dma_deconfigure(dev); + acpi_dma_deconfigure(dev); +} diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 5344db50aa65..2aa4ebbde9cd 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -186,11 +187,9 @@ static struct platform_device *of_platform_device_create_pdata( dev->dev.bus = &platform_bus_type; dev->dev.platform_data = platform_data; - of_dma_configure(&dev->dev, dev->dev.of_node); of_msi_configure(&dev->dev, dev->dev.of_node); if (of_device_add(dev) != 0) { - of_dma_deconfigure(&dev->dev); platform_device_put(dev); goto err_clear_flag; } @@ -248,7 +247,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node, dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); - of_dma_configure(&dev->dev, dev->dev.of_node); /* Allow the HW Peripheral ID to be overridden */ prop = of_get_property(node, "arm,primecell-periphid", NULL); @@ -542,7 +540,6 @@ static int of_platform_device_destroy(struct device *dev, void *data) amba_device_unregister(to_amba_device(dev)); #endif - of_dma_deconfigure(dev); of_node_clear_flag(dev->of_node, OF_POPULATED); of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index dfc9a2794141..5a8dd43db336 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1893,33 +1893,6 @@ static void pci_set_msi_domain(struct pci_dev *dev) dev_set_msi_domain(&dev->dev, d); } -/** - * pci_dma_configure - Setup DMA configuration - * @dev: ptr to pci_dev struct of the PCI device - * - * Function to update PCI devices's DMA configuration using the same - * info from the OF node or ACPI node of host bridge's parent (if any). 
- */ -static void pci_dma_configure(struct pci_dev *dev) -{ - struct device *bridge = pci_get_host_bridge_device(dev); - - if (IS_ENABLED(CONFIG_OF) && - bridge->parent && bridge->parent->of_node) { - of_dma_configure(&dev->dev, bridge->parent->of_node); - } else if (has_acpi_companion(bridge)) { - struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); - enum dev_dma_attr attr = acpi_get_dma_attr(adev); - - if (attr == DEV_DMA_NOT_SUPPORTED) - dev_warn(&dev->dev, "DMA not supported.\n"); - else - acpi_dma_configure(&dev->dev, attr); - } - - pci_put_host_bridge_device(bridge); -} - void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) { int ret; @@ -1933,7 +1906,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dev->dev.dma_mask = &dev->dma_mask; dev->dev.dma_parms = &dev->dma_parms; dev->dev.coherent_dma_mask = 0xffffffffull; - pci_dma_configure(dev); pci_set_dma_max_seg_size(dev, 65536); pci_set_dma_seg_boundary(dev, 0xffffffff); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 0977317c6835..4f3eecedca2d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -728,6 +728,18 @@ dma_mark_declared_memory_occupied(struct device *dev, } #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ +#ifdef CONFIG_HAS_DMA +int dma_configure(struct device *dev); +void dma_deconfigure(struct device *dev); +#else +static inline int dma_configure(struct device *dev) +{ + return 0; +} + +static inline void dma_deconfigure(struct device *dev) {} +#endif + /* * Managed DMA API */ -- cgit v1.2.3 From 7b07cbefb68d486febf47e13b570fed53d9296b4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 10 Apr 2017 16:51:02 +0530 Subject: iommu: of: Handle IOMMU lookup failure with deferred probing or error Failures to look up an IOMMU when parsing the DT iommus property need to be handled separately from the .of_xlate() failures to support deferred probing. The lack of a registered IOMMU can be caused by the lack of a driver for the IOMMU, the IOMMU device probe not having been performed yet, having been deferred, or having failed. The first case occurs when the device tree describes the bus master and IOMMU topology correctly but no device driver exists for the IOMMU yet or the device driver has not been compiled in. Return NULL, the caller will configure the device without an IOMMU. The second and third cases are handled by deferring the probe of the bus master device which will eventually get reprobed after the IOMMU. The last case is currently handled by deferring the probe of the bus master device as well. A mechanism to either configure the bus master device without an IOMMU or to fail the bus master device probe depending on whether the IOMMU is optional or mandatory would be a good enhancement. 
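The three possible outcomes described above can be summarised in a short sketch. This is illustrative only: iommu_driver_built_in() is a hypothetical stand-in for the check the real code performs against the table of built-in IOMMU drivers, while iommu_ops_from_fwnode() is the existing registration lookup.

	/* Sketch only: how the lookup result maps onto the cases above. */
	static bool iommu_driver_built_in(struct device_node *np); /* hypothetical helper */

	static const struct iommu_ops *of_iommu_lookup(struct device_node *iommu_np)
	{
		const struct iommu_ops *ops = iommu_ops_from_fwnode(&iommu_np->fwnode);

		if (ops)
			return ops;			/* IOMMU registered: use it */
		if (iommu_driver_built_in(iommu_np))
			return ERR_PTR(-EPROBE_DEFER);	/* driver built in, not probed yet */
		return NULL;				/* no driver: configure without an IOMMU */
	}

of_dma_configure() turns the ERR_PTR case into a probe error for the bus master, which the driver core then retries once the IOMMU has probed.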
Tested-by: Marek Szyprowski Reviewed-by: Robin Murphy Acked-by: Rob Herring Signed-off-by: Laurent Pichart Signed-off-by: Sricharan R Signed-off-by: Joerg Roedel --- drivers/base/dma-mapping.c | 5 +++-- drivers/iommu/of_iommu.c | 4 ++-- drivers/of/device.c | 9 +++++++-- include/linux/of_device.h | 9 ++++++--- 4 files changed, 18 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 449b948c7427..82bd45ced7ff 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -353,6 +353,7 @@ int dma_configure(struct device *dev) { struct device *bridge = NULL, *dma_dev = dev; enum dev_dma_attr attr; + int ret = 0; if (dev_is_pci(dev)) { bridge = pci_get_host_bridge_device(to_pci_dev(dev)); @@ -363,7 +364,7 @@ int dma_configure(struct device *dev) } if (dma_dev->of_node) { - of_dma_configure(dev, dma_dev->of_node); + ret = of_dma_configure(dev, dma_dev->of_node); } else if (has_acpi_companion(dma_dev)) { attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode)); if (attr != DEV_DMA_NOT_SUPPORTED) @@ -373,7 +374,7 @@ int dma_configure(struct device *dev) if (bridge) pci_put_host_bridge_device(bridge); - return 0; + return ret; } void dma_deconfigure(struct device *dev) diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index c8be889f5206..9f44ee8ea1bc 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -236,7 +236,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, ops = ERR_PTR(err); } - return IS_ERR(ops) ? NULL : ops; + return ops; } static int __init of_iommu_init(void) @@ -247,7 +247,7 @@ static int __init of_iommu_init(void) for_each_matching_node_and_match(np, matches, &match) { const of_iommu_init_fn init_fn = match->data; - if (init_fn(np)) + if (init_fn && init_fn(np)) pr_err("Failed to initialise IOMMU %s\n", of_node_full_name(np)); } diff --git a/drivers/of/device.c b/drivers/of/device.c index e1ae9e7104a1..8bd3d8c09435 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -82,7 +82,7 @@ int of_device_add(struct platform_device *ofdev) * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events * to fix up DMA configuration. */ -void of_dma_configure(struct device *dev, struct device_node *np) +int of_dma_configure(struct device *dev, struct device_node *np) { u64 dma_addr, paddr, size; int ret; @@ -123,7 +123,7 @@ void of_dma_configure(struct device *dev, struct device_node *np) if (!size) { dev_err(dev, "Adjusted size 0x%llx invalid\n", size); - return; + return -EINVAL; } dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); } @@ -144,10 +144,15 @@ void of_dma_configure(struct device *dev, struct device_node *np) coherent ? " " : " not "); iommu = of_iommu_configure(dev, np); + if (IS_ERR(iommu)) + return PTR_ERR(iommu); + dev_dbg(dev, "device is%sbehind an iommu\n", iommu ? 
" " : " not "); arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); + + return 0; } EXPORT_SYMBOL_GPL(of_dma_configure); diff --git a/include/linux/of_device.h b/include/linux/of_device.h index af984551cc2b..2cacdd81062e 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -55,7 +55,7 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) return of_node_get(cpu_dev->of_node); } -void of_dma_configure(struct device *dev, struct device_node *np); +int of_dma_configure(struct device *dev, struct device_node *np); void of_dma_deconfigure(struct device *dev); #else /* CONFIG_OF */ @@ -104,8 +104,11 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) { return NULL; } -static inline void of_dma_configure(struct device *dev, struct device_node *np) -{} + +static inline int of_dma_configure(struct device *dev, struct device_node *np) +{ + return 0; +} static inline void of_dma_deconfigure(struct device *dev) {} #endif /* CONFIG_OF */ -- cgit v1.2.3 From 5a1bb638d5677053c7addcb228b56da6fccb5d68 Mon Sep 17 00:00:00 2001 From: Sricharan R Date: Mon, 10 Apr 2017 16:51:03 +0530 Subject: drivers: acpi: Handle IOMMU lookup failure with deferred probing or error This is an equivalent to the DT's handling of the iommu master's probe with deferred probing when the corrsponding iommu is not probed yet. The lack of a registered IOMMU can be caused by the lack of a driver for the IOMMU, the IOMMU device probe not having been performed yet, having been deferred, or having failed. The first case occurs when the firmware describes the bus master and IOMMU topology correctly but no device driver exists for the IOMMU yet or the device driver has not been compiled in. Return NULL, the caller will configure the device without an IOMMU. The second and third cases are handled by deferring the probe of the bus master device which will eventually get reprobed after the IOMMU. The last case is currently handled by deferring the probe of the bus master device as well. A mechanism to either configure the bus master device without an IOMMU or to fail the bus master device probe depending on whether the IOMMU is optional or mandatory would be a good enhancement. Tested-by: Hanjun Guo Reviewed-by: Robin Murphy [Lorenzo: Added fixes for dma_coherent_mask overflow, acpi_dma_configure called multiple times for same device] Signed-off-by: Lorenzo Pieralisi Signed-off-by: Sricharan R Signed-off-by: Joerg Roedel --- drivers/acpi/arm64/iort.c | 33 ++++++++++++++++++++++++++++++++- drivers/acpi/scan.c | 11 ++++++++--- drivers/base/dma-mapping.c | 2 +- include/acpi/acpi_bus.h | 2 +- include/linux/acpi.h | 7 +++++-- 5 files changed, 47 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 3dd9ec372dae..e323ece0314d 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -543,6 +543,14 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev, const struct iommu_ops *ops = NULL; int ret = -ENODEV; struct fwnode_handle *iort_fwnode; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + + /* + * If we already translated the fwspec there + * is nothing left to do, return the iommu_ops. 
+ */ + if (fwspec && fwspec->ops) + return fwspec->ops; if (node) { iort_fwnode = iort_get_fwnode(node); @@ -550,8 +558,17 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev, return NULL; ops = iommu_ops_from_fwnode(iort_fwnode); + /* + * If the ops look-up fails, this means that either + * the SMMU drivers have not been probed yet or that + * the SMMU drivers are not built in the kernel; + * Depending on whether the SMMU drivers are built-in + * in the kernel or not, defer the IOMMU configuration + * or just abort it. + */ if (!ops) - return NULL; + return iort_iommu_driver_enabled(node->type) ? + ERR_PTR(-EPROBE_DEFER) : NULL; ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); } @@ -625,12 +642,26 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) while (parent) { ops = iort_iommu_xlate(dev, parent, streamid); + if (IS_ERR_OR_NULL(ops)) + return ops; parent = iort_node_get_id(node, &streamid, IORT_IOMMU_TYPE, i++); } } + /* + * If we have reason to believe the IOMMU driver missed the initial + * add_device callback for dev, replay it to get things in order. + */ + if (!IS_ERR_OR_NULL(ops) && ops->add_device && + dev->bus && !dev->iommu_group) { + int err = ops->add_device(dev); + + if (err) + ops = ERR_PTR(err); + } + return ops; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 192691880d55..2a513cce332e 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1373,20 +1373,25 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) * @dev: The pointer to the device * @attr: device dma attributes */ -void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) +int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) { const struct iommu_ops *iommu; + u64 size; iort_set_dma_mask(dev); iommu = iort_iommu_configure(dev); + if (IS_ERR(iommu)) + return PTR_ERR(iommu); + size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); /* * Assume dma valid range starts at 0 and covers the whole * coherent_dma_mask. 
*/ - arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu, - attr == DEV_DMA_COHERENT); + arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT); + + return 0; } EXPORT_SYMBOL_GPL(acpi_dma_configure); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 82bd45ced7ff..755a2b5354c5 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -368,7 +368,7 @@ int dma_configure(struct device *dev) } else if (has_acpi_companion(dma_dev)) { attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode)); if (attr != DEV_DMA_NOT_SUPPORTED) - acpi_dma_configure(dev, attr); + ret = acpi_dma_configure(dev, attr); } if (bridge) diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index ef0ae8aaa567..2a9a5de0fb00 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -575,7 +575,7 @@ struct acpi_pci_root { bool acpi_dma_supported(struct acpi_device *adev); enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); -void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); +int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); void acpi_dma_deconfigure(struct device *dev); struct acpi_device *acpi_find_child_device(struct acpi_device *parent, diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 9b05886f9773..79d06ef654c9 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -762,8 +762,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) return DEV_DMA_NOT_SUPPORTED; } -static inline void acpi_dma_configure(struct device *dev, - enum dev_dma_attr attr) { } +static inline int acpi_dma_configure(struct device *dev, + enum dev_dma_attr attr) +{ + return 0; +} static inline void acpi_dma_deconfigure(struct device *dev) { } -- cgit v1.2.3 From 316ca8804ea84a782d5ba2163711ebb22116ff5a Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Mon, 10 Apr 2017 16:51:06 +0530 Subject: ACPI/IORT: Remove linker section for IORT entries probing The IORT linker section introduced by commit 34ceea275f62 ("ACPI/IORT: Introduce linker section for IORT entries probing") was needed to make sure SMMU drivers are registered (and therefore probed) in the kernel before devices using the SMMU have a chance to probe in turn. Through the introduction of deferred IOMMU configuration the linker section based IORT probing infrastructure is not needed any longer, in that device/SMMU probe dependencies are managed through the probe deferral mechanism, making the IORT linker section infrastructure unused, so that it can be removed. Remove the unused IORT linker section probing infrastructure from the kernel to complete the ACPI IORT IOMMU configure probe deferral mechanism implementation. 
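In schematic form, using the function names introduced by the preceding patches of this series, the ordering that the linker section used to provide is now enforced at probe time:

	really_probe(master device)
	    dma_configure(dev)
	        acpi_dma_configure() -> iort_iommu_configure()
	            SMMU driver enabled but not yet probed -> ERR_PTR(-EPROBE_DEFER)
	    master probe is deferred and retried after the SMMU has probed

so no link-order guarantee between the SMMU drivers and their masters is required any longer.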
Tested-by: Hanjun Guo Reviewed-by: Robin Murphy Signed-off-by: Lorenzo Pieralisi Cc: Sricharan R Signed-off-by: Joerg Roedel --- drivers/acpi/arm64/iort.c | 2 -- include/asm-generic/vmlinux.lds.h | 1 - include/linux/acpi_iort.h | 3 --- 3 files changed, 6 deletions(-) (limited to 'include') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index e323ece0314d..e7b1940ff13b 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1000,6 +1000,4 @@ void __init acpi_iort_init(void) } iort_init_platform_devices(); - - acpi_probe_device_table(iort); } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 0968d13b3885..9faa26c41c14 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -566,7 +566,6 @@ IRQCHIP_OF_MATCH_TABLE() \ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(clksrc) \ - ACPI_PROBE_TABLE(iort) \ EARLYCON_TABLE() #define INIT_TEXT \ diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 77e08099e554..f167e1d045ff 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -52,7 +52,4 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) { return NULL; } #endif -#define IORT_ACPI_DECLARE(name, table_id, fn) \ - ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn) - #endif /* __ACPI_IORT_H__ */ -- cgit v1.2.3 From 49a57ef7f8492ef985ee1ecdb927ca78a6b2f308 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Wed, 12 Apr 2017 00:21:27 -0500 Subject: iommu/omap: Drop legacy-style device support All the supported boards that have OMAP IOMMU devices do support DT boot only now. So, drop the support for the non-DT legacy-style devices from the OMAP IOMMU driver. Couple of the fields from the iommu platform data would no longer be required, so they have also been cleaned up. The IOMMU platform data is still needed though for performing reset management properly in a multi-arch environment. 
Signed-off-by: Suman Anna Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 30 ++++++++++++++---------------- include/linux/platform_data/iommu-omap.h | 3 --- 2 files changed, 14 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 54556713c8d1..febd4fbe3445 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -928,28 +928,26 @@ static int omap_iommu_probe(struct platform_device *pdev) int irq; struct omap_iommu *obj; struct resource *res; - struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *of = pdev->dev.of_node; + if (!of) { + pr_err("%s: only DT-based devices are supported\n", __func__); + return -ENODEV; + } + obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); if (!obj) return -ENOMEM; - if (of) { - obj->name = dev_name(&pdev->dev); - obj->nr_tlb_entries = 32; - err = of_property_read_u32(of, "ti,#tlb-entries", - &obj->nr_tlb_entries); - if (err && err != -EINVAL) - return err; - if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) - return -EINVAL; - if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) - obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; - } else { - obj->nr_tlb_entries = pdata->nr_tlb_entries; - obj->name = pdata->name; - } + obj->name = dev_name(&pdev->dev); + obj->nr_tlb_entries = 32; + err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries); + if (err && err != -EINVAL) + return err; + if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) + return -EINVAL; + if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) + obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; obj->dev = &pdev->dev; obj->ctx = (void *)obj + sizeof(*obj); diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index 0496d171700a..a40fc0f4f9de 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -30,10 +30,7 @@ struct omap_iommu_arch_data { }; struct iommu_platform_data { - const char *name; const char *reset_name; - int nr_tlb_entries; - int (*assert_reset)(struct platform_device *pdev, const char *name); int (*deassert_reset)(struct platform_device *pdev, const char *name); }; -- cgit v1.2.3 From e73b7afe4e8ca5ec4304a9e1d5009755a85fff91 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 12 Apr 2017 00:21:28 -0500 Subject: iommu/omap: Move data structures to omap-iommu.h The internal data-structures are scattered over various header and C files. Consolidate them in omap-iommu.h. While at this, add the kerneldoc comment for the missing iommu domain variable and revise the iommu_arch_data name. 
Signed-off-by: Joerg Roedel [s-anna@ti.com: revise kerneldoc comments] Signed-off-by: Suman Anna Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iommu.c | 16 ---------------- drivers/iommu/omap-iommu.h | 33 ++++++++++++++++++++++++++++++++ include/linux/platform_data/iommu-omap.h | 17 ---------------- 3 files changed, 33 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index febd4fbe3445..c1739a650654 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -42,22 +42,6 @@ /* bitmap of the page sizes currently supported */ #define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) -/** - * struct omap_iommu_domain - omap iommu domain - * @pgtable: the page table - * @iommu_dev: an omap iommu device attached to this domain. only a single - * iommu device can be attached for now. - * @dev: Device using this domain. - * @lock: domain lock, should be taken when attaching/detaching - */ -struct omap_iommu_domain { - u32 *pgtable; - struct omap_iommu *iommu_dev; - struct device *dev; - spinlock_t lock; - struct iommu_domain domain; -}; - #define MMU_LOCK_BASE_SHIFT 10 #define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT) #define MMU_LOCK_BASE(x) \ diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 59628e5017b4..3c33608f48ca 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -14,6 +14,7 @@ #define _OMAP_IOMMU_H #include +#include #define for_each_iotlb_cr(obj, n, __i, cr) \ for (__i = 0; \ @@ -27,6 +28,23 @@ struct iotlb_entry { u32 endian, elsz, mixed; }; +/** + * struct omap_iommu_domain - omap iommu domain + * @pgtable: the page table + * @iommu_dev: an omap iommu device attached to this domain. only a single + * iommu device can be attached for now. + * @dev: Device using this domain. + * @lock: domain lock, should be taken when attaching/detaching + * @domain: generic domain handle used by iommu core code + */ +struct omap_iommu_domain { + u32 *pgtable; + struct omap_iommu *iommu_dev; + struct device *dev; + spinlock_t lock; + struct iommu_domain domain; +}; + struct omap_iommu { const char *name; void __iomem *regbase; @@ -52,6 +70,21 @@ struct omap_iommu { u32 id; }; +/** + * struct omap_iommu_arch_data - omap iommu private data + * @name: name of the iommu device + * @iommu_dev: handle of the iommu device + * + * This is an omap iommu private data object, which binds an iommu user + * to its iommu device. This object should be placed at the iommu user's + * dev_archdata so generic IOMMU API can be used without having to + * utilize omap-specific plumbing anymore. + */ +struct omap_iommu_arch_data { + const char *name; + struct omap_iommu *iommu_dev; +}; + struct cr_regs { u32 cam; u32 ram; diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index a40fc0f4f9de..e8b12dbf6170 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -12,23 +12,6 @@ #include -#define MMU_REG_SIZE 256 - -/** - * struct iommu_arch_data - omap iommu private data - * @name: name of the iommu device - * @iommu_dev: handle of the iommu device - * - * This is an omap iommu private data object, which binds an iommu user - * to its iommu device. This object should be placed at the iommu user's - * dev_archdata so generic IOMMU API can be used without having to - * utilize omap-specific plumbing anymore. 
- */ -struct omap_iommu_arch_data { - const char *name; - struct omap_iommu *iommu_dev; -}; - struct iommu_platform_data { const char *reset_name; int (*assert_reset)(struct platform_device *pdev, const char *name); -- cgit v1.2.3 From 518662e0fcb9fa241fe90a337b59bc5066b2a930 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 10 Apr 2017 12:22:09 +1000 Subject: NFS: fix usage of mempools. When passed GFP flags that allow sleeping (such as GFP_NOIO), mempool_alloc() will never return NULL, it will wait until memory is available. This means that we don't need to handle failure, but that we do need to ensure one thread doesn't call mempool_alloc() twice on the one pool without queuing or freeing the first allocation. If multiple threads did this during times of high memory pressure, the pool could be exhausted and a deadlock could result. pnfs_generic_alloc_ds_commits() attempts to allocate from the nfs_commit_mempool while already holding an allocation from that pool. This is not safe. So change nfs_commitdata_alloc() to take a flag that indicates whether failure is acceptable. In pnfs_generic_alloc_ds_commits(), accept failure and handle it as we currently do. Elsewhere, do not accept failure, and do not handle it. Even when failure is acceptable, we want to succeed if possible. That means both - using an entry from the pool if there is one - waiting for direct reclaim if there isn't. We call mempool_alloc(GFP_NOWAIT) to achieve the first, then kmem_cache_alloc(GFP_NOIO|__GFP_NORETRY) to achieve the second. Each of these can fail, but together they do the best they can without blocking indefinitely. The objects returned by kmem_cache_alloc() will still be freed by mempool_free(). This is safe as mempool_alloc() uses exactly the same function to allocate objects (since the mempool was created with mempool_create_slab_pool()). The objects returned by mempool_alloc() and kmem_cache_alloc() are indistinguishable so mempool_free() will handle both identically, either adding to the pool or calling kmem_cache_free(). Also, don't test for failure when allocating from nfs_wdata_mempool.
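To make the two-step allocation strategy concrete, here is the new nfs_commitdata_alloc() in condensed form; the same logic appears in the diff below, and only the comments are added here for explanation.

struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
{
	struct nfs_commit_data *p;

	if (never_fail) {
		/* A sleeping mempool allocation cannot fail. */
		p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
	} else {
		/* First try the pool without sleeping... */
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		/* ...then allow direct reclaim, but never retry indefinitely. */
		if (!p)
			p = kmem_cache_alloc(nfs_cdata_cachep,
					     GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
		if (!p)
			return NULL;
	}

	memset(p, 0, sizeof(*p));
	INIT_LIST_HEAD(&p->pages);
	return p;
}

Either path hands back an object that mempool_free() can later accept, for the reasons explained above.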
Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/pnfs_nfs.c | 16 +++++----------- fs/nfs/write.c | 35 +++++++++++++++++++++-------------- include/linux/nfs_fs.h | 2 +- 3 files changed, 27 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 7250b95549ec..1edf5b84aba5 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -217,7 +217,7 @@ pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo, for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) { if (list_empty(&bucket->committing)) continue; - data = nfs_commitdata_alloc(); + data = nfs_commitdata_alloc(false); if (!data) break; data->ds_commit_index = i; @@ -283,16 +283,10 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, unsigned int nreq = 0; if (!list_empty(mds_pages)) { - data = nfs_commitdata_alloc(); - if (data != NULL) { - data->ds_commit_index = -1; - list_add(&data->pages, &list); - nreq++; - } else { - nfs_retry_commit(mds_pages, NULL, cinfo, 0); - pnfs_generic_retry_commit(cinfo, 0); - return -ENOMEM; - } + data = nfs_commitdata_alloc(true); + data->ds_commit_index = -1; + list_add(&data->pages, &list); + nreq++; } nreq += pnfs_generic_alloc_ds_commits(cinfo, &list); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index abb2c8a3be42..bdfe5a7c5874 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -60,14 +60,28 @@ static mempool_t *nfs_wdata_mempool; static struct kmem_cache *nfs_cdata_cachep; static mempool_t *nfs_commit_mempool; -struct nfs_commit_data *nfs_commitdata_alloc(void) +struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail) { - struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); + struct nfs_commit_data *p; - if (p) { - memset(p, 0, sizeof(*p)); - INIT_LIST_HEAD(&p->pages); + if (never_fail) + p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); + else { + /* It is OK to do some reclaim, not no safe to wait + * for anything to be returned to the pool. + * mempool_alloc() cannot handle that particular combination, + * so we need two separate attempts. 
+ */ + p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); + if (!p) + p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO | + __GFP_NOWARN | __GFP_NORETRY); + if (!p) + return NULL; } + + memset(p, 0, sizeof(*p)); + INIT_LIST_HEAD(&p->pages); return p; } EXPORT_SYMBOL_GPL(nfs_commitdata_alloc); @@ -82,8 +96,7 @@ static struct nfs_pgio_header *nfs_writehdr_alloc(void) { struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO); - if (p) - memset(p, 0, sizeof(*p)); + memset(p, 0, sizeof(*p)); return p; } @@ -1705,19 +1718,13 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, if (list_empty(head)) return 0; - data = nfs_commitdata_alloc(); - - if (!data) - goto out_bad; + data = nfs_commitdata_alloc(true); /* Set up the argument struct */ nfs_init_commit(data, head, NULL, cinfo); atomic_inc(&cinfo->mds->rpcs_out); return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), data->mds_ops, how, 0); - out_bad: - nfs_retry_commit(head, NULL, cinfo, 0); - return -ENOMEM; } int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf) diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 287f34161086..1b29915247b2 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -502,7 +502,7 @@ extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); -extern struct nfs_commit_data *nfs_commitdata_alloc(void); +extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); extern void nfs_commit_free(struct nfs_commit_data *data); static inline int -- cgit v1.2.3 From fbe77c30e9abcb3429380dec622439991a718e31 Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Wed, 19 Apr 2017 10:11:35 -0400 Subject: NFS: move rw_mode to nfs_pageio_header Let's try to have it in a cacheline in nfs4_proc_pgio_rpc_prepare(). 
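Put differently, the mode moves out of the shared, read-only nfs_rw_ops table and into the per-I/O header that nfs4_proc_pgio_rpc_prepare() is already reading. A toy layout sketch of the change (these structures are stand-ins, not the real definitions in nfs_page.h and nfs_xdr.h):

#include <linux/types.h>
#include <linux/fs.h>	/* fmode_t, FMODE_READ, FMODE_WRITE */

struct example_rw_ops {
	/* const fmode_t rw_mode;  -- field removed by this patch */
	void (*rw_free_header)(void *hdr);
};

struct example_pgio_header {
	const struct example_rw_ops *rw_ops;
	fmode_t rw_mode;	/* stamped at header allocation time */
};

/* The RPC prepare path now reads hdr->rw_mode directly instead of
 * chasing hdr->rw_ops->rw_mode through a second cacheline. */
static fmode_t example_get_mode(const struct example_pgio_header *hdr)
{
	return hdr->rw_mode;
}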
Signed-off-by: Benjamin Coddington Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 2 +- fs/nfs/pagelist.c | 8 +++----- fs/nfs/read.c | 9 ++++++--- fs/nfs/write.c | 8 +++++--- include/linux/nfs_page.h | 4 ++-- include/linux/nfs_xdr.h | 1 + 6 files changed, 18 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dda19a35ad9e..4e52ac773d1f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4610,7 +4610,7 @@ static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, return 0; if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, hdr->args.lock_context, - hdr->rw_ops->rw_mode) == -EIO) + hdr->rw_mode) == -EIO) return -EIO; if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) return -EIO; diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 453255c30fa1..f53610672f03 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -664,11 +664,11 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc, const struct nfs_pgio_completion_ops *compl_ops, const struct nfs_rw_ops *rw_ops, size_t bsize, - int io_flags) + int io_flags, + gfp_t gfp_flags) { struct nfs_pgio_mirror *new; int i; - gfp_t gfp_flags = GFP_KERNEL; desc->pg_moreio = 0; desc->pg_inode = inode; @@ -688,8 +688,6 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc, if (pg_ops->pg_get_mirror_count) { /* until we have a request, we don't have an lseg and no * idea how many mirrors there will be */ - if (desc->pg_rw_ops->rw_mode == FMODE_WRITE) - gfp_flags = GFP_NOIO; new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX, sizeof(struct nfs_pgio_mirror), gfp_flags); desc->pg_mirrors_dynamic = new; @@ -753,7 +751,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, if (pagecount <= ARRAY_SIZE(pg_array->page_array)) pg_array->pagevec = pg_array->page_array; else { - if (desc->pg_rw_ops->rw_mode == FMODE_WRITE) + if (hdr->rw_mode == FMODE_WRITE) gfp_flags = GFP_NOIO; pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags); if (!pg_array->pagevec) { diff --git a/fs/nfs/read.c b/fs/nfs/read.c index defc9233e985..a8421d9dab6a 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -35,7 +35,11 @@ static struct kmem_cache *nfs_rdata_cachep; static struct nfs_pgio_header *nfs_readhdr_alloc(void) { - return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL); + struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL); + + if (p) + p->rw_mode = FMODE_READ; + return p; } static void nfs_readhdr_free(struct nfs_pgio_header *rhdr) @@ -64,7 +68,7 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, pg_ops = server->pnfs_curr_ld->pg_read_ops; #endif nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops, - server->rsize, 0); + server->rsize, 0, GFP_KERNEL); } EXPORT_SYMBOL_GPL(nfs_pageio_init_read); @@ -451,7 +455,6 @@ void nfs_destroy_readpagecache(void) } static const struct nfs_rw_ops nfs_rw_read_ops = { - .rw_mode = FMODE_READ, .rw_alloc_header = nfs_readhdr_alloc, .rw_free_header = nfs_readhdr_free, .rw_done = nfs_readpage_done, diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e0bccbefbc9e..95ac001b6fdf 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -96,7 +96,10 @@ static struct nfs_pgio_header *nfs_writehdr_alloc(void) { struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO); - memset(p, 0, sizeof(*p)); + if (p) { + memset(p, 0, sizeof(*p)); + p->rw_mode = FMODE_WRITE; + } return p; } @@ -1381,7 +1384,7 @@ void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, 
pg_ops = server->pnfs_curr_ld->pg_write_ops; #endif nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops, - server->wsize, ioflags); + server->wsize, ioflags, GFP_NOIO); } EXPORT_SYMBOL_GPL(nfs_pageio_init_write); @@ -2115,7 +2118,6 @@ void nfs_destroy_writepagecache(void) } static const struct nfs_rw_ops nfs_rw_write_ops = { - .rw_mode = FMODE_WRITE, .rw_alloc_header = nfs_writehdr_alloc, .rw_free_header = nfs_writehdr_free, .rw_done = nfs_writeback_done, diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 957049f72290..6f01e28bba27 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -64,7 +64,6 @@ struct nfs_pageio_ops { }; struct nfs_rw_ops { - const fmode_t rw_mode; struct nfs_pgio_header *(*rw_alloc_header)(void); void (*rw_free_header)(struct nfs_pgio_header *); int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *, @@ -124,7 +123,8 @@ extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, const struct nfs_pgio_completion_ops *compl_ops, const struct nfs_rw_ops *rw_ops, size_t bsize, - int how); + int how, + gfp_t gfp_flags); extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, struct nfs_page *); extern int nfs_pageio_resend(struct nfs_pageio_descriptor *, diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 348f7c158084..51e27f9746ee 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1427,6 +1427,7 @@ struct nfs_pgio_header { struct list_head pages; struct nfs_page *req; struct nfs_writeverf verf; /* Used for writes */ + fmode_t rw_mode; struct pnfs_layout_segment *lseg; loff_t io_start; const struct rpc_call_ops *mds_ops; -- cgit v1.2.3 From 6ade8694f471d847500c7cec152cc15171cef5d5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 20 Apr 2017 17:30:06 -0700 Subject: kvm: Move srcu_struct fields to end of struct kvm Parallelizing SRCU callback handling will increase the size of srcu_struct, which will move the kvm structure's kvm_arch field out of reach of powerpc's current assembly code, which will result in the following sort of build error: arch/powerpc/kvm/book3s_hv_rmhandlers.S:617: Error: operand out of range (0x000000000000b328 is not between 0xffffffffffff8000 and 0x0000000000007fff) This commit moves the srcu_struct fields in the kvm structure to follow the kvm_arch field, which will allow powerpc's assembly code to continue to be able to reach the kvm_arch field. Reported-by: Stephen Rothwell Reported-by: Michael Ellerman Reported-by: kbuild test robot Suggested-by: Paolo Bonzini Signed-off-by: Paul E. McKenney Tested-by: Michael Ellerman Acked-by: Paolo Bonzini [ paulmck: Moved this commit to precede SRCU callback parallelization, and reworded the commit log into future tense, all in the name of bisectability. 
] --- include/linux/kvm_host.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2c14ad9809da..96c8e29c6442 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -375,8 +375,6 @@ struct kvm { struct mutex slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM]; - struct srcu_struct srcu; - struct srcu_struct irq_srcu; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; /* @@ -429,6 +427,8 @@ struct kvm { struct list_head devices; struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; + struct srcu_struct srcu; + struct srcu_struct irq_srcu; }; #define kvm_err(fmt, ...) \ -- cgit v1.2.3 From da915ad5cf25b5f5d358dd3670c3378d8ae8c03e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 5 Apr 2017 09:01:53 -0700 Subject: srcu: Parallelize callback handling Peter Zijlstra proposed using SRCU to reduce mmap_sem contention [1,2], however, there are workloads that could result in a high volume of concurrent invocations of call_srcu(), which with current SRCU would result in excessive lock contention on the srcu_struct structure's ->queue_lock, which protects SRCU's callback lists. This commit therefore moves SRCU to per-CPU callback lists, thus greatly reducing contention. Because a given SRCU instance no longer has a single centralized callback list, starting grace periods and invoking callbacks are both more complex than in the single-list Classic SRCU implementation. Starting grace periods and handling callbacks are now handled using an srcu_node tree that is in some ways similar to the rcu_node trees used by RCU-bh, RCU-preempt, and RCU-sched (for example, the srcu_node tree shape is controlled by exactly the same Kconfig options and boot parameters that control the shape of the rcu_node tree). In addition, the old per-CPU srcu_array structure is now named srcu_data and contains an rcu_segcblist structure named ->srcu_cblist for its callbacks (and a spinlock to protect this). The srcu_struct gets an srcu_gp_seq that is used to associate callback segments with the corresponding completion-time grace-period number. These completion-time grace-period numbers are propagated up the srcu_node tree so that the grace-period workqueue handler can determine whether additional grace periods are needed on the one hand and where to look for callbacks that are ready to be invoked. The srcu_barrier() function must now wait on all instances of the per-CPU ->srcu_cblist. Because each ->srcu_cblist is protected by ->lock, srcu_barrier() can remotely add the needed callbacks. In theory, it could also remotely start grace periods, but in practice doing so is complex and racy. And interestingly enough, it is never necessary for srcu_barrier() to start a grace period because srcu_barrier() only enqueues a callback when a callback is already present--and it turns out that a grace period has to have already been started for this pre-existing callback. Furthermore, it is only the callback that srcu_barrier() needs to wait on, not any particular grace period. Therefore, a new rcu_segcblist_entrain() function enqueues the srcu_barrier() function's callback into the same segment occupied by the last pre-existing callback in the list. 
The special case where all the pre-existing callbacks are on a different list (because they are in the process of being invoked) is handled by enqueuing srcu_barrier()'s callback into the RCU_DONE_TAIL segment, relying on the done-callbacks check that takes place after all callbacks are invoked. Note that the readers use the same algorithm as before. Note that there is a separate srcu_idx that tells the readers what counter to increment. This unfortunately cannot be combined with srcu_gp_seq because they need to be incremented at different times. This commit introduces some ugly #ifdefs in rcutorture. These will go away when I feel good enough about Tree SRCU to ditch Classic SRCU. Some crude performance comparisons, courtesy of a quickly hacked rcuperf asynchronous-grace-period capability:

                        Callback Queuing Overhead
                        -------------------------
        # CPUS          Classic SRCU    Tree SRCU
        ------          ------------    ---------
             2              0.349 us     0.342 us
            16             31.66  us     0.4   us
            41             ---------     0.417 us

The times are the 90th percentiles, a statistic that was chosen to reject the overheads of the occasional srcu_barrier() call needed to avoid OOMing the test machine. The rcuperf test hangs when running Classic SRCU at 41 CPUs, hence the line of dashes. Despite the hacks to both the rcuperf code and that statistic, this is a convincing demonstration of Tree SRCU's performance and scalability advantages. [1] https://lwn.net/Articles/309030/ [2] https://patchwork.kernel.org/patch/5108281/ Signed-off-by: Paul E. McKenney [ paulmck: Fix initialization if synchronize_srcu_expedited() called first. ] --- include/linux/rcu_segcblist.h | 42 ++- include/linux/srcutree.h | 80 ++++-- kernel/rcu/rcutorture.c | 20 +- kernel/rcu/srcutree.c | 642 +++++++++++++++++++++++++++++++++--------- kernel/rcu/tree.c | 6 + kernel/rcu/tree.h | 8 + 6 files changed, 647 insertions(+), 151 deletions(-) (limited to 'include') diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 74b1e7243955..ced8f313fd05 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -401,6 +401,37 @@ static inline void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, rsclp->tails[RCU_NEXT_TAIL] = &rhp->next; } +/* + * Entrain the specified callback onto the specified rcu_segcblist at + * the end of the last non-empty segment. If the entire rcu_segcblist + * is empty, make no change, but return false. + * + * This is intended for use by rcu_barrier()-like primitives, -not- + * for normal grace-period use. IMPORTANT: The callback you enqueue + * will wait for all prior callbacks, NOT necessarily for a grace + * period. You have been warned. + */ +static inline bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, + struct rcu_head *rhp, bool lazy) +{ + int i; + + if (rcu_segcblist_n_cbs(rsclp) == 0) + return false; + WRITE_ONCE(rsclp->len, rsclp->len + 1); + if (lazy) + rsclp->len_lazy++; + smp_mb(); /* Ensure counts are updated before callback is entrained. */ + rhp->next = NULL; + for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--) + if (rsclp->tails[i] != rsclp->tails[i - 1]) + break; + *rsclp->tails[i] = rhp; + for (; i <= RCU_NEXT_TAIL; i++) + rsclp->tails[i] = &rhp->next; + return true; +} + /* * Extract only the counts from the specified rcu_segcblist structure, * and place them in the specified rcu_cblist structure.
This function @@ -537,7 +568,8 @@ static inline void rcu_segcblist_advance(struct rcu_segcblist *rsclp, int i, j; WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); - WARN_ON_ONCE(rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)); + if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) + return; /* * Find all callbacks whose ->gp_seq numbers indicate that they @@ -582,8 +614,9 @@ static inline void rcu_segcblist_advance(struct rcu_segcblist *rsclp, * them to complete at the end of the earlier grace period. * * This function operates on an rcu_segcblist structure, and also the - * grace-period sequence number at which new callbacks would become - * ready to invoke. + * grace-period sequence number seq at which new callbacks would become + * ready to invoke. Returns true if there are callbacks that won't be + * ready to invoke until seq, false otherwise. */ static inline bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq) @@ -591,7 +624,8 @@ static inline bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, int i; WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); - WARN_ON_ONCE(rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)); + if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) + return false; /* * Find the segment preceding the oldest segment of callbacks diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index f2b3bd6c6bc2..0400e211aa44 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -24,25 +24,75 @@ #ifndef _LINUX_SRCU_TREE_H #define _LINUX_SRCU_TREE_H -struct srcu_array { - unsigned long lock_count[2]; - unsigned long unlock_count[2]; +#include +#include + +struct srcu_node; +struct srcu_struct; + +/* + * Per-CPU structure feeding into leaf srcu_node, similar in function + * to rcu_node. + */ +struct srcu_data { + /* Read-side state. */ + unsigned long srcu_lock_count[2]; /* Locks per CPU. */ + unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */ + + /* Update-side state. */ + spinlock_t lock ____cacheline_internodealigned_in_smp; + struct rcu_segcblist srcu_cblist; /* List of callbacks.*/ + unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ + bool srcu_cblist_invoking; /* Invoking these CBs? */ + struct delayed_work work; /* Context for CB invoking. */ + struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ + struct srcu_node *mynode; /* Leaf srcu_node. */ + int cpu; + struct srcu_struct *sp; }; +/* + * Node in SRCU combining tree, similar in function to rcu_data. + */ +struct srcu_node { + spinlock_t lock; + unsigned long srcu_have_cbs[4]; /* GP seq for children */ + /* having CBs, but only */ + /* is > ->srcu_gq_seq. */ + struct srcu_node *srcu_parent; /* Next up in tree. */ + int grplo; /* Least CPU for node. */ + int grphi; /* Biggest CPU for node. */ +}; + +/* + * Per-SRCU-domain structure, similar in function to rcu_state. + */ struct srcu_struct { - unsigned long completed; - unsigned long srcu_gp_seq; - atomic_t srcu_exp_cnt; - struct srcu_array __percpu *per_cpu_ref; - spinlock_t queue_lock; /* protect ->srcu_cblist */ - struct rcu_segcblist srcu_cblist; + struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */ + struct srcu_node *level[RCU_NUM_LVLS + 1]; + /* First node at each level. */ + struct mutex srcu_cb_mutex; /* Serialize CB preparation. */ + spinlock_t gp_lock; /* protect ->srcu_cblist */ + struct mutex srcu_gp_mutex; /* Serialize GP work. */ + unsigned int srcu_idx; /* Current rdr array element. */ + unsigned long srcu_gp_seq; /* Grace-period seq #. 
*/ + unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */ + atomic_t srcu_exp_cnt; /* # ongoing expedited GPs. */ + struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */ + unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */ + struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */ + struct completion srcu_barrier_completion; + /* Awaken barrier rq at end. */ + atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */ + /* callback for the barrier */ + /* operation. */ struct delayed_work work; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ }; -/* Values for -> state variable. */ +/* Values for state variable (bottom bits of ->srcu_gp_seq). */ #define SRCU_STATE_IDLE 0 #define SRCU_STATE_SCAN1 1 #define SRCU_STATE_SCAN2 2 @@ -51,11 +101,9 @@ void process_srcu(struct work_struct *work); #define __SRCU_STRUCT_INIT(name) \ { \ - .completed = -300, \ - .per_cpu_ref = &name##_srcu_array, \ - .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ - .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist),\ - .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\ + .sda = &name##_srcu_data, \ + .gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock), \ + .srcu_gp_seq_needed = 0 - 1, \ __SRCU_DEP_MAP_INIT(name) \ } @@ -79,7 +127,7 @@ void process_srcu(struct work_struct *work); * See include/linux/percpu-defs.h for the rules on per-CPU variables. */ #define __DEFINE_SRCU(name, is_static) \ - static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\ + static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 6f344b6748a8..e9d4527cdd43 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -563,17 +563,30 @@ static void srcu_torture_stats(void) int idx; #if defined(CONFIG_TREE_SRCU) || defined(CONFIG_CLASSIC_SRCU) +#ifdef CONFIG_TREE_SRCU + idx = srcu_ctlp->srcu_idx & 0x1; +#else /* #ifdef CONFIG_TREE_SRCU */ idx = srcu_ctlp->completed & 0x1; +#endif /* #else #ifdef CONFIG_TREE_SRCU */ pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", torture_type, TORTURE_FLAG, idx); for_each_possible_cpu(cpu) { unsigned long l0, l1; unsigned long u0, u1; long c0, c1; - struct srcu_array *counts = per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu); +#ifdef CONFIG_TREE_SRCU + struct srcu_data *counts; + counts = per_cpu_ptr(srcu_ctlp->sda, cpu); + u0 = counts->srcu_unlock_count[!idx]; + u1 = counts->srcu_unlock_count[idx]; +#else /* #ifdef CONFIG_TREE_SRCU */ + struct srcu_array *counts; + + counts = per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu); u0 = counts->unlock_count[!idx]; u1 = counts->unlock_count[idx]; +#endif /* #else #ifdef CONFIG_TREE_SRCU */ /* * Make sure that a lock is always counted if the corresponding @@ -581,8 +594,13 @@ static void srcu_torture_stats(void) */ smp_rmb(); +#ifdef CONFIG_TREE_SRCU + l0 = counts->srcu_lock_count[!idx]; + l1 = counts->srcu_lock_count[idx]; +#else /* #ifdef CONFIG_TREE_SRCU */ l0 = counts->lock_count[!idx]; l1 = counts->lock_count[idx]; +#endif /* #else #ifdef CONFIG_TREE_SRCU */ c0 = l0 - u0; c1 = l1 - u1; diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index da676b0d016b..12feeca18f46 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -36,19 +36,110 @@ #include #include -#include #include "rcu.h" 
-static int init_srcu_struct_fields(struct srcu_struct *sp) +static void srcu_invoke_callbacks(struct work_struct *work); +static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); + +/* + * Initialize SRCU combining tree. Note that statically allocated + * srcu_struct structures might already have srcu_read_lock() and + * srcu_read_unlock() running against them. So if the is_static parameter + * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. + */ +static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) { - sp->completed = 0; + int cpu; + int i; + int level = 0; + int levelspread[RCU_NUM_LVLS]; + struct srcu_data *sdp; + struct srcu_node *snp; + struct srcu_node *snp_first; + + /* Work out the overall tree geometry. */ + sp->level[0] = &sp->node[0]; + for (i = 1; i < rcu_num_lvls; i++) + sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1]; + rcu_init_levelspread(levelspread, num_rcu_lvl); + + /* Each pass through this loop initializes one srcu_node structure. */ + rcu_for_each_node_breadth_first(sp, snp) { + spin_lock_init(&snp->lock); + for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) + snp->srcu_have_cbs[i] = 0; + snp->grplo = -1; + snp->grphi = -1; + if (snp == &sp->node[0]) { + /* Root node, special case. */ + snp->srcu_parent = NULL; + continue; + } + + /* Non-root node. */ + if (snp == sp->level[level + 1]) + level++; + snp->srcu_parent = sp->level[level - 1] + + (snp - sp->level[level]) / + levelspread[level - 1]; + } + + /* + * Initialize the per-CPU srcu_data array, which feeds into the + * leaves of the srcu_node tree. + */ + WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) != + ARRAY_SIZE(sdp->srcu_unlock_count)); + level = rcu_num_lvls - 1; + snp_first = sp->level[level]; + for_each_possible_cpu(cpu) { + sdp = per_cpu_ptr(sp->sda, cpu); + spin_lock_init(&sdp->lock); + rcu_segcblist_init(&sdp->srcu_cblist); + sdp->srcu_cblist_invoking = false; + sdp->srcu_gp_seq_needed = sp->srcu_gp_seq; + sdp->mynode = &snp_first[cpu / levelspread[level]]; + for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { + if (snp->grplo < 0) + snp->grplo = cpu; + snp->grphi = cpu; + } + sdp->cpu = cpu; + INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); + sdp->sp = sp; + if (is_static) + continue; + + /* Dynamically allocated, better be no srcu_read_locks()! */ + for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) { + sdp->srcu_lock_count[i] = 0; + sdp->srcu_unlock_count[i] = 0; + } + } +} + +/* + * Initialize non-compile-time initialized fields, including the + * associated srcu_node and srcu_data structures. The is_static + * parameter is passed through to init_srcu_struct_nodes(), and + * also tells us that ->sda has already been wired up to srcu_data. + */ +static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) +{ + mutex_init(&sp->srcu_cb_mutex); + mutex_init(&sp->srcu_gp_mutex); + sp->srcu_idx = 0; sp->srcu_gp_seq = 0; atomic_set(&sp->srcu_exp_cnt, 0); - spin_lock_init(&sp->queue_lock); - rcu_segcblist_init(&sp->srcu_cblist); + sp->srcu_barrier_seq = 0; + mutex_init(&sp->srcu_barrier_mutex); + atomic_set(&sp->srcu_barrier_cpu_cnt, 0); INIT_DELAYED_WORK(&sp->work, process_srcu); - sp->per_cpu_ref = alloc_percpu(struct srcu_array); - return sp->per_cpu_ref ? 0 : -ENOMEM; + if (!is_static) + sp->sda = alloc_percpu(struct srcu_data); + init_srcu_struct_nodes(sp, is_static); + smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ + return sp->sda ? 
0 : -ENOMEM; } #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -59,7 +150,8 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name, /* Don't re-initialize a lock while it is held. */ debug_check_no_locks_freed((void *)sp, sizeof(*sp)); lockdep_init_map(&sp->dep_map, name, key, 0); - return init_srcu_struct_fields(sp); + spin_lock_init(&sp->gp_lock); + return init_srcu_struct_fields(sp, false); } EXPORT_SYMBOL_GPL(__init_srcu_struct); @@ -75,15 +167,41 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct); */ int init_srcu_struct(struct srcu_struct *sp) { - return init_srcu_struct_fields(sp); + spin_lock_init(&sp->gp_lock); + return init_srcu_struct_fields(sp, false); } EXPORT_SYMBOL_GPL(init_srcu_struct); #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /* - * Returns approximate total of the readers' ->lock_count[] values for the - * rank of per-CPU counters specified by idx. + * First-use initialization of statically allocated srcu_struct + * structure. Wiring up the combining tree is more than can be + * done with compile-time initialization, so this check is added + * to each update-side SRCU primitive. Use ->gp_lock, which -is- + * compile-time initialized, to resolve races involving multiple + * CPUs trying to garner first-use privileges. + */ +static void check_init_srcu_struct(struct srcu_struct *sp) +{ + unsigned long flags; + + WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT); + /* The smp_load_acquire() pairs with the smp_store_release(). */ + if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/ + return; /* Already initialized. */ + spin_lock_irqsave(&sp->gp_lock, flags); + if (!rcu_seq_state(sp->srcu_gp_seq_needed)) { + spin_unlock_irqrestore(&sp->gp_lock, flags); + return; + } + init_srcu_struct_fields(sp, true); + spin_unlock_irqrestore(&sp->gp_lock, flags); +} + +/* + * Returns approximate total of the readers' ->srcu_lock_count[] values + * for the rank of per-CPU counters specified by idx. */ static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) { @@ -91,16 +209,16 @@ static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) unsigned long sum = 0; for_each_possible_cpu(cpu) { - struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu); + struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); - sum += READ_ONCE(cpuc->lock_count[idx]); + sum += READ_ONCE(cpuc->srcu_lock_count[idx]); } return sum; } /* - * Returns approximate total of the readers' ->unlock_count[] values for the - * rank of per-CPU counters specified by idx. + * Returns approximate total of the readers' ->srcu_unlock_count[] values + * for the rank of per-CPU counters specified by idx. */ static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) { @@ -108,9 +226,9 @@ static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) unsigned long sum = 0; for_each_possible_cpu(cpu) { - struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu); + struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); - sum += READ_ONCE(cpuc->unlock_count[idx]); + sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); } return sum; } @@ -145,14 +263,14 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) * the current index but not have incremented the lock counter yet. 
* * Possible bug: There is no guarantee that there haven't been - * ULONG_MAX increments of ->lock_count[] since the unlocks were + * ULONG_MAX increments of ->srcu_lock_count[] since the unlocks were * counted, meaning that this could return true even if there are * still active readers. Since there are no memory barriers around - * srcu_flip(), the CPU is not required to increment ->completed + * srcu_flip(), the CPU is not required to increment ->srcu_idx * before running srcu_readers_unlock_idx(), which means that there * could be an arbitrarily large number of critical sections that * execute after srcu_readers_unlock_idx() but use the old value - * of ->completed. + * of ->srcu_idx. */ return srcu_readers_lock_idx(sp, idx) == unlocks; } @@ -172,12 +290,12 @@ static bool srcu_readers_active(struct srcu_struct *sp) unsigned long sum = 0; for_each_possible_cpu(cpu) { - struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu); + struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); - sum += READ_ONCE(cpuc->lock_count[0]); - sum += READ_ONCE(cpuc->lock_count[1]); - sum -= READ_ONCE(cpuc->unlock_count[0]); - sum -= READ_ONCE(cpuc->unlock_count[1]); + sum += READ_ONCE(cpuc->srcu_lock_count[0]); + sum += READ_ONCE(cpuc->srcu_lock_count[1]); + sum -= READ_ONCE(cpuc->srcu_unlock_count[0]); + sum -= READ_ONCE(cpuc->srcu_unlock_count[1]); } return sum; } @@ -193,18 +311,21 @@ static bool srcu_readers_active(struct srcu_struct *sp) */ void cleanup_srcu_struct(struct srcu_struct *sp) { + int cpu; + WARN_ON_ONCE(atomic_read(&sp->srcu_exp_cnt)); if (WARN_ON(srcu_readers_active(sp))) return; /* Leakage unless caller handles error. */ - if (WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist))) - return; /* Leakage unless caller handles error. */ flush_delayed_work(&sp->work); - if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE)) { - pr_info("cleanup_srcu_struct: Active srcu_struct %lu CBs %c state: %d\n", rcu_segcblist_n_cbs(&sp->srcu_cblist), ".E"[rcu_segcblist_empty(&sp->srcu_cblist)], rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); + for_each_possible_cpu(cpu) + flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work); + if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || + WARN_ON(srcu_readers_active(sp))) { + pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); return; /* Caller forgot to stop doing call_srcu()? */ } - free_percpu(sp->per_cpu_ref); - sp->per_cpu_ref = NULL; + free_percpu(sp->sda); + sp->sda = NULL; } EXPORT_SYMBOL_GPL(cleanup_srcu_struct); @@ -217,8 +338,8 @@ int __srcu_read_lock(struct srcu_struct *sp) { int idx; - idx = READ_ONCE(sp->completed) & 0x1; - __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); + idx = READ_ONCE(sp->srcu_idx) & 0x1; + __this_cpu_inc(sp->sda->srcu_lock_count[idx]); smp_mb(); /* B */ /* Avoid leaking the critical section. */ return idx; } @@ -233,7 +354,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock); void __srcu_read_unlock(struct srcu_struct *sp, int idx) { smp_mb(); /* C */ /* Avoid leaking the critical section. 
*/ - this_cpu_inc(sp->per_cpu_ref->unlock_count[idx]); + this_cpu_inc(sp->sda->srcu_unlock_count[idx]); } EXPORT_SYMBOL_GPL(__srcu_read_unlock); @@ -251,19 +372,207 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock); */ static void srcu_gp_start(struct srcu_struct *sp) { + struct srcu_data *sdp = this_cpu_ptr(sp->sda); int state; - rcu_segcblist_accelerate(&sp->srcu_cblist, - rcu_seq_snap(&sp->srcu_gp_seq)); + RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock), + "Invoked srcu_gp_start() without ->gp_lock!"); + WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); + rcu_segcblist_advance(&sdp->srcu_cblist, + rcu_seq_current(&sp->srcu_gp_seq)); + (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, + rcu_seq_snap(&sp->srcu_gp_seq)); rcu_seq_start(&sp->srcu_gp_seq); state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); WARN_ON_ONCE(state != SRCU_STATE_SCAN1); } +/* + * Track online CPUs to guide callback workqueue placement. + */ +DEFINE_PER_CPU(bool, srcu_online); + +void srcu_online_cpu(unsigned int cpu) +{ + WRITE_ONCE(per_cpu(srcu_online, cpu), true); +} + +void srcu_offline_cpu(unsigned int cpu) +{ + WRITE_ONCE(per_cpu(srcu_online, cpu), false); +} + +/* + * Place the workqueue handler on the specified CPU if online, otherwise + * just run it whereever. This is useful for placing workqueue handlers + * that are to invoke the specified CPU's callbacks. + */ +static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, + struct delayed_work *dwork, + unsigned long delay) +{ + bool ret; + + preempt_disable(); + if (READ_ONCE(per_cpu(srcu_online, cpu))) + ret = queue_delayed_work_on(cpu, wq, dwork, delay); + else + ret = queue_delayed_work(wq, dwork, delay); + preempt_enable(); + return ret; +} + +/* + * Schedule callback invocation for the specified srcu_data structure, + * if possible, on the corresponding CPU. + */ +static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) +{ + srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq, + &sdp->work, delay); +} + +/* + * Schedule callback invocation for all srcu_data structures associated + * with the specified srcu_node structure, if possible, on the corresponding + * CPUs. + */ +static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp) +{ + int cpu; + + for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) + srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), SRCU_INTERVAL); +} + +/* + * Note the end of an SRCU grace period. Initiates callback invocation + * and starts a new grace period if needed. + * + * The ->srcu_cb_mutex acquisition does not protect any data, but + * instead prevents more than one grace period from starting while we + * are initiating callback invocation. This allows the ->srcu_have_cbs[] + * array to have a finite number of elements. + */ +static void srcu_gp_end(struct srcu_struct *sp) +{ + bool cbs; + unsigned long gpseq; + int idx; + int idxnext; + struct srcu_node *snp; + + /* Prevent more than one additional grace period. */ + mutex_lock(&sp->srcu_cb_mutex); + + /* End the current grace period. */ + spin_lock_irq(&sp->gp_lock); + idx = rcu_seq_state(sp->srcu_gp_seq); + WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); + rcu_seq_end(&sp->srcu_gp_seq); + gpseq = rcu_seq_current(&sp->srcu_gp_seq); + spin_unlock_irq(&sp->gp_lock); + mutex_unlock(&sp->srcu_gp_mutex); + /* A new grace period can start at this point. But only one. */ + + /* Initiate callback invocation as needed. 
*/ + idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); + idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs); + rcu_for_each_node_breadth_first(sp, snp) { + spin_lock_irq(&snp->lock); + cbs = false; + if (snp >= sp->level[rcu_num_lvls - 1]) + cbs = snp->srcu_have_cbs[idx] == gpseq; + snp->srcu_have_cbs[idx] = gpseq; + rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); + spin_unlock_irq(&snp->lock); + if (cbs) { + smp_mb(); /* GP end before CB invocation. */ + srcu_schedule_cbs_snp(sp, snp); + } + } + + /* Callback initiation done, allow grace periods after next. */ + mutex_unlock(&sp->srcu_cb_mutex); + + /* Start a new grace period if needed. */ + spin_lock_irq(&sp->gp_lock); + gpseq = rcu_seq_current(&sp->srcu_gp_seq); + if (!rcu_seq_state(gpseq) && + ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) { + srcu_gp_start(sp); + spin_unlock_irq(&sp->gp_lock); + /* Throttle expedited grace periods: Should be rare! */ + srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) && + rcu_seq_ctr(gpseq) & 0xf + ? 0 + : SRCU_INTERVAL); + } else { + spin_unlock_irq(&sp->gp_lock); + } +} + +/* + * Funnel-locking scheme to scalably mediate many concurrent grace-period + * requests. The winner has to do the work of actually starting grace + * period s. Losers must either ensure that their desired grace-period + * number is recorded on at least their leaf srcu_node structure, or they + * must take steps to invoke their own callbacks. + */ +static void srcu_funnel_gp_start(struct srcu_struct *sp, + struct srcu_data *sdp, + unsigned long s) +{ + unsigned long flags; + int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs); + struct srcu_node *snp = sdp->mynode; + unsigned long snp_seq; + + /* Each pass through the loop does one level of the srcu_node tree. */ + for (; snp != NULL; snp = snp->srcu_parent) { + if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode) + return; /* GP already done and CBs recorded. */ + spin_lock_irqsave(&snp->lock, flags); + if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { + snp_seq = snp->srcu_have_cbs[idx]; + spin_unlock_irqrestore(&snp->lock, flags); + if (snp == sdp->mynode && snp_seq != s) { + smp_mb(); /* CBs after GP! */ + srcu_schedule_cbs_sdp(sdp, 0); + } + return; + } + snp->srcu_have_cbs[idx] = s; + spin_unlock_irqrestore(&snp->lock, flags); + } + + /* Top of tree, must ensure the grace period will be started. */ + spin_lock_irqsave(&sp->gp_lock, flags); + if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) { + /* + * Record need for grace period s. Pair with load + * acquire setting up for initialization. + */ + smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/ + } + + /* If grace period not already done and none in progress, start it. */ + if (!rcu_seq_done(&sp->srcu_gp_seq, s) && + rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) { + WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); + srcu_gp_start(sp); + queue_delayed_work(system_power_efficient_wq, &sp->work, + atomic_read(&sp->srcu_exp_cnt) + ? 0 + : SRCU_INTERVAL); + } + spin_unlock_irqrestore(&sp->gp_lock, flags); +} + /* * Wait until all readers counted by array index idx complete, but * loop an additional time if there is an expedited grace period pending. - * The caller must ensure that ->completed is not changed while checking. + * The caller must ensure that ->srcu_idx is not changed while checking. 
*/ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) { @@ -277,13 +586,13 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) } /* - * Increment the ->completed counter so that future SRCU readers will - * use the other rank of the ->(un)lock_count[] arrays. This allows + * Increment the ->srcu_idx counter so that future SRCU readers will + * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows * us to wait for pre-existing readers in a starvation-free manner. */ static void srcu_flip(struct srcu_struct *sp) { - WRITE_ONCE(sp->completed, sp->completed + 1); + WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1); /* * Ensure that if the updater misses an __srcu_read_unlock() @@ -296,21 +605,9 @@ static void srcu_flip(struct srcu_struct *sp) } /* - * End an SRCU grace period. - */ -static void srcu_gp_end(struct srcu_struct *sp) -{ - rcu_seq_end(&sp->srcu_gp_seq); - - spin_lock_irq(&sp->queue_lock); - rcu_segcblist_advance(&sp->srcu_cblist, - rcu_seq_current(&sp->srcu_gp_seq)); - spin_unlock_irq(&sp->queue_lock); -} - -/* - * Enqueue an SRCU callback on the specified srcu_struct structure, - * initiating grace-period processing if it is not already running. + * Enqueue an SRCU callback on the srcu_data structure associated with + * the current CPU and the specified srcu_struct structure, initiating + * grace-period processing if it is not already running. * * Note that all CPUs must agree that the grace period extended beyond * all pre-existing SRCU read-side critical section. On systems with @@ -335,33 +632,40 @@ static void srcu_gp_end(struct srcu_struct *sp) * srcu_read_lock(), and srcu_read_unlock() that are all passed the same * srcu_struct structure. */ -void call_srcu(struct srcu_struct *sp, struct rcu_head *head, +void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, rcu_callback_t func) { unsigned long flags; - - head->next = NULL; - head->func = func; - spin_lock_irqsave(&sp->queue_lock, flags); - smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ - rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); - if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) { - srcu_gp_start(sp); - queue_delayed_work(system_power_efficient_wq, &sp->work, 0); + bool needgp = false; + unsigned long s; + struct srcu_data *sdp; + + check_init_srcu_struct(sp); + rhp->func = func; + local_irq_save(flags); + sdp = this_cpu_ptr(sp->sda); + spin_lock(&sdp->lock); + rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); + rcu_segcblist_advance(&sdp->srcu_cblist, + rcu_seq_current(&sp->srcu_gp_seq)); + s = rcu_seq_snap(&sp->srcu_gp_seq); + (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); + if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { + sdp->srcu_gp_seq_needed = s; + needgp = true; } - spin_unlock_irqrestore(&sp->queue_lock, flags); + spin_unlock_irqrestore(&sdp->lock, flags); + if (needgp) + srcu_funnel_gp_start(sp, sdp, s); } EXPORT_SYMBOL_GPL(call_srcu); -static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); - /* * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). 
*/ static void __synchronize_srcu(struct srcu_struct *sp) { struct rcu_synchronize rcu; - struct rcu_head *head = &rcu.head; RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || lock_is_held(&rcu_bh_lock_map) || @@ -372,26 +676,12 @@ static void __synchronize_srcu(struct srcu_struct *sp) if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) return; might_sleep(); + check_init_srcu_struct(sp); init_completion(&rcu.completion); - - head->next = NULL; - head->func = wakeme_after_rcu; - spin_lock_irq(&sp->queue_lock); - smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */ - if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) { - /* steal the processing owner */ - rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); - srcu_gp_start(sp); - spin_unlock_irq(&sp->queue_lock); - /* give the processing owner to work_struct */ - srcu_reschedule(sp, 0); - } else { - rcu_segcblist_enqueue(&sp->srcu_cblist, head, false); - spin_unlock_irq(&sp->queue_lock); - } - + init_rcu_head_on_stack(&rcu.head); + call_srcu(sp, &rcu.head, wakeme_after_rcu); wait_for_completion(&rcu.completion); - smp_mb(); /* Caller's later accesses after GP. */ + destroy_rcu_head_on_stack(&rcu.head); } /** @@ -408,6 +698,7 @@ void synchronize_srcu_expedited(struct srcu_struct *sp) { bool do_norm = rcu_gp_is_normal(); + check_init_srcu_struct(sp); if (!do_norm) { atomic_inc(&sp->srcu_exp_cnt); smp_mb__after_atomic(); /* increment before GP. */ @@ -415,7 +706,7 @@ void synchronize_srcu_expedited(struct srcu_struct *sp) __synchronize_srcu(sp); if (!do_norm) { smp_mb__before_atomic(); /* GP before decrement. */ - atomic_dec(&sp->srcu_exp_cnt); + WARN_ON_ONCE(atomic_dec_return(&sp->srcu_exp_cnt) < 0); } } EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); @@ -426,8 +717,8 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); * * Wait for the count to drain to zero of both indexes. To avoid the * possible starvation of synchronize_srcu(), it waits for the count of - * the index=((->completed & 1) ^ 1) to drain to zero at first, - * and then flip the completed and wait for the count of the other index. + * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first, + * and then flip the srcu_idx and wait for the count of the other index. * * Can block; must be called from process context. * @@ -468,13 +759,69 @@ void synchronize_srcu(struct srcu_struct *sp) } EXPORT_SYMBOL_GPL(synchronize_srcu); +/* + * Callback function for srcu_barrier() use. + */ +static void srcu_barrier_cb(struct rcu_head *rhp) +{ + struct srcu_data *sdp; + struct srcu_struct *sp; + + sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); + sp = sdp->sp; + if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) + complete(&sp->srcu_barrier_completion); +} + /** * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. * @sp: srcu_struct on which to wait for in-flight callbacks. */ void srcu_barrier(struct srcu_struct *sp) { - synchronize_srcu(sp); + int cpu; + struct srcu_data *sdp; + unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq); + + check_init_srcu_struct(sp); + mutex_lock(&sp->srcu_barrier_mutex); + if (rcu_seq_done(&sp->srcu_barrier_seq, s)) { + smp_mb(); /* Force ordering following return. */ + mutex_unlock(&sp->srcu_barrier_mutex); + return; /* Someone else did our work for us. */ + } + rcu_seq_start(&sp->srcu_barrier_seq); + init_completion(&sp->srcu_barrier_completion); + + /* Initial count prevents reaching zero until all CBs are posted. 
*/ + atomic_set(&sp->srcu_barrier_cpu_cnt, 1); + + /* + * Each pass through this loop enqueues a callback, but only + * on CPUs already having callbacks enqueued. Note that if + * a CPU already has callbacks enqueue, it must have already + * registered the need for a future grace period, so all we + * need do is enqueue a callback that will use the same + * grace period as the last callback already in the queue. + */ + for_each_possible_cpu(cpu) { + sdp = per_cpu_ptr(sp->sda, cpu); + spin_lock_irq(&sdp->lock); + atomic_inc(&sp->srcu_barrier_cpu_cnt); + sdp->srcu_barrier_head.func = srcu_barrier_cb; + if (!rcu_segcblist_entrain(&sdp->srcu_cblist, + &sdp->srcu_barrier_head, 0)) + atomic_dec(&sp->srcu_barrier_cpu_cnt); + spin_unlock_irq(&sdp->lock); + } + + /* Remove the initial count, at which point reaching zero can happen. */ + if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) + complete(&sp->srcu_barrier_completion); + wait_for_completion(&sp->srcu_barrier_completion); + + rcu_seq_end(&sp->srcu_barrier_seq); + mutex_unlock(&sp->srcu_barrier_mutex); } EXPORT_SYMBOL_GPL(srcu_barrier); @@ -487,21 +834,24 @@ EXPORT_SYMBOL_GPL(srcu_barrier); */ unsigned long srcu_batches_completed(struct srcu_struct *sp) { - return sp->completed; + return sp->srcu_idx; } EXPORT_SYMBOL_GPL(srcu_batches_completed); /* - * Core SRCU state machine. Advance callbacks from ->batch_check0 to - * ->batch_check1 and then to ->batch_done as readers drain. + * Core SRCU state machine. Push state bits of ->srcu_gp_seq + * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has + * completed in that state. */ -static void srcu_advance_batches(struct srcu_struct *sp) +static void srcu_advance_state(struct srcu_struct *sp) { int idx; + mutex_lock(&sp->srcu_gp_mutex); + /* * Because readers might be delayed for an extended period after - * fetching ->completed for their index, at any point in time there + * fetching ->srcu_idx for their index, at any point in time there * might well be readers using both idx=0 and idx=1. We therefore * need to wait for readers to clear from both index values before * invoking a callback. @@ -511,23 +861,29 @@ static void srcu_advance_batches(struct srcu_struct *sp) */ idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ if (idx == SRCU_STATE_IDLE) { - spin_lock_irq(&sp->queue_lock); - if (rcu_segcblist_empty(&sp->srcu_cblist)) { - spin_unlock_irq(&sp->queue_lock); + spin_lock_irq(&sp->gp_lock); + if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { + WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq)); + spin_unlock_irq(&sp->gp_lock); + mutex_unlock(&sp->srcu_gp_mutex); return; } idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); if (idx == SRCU_STATE_IDLE) srcu_gp_start(sp); - spin_unlock_irq(&sp->queue_lock); - if (idx != SRCU_STATE_IDLE) + spin_unlock_irq(&sp->gp_lock); + if (idx != SRCU_STATE_IDLE) { + mutex_unlock(&sp->srcu_gp_mutex); return; /* Someone else started the grace period. */ + } } if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { - idx = 1 ^ (sp->completed & 1); - if (!try_check_zero(sp, idx, 1)) + idx = 1 ^ (sp->srcu_idx & 1); + if (!try_check_zero(sp, idx, 1)) { + mutex_unlock(&sp->srcu_gp_mutex); return; /* readers present, retry later. */ + } srcu_flip(sp); rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); } @@ -538,10 +894,12 @@ static void srcu_advance_batches(struct srcu_struct *sp) * SRCU read-side critical sections are normally short, * so check at least twice in quick succession after a flip. 
*/ - idx = 1 ^ (sp->completed & 1); - if (!try_check_zero(sp, idx, 2)) - return; /* readers present, retry after later. */ - srcu_gp_end(sp); + idx = 1 ^ (sp->srcu_idx & 1); + if (!try_check_zero(sp, idx, 2)) { + mutex_unlock(&sp->srcu_gp_mutex); + return; /* readers present, retry later. */ + } + srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */ } } @@ -551,28 +909,51 @@ static void srcu_advance_batches(struct srcu_struct *sp) * the workqueue. Note that needed memory barriers have been executed * in this task's context by srcu_readers_active_idx_check(). */ -static void srcu_invoke_callbacks(struct srcu_struct *sp) +static void srcu_invoke_callbacks(struct work_struct *work) { + bool more; struct rcu_cblist ready_cbs; struct rcu_head *rhp; + struct srcu_data *sdp; + struct srcu_struct *sp; - spin_lock_irq(&sp->queue_lock); - if (!rcu_segcblist_ready_cbs(&sp->srcu_cblist)) { - spin_unlock_irq(&sp->queue_lock); - return; - } + sdp = container_of(work, struct srcu_data, work.work); + sp = sdp->sp; rcu_cblist_init(&ready_cbs); - rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs); - spin_unlock_irq(&sp->queue_lock); + spin_lock_irq(&sdp->lock); + smp_mb(); /* Old grace periods before callback invocation! */ + rcu_segcblist_advance(&sdp->srcu_cblist, + rcu_seq_current(&sp->srcu_gp_seq)); + if (sdp->srcu_cblist_invoking || + !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { + spin_unlock_irq(&sdp->lock); + return; /* Someone else on the job or nothing to do. */ + } + + /* We are on the job! Extract and invoke ready callbacks. */ + sdp->srcu_cblist_invoking = true; + rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); + spin_unlock_irq(&sdp->lock); rhp = rcu_cblist_dequeue(&ready_cbs); for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { local_bh_disable(); rhp->func(rhp); local_bh_enable(); } - spin_lock_irq(&sp->queue_lock); - rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs); - spin_unlock_irq(&sp->queue_lock); + + /* + * Update counts, accelerate new callbacks, and if needed, + * schedule another round of callback invocation. + */ + spin_lock_irq(&sdp->lock); + rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); + (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, + rcu_seq_snap(&sp->srcu_gp_seq)); + sdp->srcu_cblist_invoking = false; + more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); + spin_unlock_irq(&sdp->lock); + if (more) + srcu_schedule_cbs_sdp(sdp, 0); } /* @@ -581,19 +962,21 @@ static void srcu_invoke_callbacks(struct srcu_struct *sp) */ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) { - bool pending = true; - int state; + bool pushgp = true; - if (rcu_segcblist_empty(&sp->srcu_cblist)) { - spin_lock_irq(&sp->queue_lock); - state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); - if (rcu_segcblist_empty(&sp->srcu_cblist) && - state == SRCU_STATE_IDLE) - pending = false; - spin_unlock_irq(&sp->queue_lock); + spin_lock_irq(&sp->gp_lock); + if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { + if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) { + /* All requests fulfilled, time to go idle. */ + pushgp = false; + } + } else if (!rcu_seq_state(sp->srcu_gp_seq)) { + /* Outstanding request and no GP. Start one. 
*/ + srcu_gp_start(sp); } + spin_unlock_irq(&sp->gp_lock); - if (pending) + if (pushgp) queue_delayed_work(system_power_efficient_wq, &sp->work, delay); } @@ -606,8 +989,7 @@ void process_srcu(struct work_struct *work) sp = container_of(work, struct srcu_struct, work.work); - srcu_advance_batches(sp); - srcu_invoke_callbacks(sp); + srcu_advance_state(sp); srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL); } EXPORT_SYMBOL_GPL(process_srcu); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 346948b51b0b..3c23435d2083 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3776,12 +3776,16 @@ int rcutree_online_cpu(unsigned int cpu) { sync_sched_exp_online_cleanup(cpu); rcutree_affinity_setting(cpu, -1); + if (IS_ENABLED(CONFIG_TREE_SRCU)) + srcu_online_cpu(cpu); return 0; } int rcutree_offline_cpu(unsigned int cpu) { rcutree_affinity_setting(cpu, cpu); + if (IS_ENABLED(CONFIG_TREE_SRCU)) + srcu_offline_cpu(cpu); return 0; } @@ -4157,6 +4161,8 @@ void __init rcu_init(void) for_each_online_cpu(cpu) { rcutree_prepare_cpu(cpu); rcu_cpu_starting(cpu); + if (IS_ENABLED(CONFIG_TREE_SRCU)) + srcu_online_cpu(cpu); } } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index a2a45cb629d6..0e598ab08fea 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -541,6 +541,14 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp); static void rcu_dynticks_task_enter(void); static void rcu_dynticks_task_exit(void); +#ifdef CONFIG_SRCU +void srcu_online_cpu(unsigned int cpu); +void srcu_offline_cpu(unsigned int cpu); +#else /* #ifdef CONFIG_SRCU */ +void srcu_online_cpu(unsigned int cpu) { } +void srcu_offline_cpu(unsigned int cpu) { } +#endif /* #else #ifdef CONFIG_SRCU */ + #endif /* #ifndef RCU_TREE_NONCORE */ #ifdef CONFIG_RCU_TRACE -- cgit v1.2.3 From bcbfdd01dce5556a952fae84ef16fd0f12525e7b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 11 Apr 2017 15:50:41 -0700 Subject: rcu: Make non-preemptive schedule be Tasks RCU quiescent state Currently, a call to schedule() acts as a Tasks RCU quiescent state only if a context switch actually takes place. However, just the call to schedule() guarantees that the calling task has moved off of whatever tracing trampoline that it might have been one previously. This commit therefore plumbs schedule()'s "preempt" parameter into rcu_note_context_switch(), which then records the Tasks RCU quiescent state, but only if this call to schedule() was -not- due to a preemption. To avoid adding overhead to the common-case context-switch path, this commit hides the rcu_note_context_switch() check under an existing non-common-case check. Suggested-by: Steven Rostedt Signed-off-by: Paul E. 
McKenney --- include/linux/rcupdate.h | 11 ++++++++--- include/linux/rcutiny.h | 13 +++++++++---- include/linux/rcutree.h | 5 +++-- kernel/rcu/tree.c | 22 +++++++++++++++++++++- kernel/rcu/update.c | 1 + kernel/sched/core.c | 2 +- 6 files changed, 43 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index e6146d0074f8..f531b29207da 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -363,15 +363,20 @@ static inline void rcu_init_nohz(void) #ifdef CONFIG_TASKS_RCU #define TASKS_RCU(x) x extern struct srcu_struct tasks_rcu_exit_srcu; -#define rcu_note_voluntary_context_switch(t) \ +#define rcu_note_voluntary_context_switch_lite(t) \ do { \ - rcu_all_qs(); \ if (READ_ONCE((t)->rcu_tasks_holdout)) \ WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) +#define rcu_note_voluntary_context_switch(t) \ + do { \ + rcu_all_qs(); \ + rcu_note_voluntary_context_switch_lite(t); \ + } while (0) #else /* #ifdef CONFIG_TASKS_RCU */ #define TASKS_RCU(x) do { } while (0) -#define rcu_note_voluntary_context_switch(t) rcu_all_qs() +#define rcu_note_voluntary_context_switch_lite(t) do { } while (0) +#define rcu_note_voluntary_context_switch(t) rcu_all_qs() #endif /* #else #ifdef CONFIG_TASKS_RCU */ /** diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 5219be250f00..74d9c3a1feee 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -92,10 +92,11 @@ static inline void kfree_call_rcu(struct rcu_head *head, call_rcu(head, func); } -static inline void rcu_note_context_switch(void) -{ - rcu_sched_qs(); -} +#define rcu_note_context_switch(preempt) \ + do { \ + rcu_sched_qs(); \ + rcu_note_voluntary_context_switch_lite(current); \ + } while (0) /* * Take advantage of the fact that there is only one CPU, which @@ -242,6 +243,10 @@ static inline bool rcu_is_watching(void) #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ +static inline void rcu_request_urgent_qs_task(struct task_struct *t) +{ +} + static inline void rcu_all_qs(void) { barrier(); /* Avoid RCU read-side critical sections leaking across. */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 63a4e4cf40a5..0bacb6b2af69 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,7 +30,7 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H -void rcu_note_context_switch(void); +void rcu_note_context_switch(bool preempt); int rcu_needs_cpu(u64 basem, u64 *nextevt); void rcu_cpu_stall_reset(void); @@ -41,7 +41,7 @@ void rcu_cpu_stall_reset(void); */ static inline void rcu_virt_note_context_switch(int cpu) { - rcu_note_context_switch(); + rcu_note_context_switch(false); } void synchronize_rcu_bh(void); @@ -108,6 +108,7 @@ void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; bool rcu_is_watching(void); +void rcu_request_urgent_qs_task(struct task_struct *t); void rcu_all_qs(void); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3c23435d2083..891d97109e09 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -458,7 +458,7 @@ static void rcu_momentary_dyntick_idle(void) * and requires special handling for preemptible RCU. * The caller must have disabled interrupts. */ -void rcu_note_context_switch(void) +void rcu_note_context_switch(bool preempt) { barrier(); /* Avoid RCU read-side critical sections leaking down. 
*/ trace_rcu_utilization(TPS("Start context switch")); @@ -471,6 +471,8 @@ void rcu_note_context_switch(void) if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) rcu_momentary_dyntick_idle(); this_cpu_inc(rcu_dynticks.rcu_qs_ctr); + if (!preempt) + rcu_note_voluntary_context_switch_lite(current); out: trace_rcu_utilization(TPS("End context switch")); barrier(); /* Avoid RCU read-side critical sections leaking up. */ @@ -1149,6 +1151,24 @@ bool notrace rcu_is_watching(void) } EXPORT_SYMBOL_GPL(rcu_is_watching); +/* + * If a holdout task is actually running, request an urgent quiescent + * state from its CPU. This is unsynchronized, so migrations can cause + * the request to go to the wrong CPU. Which is OK, all that will happen + * is that the CPU's next context switch will be a bit slower and next + * time around this task will generate another request. + */ +void rcu_request_urgent_qs_task(struct task_struct *t) +{ + int cpu; + + barrier(); + cpu = task_cpu(t); + if (!task_curr(t)) + return; /* This task is not running on that CPU. */ + smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true); +} + #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) /* diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index c5df0d756900..273e869ca21d 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -665,6 +665,7 @@ static void check_holdout_task(struct task_struct *t, put_task_struct(t); return; } + rcu_request_urgent_qs_task(t); if (!needreport) return; if (*firstreport) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3b31fc05a0f1..2adf7b6c04e7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3378,7 +3378,7 @@ static void __sched notrace __schedule(bool preempt) hrtick_clear(rq); local_irq_disable(); - rcu_note_context_switch(); + rcu_note_context_switch(preempt); /* * Make sure that signal_pending_state()->signal_pending() below -- cgit v1.2.3 From 50f2112cf7a3e62a8d33838eb205d5fef306457a Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Tue, 11 Apr 2017 12:50:09 -0400 Subject: locks: Set FL_CLOSE when removing flock locks on close() Set FL_CLOSE in fl_flags as in locks_remove_posix() when clearing locks. NFS will check for this flag to ensure an unlock is sent in a following patch. Fuse handles flock and posix locks differently for FL_CLOSE, and so requires a fixup to retain the existing behavior for flock. 
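For illustration only (not part of this patch), a filesystem ->lock() handler can now tell the two close-time unlock cases apart roughly as follows, where FL_CLOSE_POSIX is the new FL_POSIX | FL_CLOSE mask:

	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX) {
		/* posix locks being dropped at close(); fuse leaves the
		 * implicit unlock to its flush method and skips sending
		 * an explicit unlock request here */
	} else if (fl->fl_flags & FL_CLOSE) {
		/* flock being dropped at the last close; the unlock
		 * request is still sent, preserving the old behaviour */
	}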
Signed-off-by: Benjamin Coddington Reviewed-by: Jeff Layton Acked-by: Miklos Szeredi Signed-off-by: Trond Myklebust --- fs/fuse/file.c | 2 +- fs/locks.c | 2 +- include/linux/fs.h | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ec238fb5a584..995da8957f6f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2168,7 +2168,7 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) } /* Unlock on close is handled by the flush method */ - if (fl->fl_flags & FL_CLOSE) + if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX) return 0; fuse_lk_fill(&args, file, fl, opcode, pid, flock, &inarg); diff --git a/fs/locks.c b/fs/locks.c index 26811321d39b..af2031a1fcff 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2504,7 +2504,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx) .fl_owner = filp, .fl_pid = current->tgid, .fl_file = filp, - .fl_flags = FL_FLOCK, + .fl_flags = FL_FLOCK | FL_CLOSE, .fl_type = F_UNLCK, .fl_end = OFFSET_MAX, }; diff --git a/include/linux/fs.h b/include/linux/fs.h index 7251f7bb45e8..72061aa65405 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -909,6 +909,8 @@ static inline struct file *get_file(struct file *f) #define FL_OFDLCK 1024 /* lock is "owned" by struct file */ #define FL_LAYOUT 2048 /* outstanding pNFS layout */ +#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE) + /* * Special return value from posix_lock_file() and vfs_lock_file() for * asynchronous locking. -- cgit v1.2.3 From 7d6ddf88c4db372689c8aa65ea652d0514d66c06 Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Tue, 11 Apr 2017 12:50:10 -0400 Subject: NFS: Add an iocounter wait function for async RPC tasks By sleeping on a new NFS Unlock-On-Close waitqueue, rpc tasks may wait for a lock context's iocounter to reach zero. The rpc waitqueue is only woken when the open_context has the NFS_CONTEXT_UNLOCK flag set in order to mitigate spurious wake-ups for any iocounter reaching zero. Signed-off-by: Benjamin Coddington Reviewed-by: Jeff Layton Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 1 + fs/nfs/pagelist.c | 34 +++++++++++++++++++++++++++++++++- include/linux/nfs_fs.h | 1 + include/linux/nfs_fs_sb.h | 1 + include/linux/nfs_page.h | 1 + 5 files changed, 37 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 3ffbffe8f39f..3e7b2e6a7cfb 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -218,6 +218,7 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp) static void pnfs_init_server(struct nfs_server *server) { rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); + rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC"); } #else diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index f53610672f03..ad92b401326c 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -102,6 +102,35 @@ nfs_iocounter_wait(struct nfs_lock_context *l_ctx) TASK_KILLABLE); } +/** + * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O + * to complete + * @task: the rpc_task that should wait + * @l_ctx: nfs_lock_context with io_counter to check + * + * Returns true if there is outstanding I/O to wait on and the + * task has been put to sleep. 
+ */ +bool +nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx) +{ + struct inode *inode = d_inode(l_ctx->open_context->dentry); + bool ret = false; + + if (atomic_read(&l_ctx->io_count) > 0) { + rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL); + ret = true; + } + + if (atomic_read(&l_ctx->io_count) == 0) { + rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task); + ret = false; + } + + return ret; +} +EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait); + /* * nfs_page_group_lock - lock the head of the page group * @req - request in group that is to be locked @@ -385,8 +414,11 @@ static void nfs_clear_request(struct nfs_page *req) req->wb_page = NULL; } if (l_ctx != NULL) { - if (atomic_dec_and_test(&l_ctx->io_count)) + if (atomic_dec_and_test(&l_ctx->io_count)) { wake_up_atomic_t(&l_ctx->io_count); + if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags)) + rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq); + } nfs_put_lock_context(l_ctx); req->wb_lock_context = NULL; } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 1b29915247b2..9aa044e76820 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -76,6 +76,7 @@ struct nfs_open_context { #define NFS_CONTEXT_ERROR_WRITE (0) #define NFS_CONTEXT_RESEND_WRITES (1) #define NFS_CONTEXT_BAD (2) +#define NFS_CONTEXT_UNLOCK (3) int error; struct list_head list; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index b34097c67848..2a70f34dffe8 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -222,6 +222,7 @@ struct nfs_server { u32 mountd_version; unsigned short mountd_port; unsigned short mountd_protocol; + struct rpc_wait_queue uoc_rpcwaitq; }; /* Server capabilities */ diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 6f01e28bba27..247cc3d3498f 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -141,6 +141,7 @@ extern int nfs_page_group_lock(struct nfs_page *, bool); extern void nfs_page_group_lock_wait(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); /* * Lock the page of an asynchronous request -- cgit v1.2.3 From b1ece737f44f91dca8f4829cf0b442e752e406db Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Tue, 11 Apr 2017 12:50:11 -0400 Subject: lockd: Introduce nlmclnt_operations NFS would enjoy the ability to modify the behavior of the NLM client's unlock RPC task in order to delay the transmission of the unlock until IO that was submitted under that lock has completed. This ability can ensure that the NLM client will always complete the transmission of an unlock even if the waiting caller has been interrupted with fatal signal. For this purpose, a pointer to a struct nlmclnt_operations can be assigned in a nfs_module's nfs_rpc_ops that will install those nlmclnt_operations on the nlm_host. The struct nlmclnt_operations defines three callback operations that will be used in a following patch: nlmclnt_alloc_call - used to call back after a successful allocation of a struct nlm_rqst in nlmclnt_proc(). nlmclnt_unlock_prepare - used to call back during NLM unlock's rpc_call_prepare. The NLM client defers calling rpc_call_start() until this callback returns false. nlmclnt_release_call - used to call back when the NLM client's struct nlm_rqst is freed. 
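For illustration only (the names below are invented, not part of this patch), a module supplying these hooks might define:

	static void example_nlm_alloc_call(void *data)
	{
		/* take a reference on the caller's per-request data */
	}

	static bool example_nlm_unlock_prepare(struct rpc_task *task, void *data)
	{
		/* return true only after putting @task to sleep on a
		 * waitqueue; the NLM client then defers rpc_call_start() */
		return false;
	}

	static void example_nlm_release_call(void *data)
	{
		/* drop the reference taken in ->nlmclnt_alloc_call() */
	}

	static const struct nlmclnt_operations example_nlmclnt_ops = {
		.nlmclnt_alloc_call	= example_nlm_alloc_call,
		.nlmclnt_unlock_prepare	= example_nlm_unlock_prepare,
		.nlmclnt_release_call	= example_nlm_release_call,
	};

The structure is handed to lockd through the nfs_rpc_ops ->nlmclnt_ops pointer, and the data argument passed to each callback is whatever the caller gave to nlmclnt_proc().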
Signed-off-by: Benjamin Coddington Reviewed-by: Jeff Layton Signed-off-by: Trond Myklebust --- fs/lockd/clntlock.c | 1 + fs/lockd/clntproc.c | 26 +++++++++++++++++++++++++- fs/nfs/client.c | 1 + fs/nfs/nfs3proc.c | 2 +- fs/nfs/proc.c | 2 +- include/linux/lockd/bind.h | 24 ++++++++++++++++++++++-- include/linux/lockd/lockd.h | 2 ++ include/linux/nfs_xdr.h | 1 + 8 files changed, 54 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 41e491b8e5d7..27d577dbe51a 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -69,6 +69,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init) if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL) goto out_nobind; + host->h_nlmclnt_ops = nlm_init->nlmclnt_ops; return host; out_nobind: nlmclnt_release_host(host); diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 112952037933..066ac313ae5c 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -150,17 +150,22 @@ static void nlmclnt_release_lockargs(struct nlm_rqst *req) * @host: address of a valid nlm_host context representing the NLM server * @cmd: fcntl-style file lock operation to perform * @fl: address of arguments for the lock operation + * @data: address of data to be sent to callback operations * */ -int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) +int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data) { struct nlm_rqst *call; int status; + const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops; call = nlm_alloc_call(host); if (call == NULL) return -ENOMEM; + if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call) + nlmclnt_ops->nlmclnt_alloc_call(data); + nlmclnt_locks_init_private(fl, host); if (!fl->fl_u.nfs_fl.owner) { /* lockowner allocation has failed */ @@ -169,6 +174,7 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) } /* Set up the argument struct */ nlmclnt_setlockargs(call, fl); + call->a_callback_data = data; if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { if (fl->fl_type != F_UNLCK) { @@ -214,8 +220,12 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) void nlmclnt_release_call(struct nlm_rqst *call) { + const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops; + if (!atomic_dec_and_test(&call->a_count)) return; + if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call) + nlmclnt_ops->nlmclnt_release_call(call->a_callback_data); nlmclnt_release_host(call->a_host); nlmclnt_release_lockargs(call); kfree(call); @@ -687,6 +697,19 @@ out: return status; } +static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data) +{ + struct nlm_rqst *req = data; + const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops; + bool defer_call = false; + + if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare) + defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data); + + if (!defer_call) + rpc_call_start(task); +} + static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) { struct nlm_rqst *req = data; @@ -720,6 +743,7 @@ die: } static const struct rpc_call_ops nlmclnt_unlock_ops = { + .rpc_call_prepare = nlmclnt_unlock_prepare, .rpc_call_done = nlmclnt_unlock_callback, .rpc_release = nlmclnt_rpc_release, }; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 3e7b2e6a7cfb..e0302101e18a 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -546,6 +546,7 @@ static int nfs_start_lockd(struct nfs_server *server) .noresvport = server->flags & 
NFS_MOUNT_NORESVPORT ? 1 : 0, .net = clp->cl_net, + .nlmclnt_ops = clp->cl_nfs_mod->rpc_ops->nlmclnt_ops, }; if (nlm_init.nfs_version > 3) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index dc925b531f32..03b3c3de28f1 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -870,7 +870,7 @@ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl) { struct inode *inode = file_inode(filp); - return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl); + return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl, NULL); } static int nfs3_have_delegation(struct inode *inode, fmode_t flags) diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index b7bca8303989..9872cf676a50 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -638,7 +638,7 @@ nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl) { struct inode *inode = file_inode(filp); - return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl); + return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl, NULL); } /* Helper functions for NFS lock bounds checking */ diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 140edab64446..05728396a1a1 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -18,6 +18,7 @@ /* Dummy declarations */ struct svc_rqst; +struct rpc_task; /* * This is the set of functions for lockd->nfsd communication @@ -43,6 +44,7 @@ struct nlmclnt_initdata { u32 nfs_version; int noresvport; struct net *net; + const struct nlmclnt_operations *nlmclnt_ops; }; /* @@ -52,8 +54,26 @@ struct nlmclnt_initdata { extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init); extern void nlmclnt_done(struct nlm_host *host); -extern int nlmclnt_proc(struct nlm_host *host, int cmd, - struct file_lock *fl); +/* + * NLM client operations provide a means to modify RPC processing of NLM + * requests. Callbacks receive a pointer to data passed into the call to + * nlmclnt_proc(). + */ +struct nlmclnt_operations { + /* Called on successful allocation of nlm_rqst, use for allocation or + * reference counting. */ + void (*nlmclnt_alloc_call)(void *); + + /* Called in rpc_task_prepare for unlock. A return value of true + * indicates the callback has put the task to sleep on a waitqueue + * and NLM should not call rpc_call_start(). 
*/ + bool (*nlmclnt_unlock_prepare)(struct rpc_task*, void *); + + /* Called when the nlm_rqst is freed, callbacks should clean up here */ + void (*nlmclnt_release_call)(void *); +}; + +extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data); extern int lockd_up(struct net *net); extern void lockd_down(struct net *net); diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index b37dee3acaba..41f7b6a04d69 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -69,6 +69,7 @@ struct nlm_host { char *h_addrbuf; /* address eyecatcher */ struct net *net; /* host net */ char nodename[UNX_MAXNODENAME + 1]; + const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */ }; /* @@ -142,6 +143,7 @@ struct nlm_rqst { struct nlm_block * a_block; unsigned int a_retries; /* Retry count */ u8 a_owner[NLMCLNT_OHSIZE]; + void * a_callback_data; /* sent to nlmclnt_operations callbacks */ }; /* diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 51e27f9746ee..677c6b91dfcd 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1551,6 +1551,7 @@ struct nfs_rpc_ops { const struct inode_operations *dir_inode_ops; const struct inode_operations *file_inode_ops; const struct file_operations *file_ops; + const struct nlmclnt_operations *nlmclnt_ops; int (*getroot) (struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); -- cgit v1.2.3 From b62ea4112ce3746664dcc2f232d03461f0e6f3c7 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Fri, 21 Apr 2017 16:47:11 +0200 Subject: video: fbdev: imxfb: support AUS mode Some displays require setting AUS mode in the LDCD AUS Mode Control Register to work with the imxfb driver. Like the value of the Panel Configuration Register, the AUS mode setting depends on the display mode. Allow setting AUS mode from the device tree by adding a boolean property. Make this property optional to keep the DT ABI stable. AUS mode can be set only on imx21 and compatible chipsets. Signed-off-by: Martin Kaiser Cc: Sascha Hauer Cc: Rob Herring Cc: Mark Rutland Signed-off-by: Bartlomiej Zolnierkiewicz --- drivers/video/fbdev/imxfb.c | 17 +++++++++++++++++ include/linux/platform_data/video-imxfb.h | 1 + 2 files changed, 18 insertions(+) (limited to 'include') diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index 1b0faadb3080..c166e0725be5 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c @@ -117,6 +117,9 @@ #define IMXFB_LSCR1_DEFAULT 0x00120300 +#define LCDC_LAUSCR 0x80 +#define LAUSCR_AUS_MODE (1<<31) + /* Used fb-mode. Can be set on kernel command line, therefore file-static. */ static const char *fb_mode; @@ -158,6 +161,7 @@ struct imxfb_info { dma_addr_t dbar2; u_int pcr; + u_int lauscr; u_int pwmr; u_int lscr1; u_int dmacr; @@ -422,6 +426,11 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) pcr |= imxfb_mode->pcr & ~(0x3f | (7 << 25)); fbi->pcr = pcr; + /* + * The LCDC AUS Mode Control Register does not exist on imx1. 
+ */ + if (!is_imx1_fb(fbi) && imxfb_mode->aus_mode) + fbi->lauscr = LAUSCR_AUS_MODE; /* * Copy the RGB parameters for this display @@ -638,6 +647,9 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf if (fbi->dmacr) writel(fbi->dmacr, fbi->regs + LCDC_DMACR); + if (fbi->lauscr) + writel(fbi->lauscr, fbi->regs + LCDC_LAUSCR); + return 0; } @@ -734,6 +746,11 @@ static int imxfb_of_read_mode(struct device *dev, struct device_node *np, imxfb_mode->bpp = bpp; imxfb_mode->pcr = pcr; + /* + * fsl,aus-mode is optional + */ + imxfb_mode->aus_mode = of_property_read_bool(np, "fsl,aus-mode"); + return 0; } diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h index a5c0a71ec914..cf9348b376ac 100644 --- a/include/linux/platform_data/video-imxfb.h +++ b/include/linux/platform_data/video-imxfb.h @@ -50,6 +50,7 @@ struct imx_fb_videomode { struct fb_videomode mode; u32 pcr; + bool aus_mode; unsigned char bpp; }; -- cgit v1.2.3 From 4f757f3cbf54edef7b75c68d6d6d2f1a0ca08d2e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 15 Apr 2017 17:31:22 -0400 Subject: make sure that mntns_install() doesn't end up with referral for root new flag: LOOKUP_DOWN. If the starting point is overmounted, cross into whatever's mounted on top, triggering referrals et.al. Use that instead of follow_down_one() loop in mntns_install(), handle errors properly. Signed-off-by: Al Viro --- fs/namei.c | 38 ++++++++++++++++++++++++++++++++++++++ fs/namespace.c | 18 +++++++++++------- include/linux/namei.h | 1 + 3 files changed, 50 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/namei.c b/fs/namei.c index 60c0a78ebca7..646db9cf2579 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2252,6 +2252,35 @@ static inline int lookup_last(struct nameidata *nd) return walk_component(nd, 0); } +static int handle_lookup_down(struct nameidata *nd) +{ + struct path path = nd->path; + struct inode *inode = nd->inode; + unsigned seq = nd->seq; + int err; + + if (nd->flags & LOOKUP_RCU) { + /* + * don't bother with unlazy_walk on failure - we are + * at the very beginning of walk, so we lose nothing + * if we simply redo everything in non-RCU mode + */ + if (unlikely(!__follow_mount_rcu(nd, &path, &inode, &seq))) + return -ECHILD; + } else { + dget(path.dentry); + err = follow_managed(&path, nd); + if (unlikely(err < 0)) + return err; + inode = d_backing_inode(path.dentry); + seq = 0; + } + path_to_nameidata(&path, nd); + nd->inode = inode; + nd->seq = seq; + return 0; +} + /* Returns 0 and nd will be valid on success; Retuns error, otherwise. 
*/ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path) { @@ -2260,6 +2289,15 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path if (IS_ERR(s)) return PTR_ERR(s); + + if (unlikely(flags & LOOKUP_DOWN)) { + err = handle_lookup_down(nd); + if (unlikely(err < 0)) { + terminate_walk(nd); + return err; + } + } + while (!(err = link_path_walk(s, nd)) && ((err = lookup_last(nd)) > 0)) { s = trailing_symlink(nd); diff --git a/fs/namespace.c b/fs/namespace.c index cc1375eff88c..0886ef28bff6 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3465,8 +3465,9 @@ static void mntns_put(struct ns_common *ns) static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns) { struct fs_struct *fs = current->fs; - struct mnt_namespace *mnt_ns = to_mnt_ns(ns); + struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns; struct path root; + int err; if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) || !ns_capable(current_user_ns(), CAP_SYS_CHROOT) || @@ -3477,15 +3478,18 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns) return -EINVAL; get_mnt_ns(mnt_ns); - put_mnt_ns(nsproxy->mnt_ns); + old_mnt_ns = nsproxy->mnt_ns; nsproxy->mnt_ns = mnt_ns; /* Find the root */ - root.mnt = &mnt_ns->root->mnt; - root.dentry = mnt_ns->root->mnt.mnt_root; - path_get(&root); - while(d_mountpoint(root.dentry) && follow_down_one(&root)) - ; + err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt, + "/", LOOKUP_DOWN, &root); + if (err) { + /* revert to old namespace */ + nsproxy->mnt_ns = old_mnt_ns; + put_mnt_ns(mnt_ns); + return err; + } /* Update the pwd and root */ set_fs_pwd(fs, &root); diff --git a/include/linux/namei.h b/include/linux/namei.h index f29abda31e6d..8b4794e83196 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -44,6 +44,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_JUMPED 0x1000 #define LOOKUP_ROOT 0x2000 #define LOOKUP_EMPTY 0x4000 +#define LOOKUP_DOWN 0x8000 extern int path_pts(struct path *path); -- cgit v1.2.3 From 7c2adaf11036654e4a5f5da0bf09916901f3e1ec Mon Sep 17 00:00:00 2001 From: John Crispin Date: Mon, 23 Jan 2017 13:48:27 +0100 Subject: reset: mediatek: Add MT2701 ethsys reset controller include file Add the missing reset bits of the ethsys core to the mt2701-reset include file, so that we can reference them from within a devicetree file. Signed-off-by: John Crispin Signed-off-by: Stephen Boyd --- include/dt-bindings/reset/mt2701-resets.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include') diff --git a/include/dt-bindings/reset/mt2701-resets.h b/include/dt-bindings/reset/mt2701-resets.h index aaf03057f755..21deb547cfa4 100644 --- a/include/dt-bindings/reset/mt2701-resets.h +++ b/include/dt-bindings/reset/mt2701-resets.h @@ -80,4 +80,11 @@ #define MT2701_HIFSYS_PCIE1_RST 25 #define MT2701_HIFSYS_PCIE2_RST 26 +/* ETHSYS resets */ +#define MT2701_ETHSYS_SYS_RST 0 +#define MT2701_ETHSYS_MCM_RST 2 +#define MT2701_ETHSYS_FE_RST 6 +#define MT2701_ETHSYS_GMAC_RST 23 +#define MT2701_ETHSYS_PPE_RST 31 + #endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2701 */ -- cgit v1.2.3 From cf0c3e68aa81f992b0301f62e341b710d385bf68 Mon Sep 17 00:00:00 2001 From: Jeroen Hofstee Date: Fri, 21 Apr 2017 15:21:11 +0900 Subject: kbuild: fix asm-offset generation to work with clang KBuild abuses the asm statement to write to a file and clang chokes about these invalid asm statements. Hack it even more by fooling this is actual valid asm code. 
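To illustrate the trick (the value 23 and the x86 '$' immediate prefix are only examples), a definition like DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS) now emits its marker through a .ascii directive that both GCC and clang accept, roughly:

	.ascii "->NR_PAGEFLAGS $23 __NR_PAGEFLAGS"

The sed expression then strips the .ascii wrapper (tolerating the extra whitespace LLVM's integrated assembler adds), drops the '$' or '#' immediate prefix, and produces the generated header line:

	#define NR_PAGEFLAGS 23 /* __NR_PAGEFLAGS */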
[masahiro: Import Jeroen's work for U-Boot: http://patchwork.ozlabs.org/patch/375026/ Tweak sed script a little to avoid garbage '#' for GCC case, like #define NR_PAGEFLAGS 23 /* __NR_PAGEFLAGS # */ ] Signed-off-by: Jeroen Hofstee Signed-off-by: Masahiro Yamada Reviewed-by: Matthias Kaehlcke Tested-by: Matthias Kaehlcke --- include/linux/kbuild.h | 6 +++--- scripts/Makefile.lib | 8 ++++++-- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h index 22a72198c14b..4e80f3a9ad58 100644 --- a/include/linux/kbuild.h +++ b/include/linux/kbuild.h @@ -2,14 +2,14 @@ #define __LINUX_KBUILD_H #define DEFINE(sym, val) \ - asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val)) -#define BLANK() asm volatile("\n->" : : ) +#define BLANK() asm volatile("\n.ascii \"->\"" : : ) #define OFFSET(sym, str, mem) \ DEFINE(sym, offsetof(struct str, mem)) #define COMMENT(x) \ - asm volatile("\n->#" x) + asm volatile("\n.ascii \"->#" x "\"") #endif diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 8109c133887d..2209ed696fb4 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -413,10 +413,14 @@ cmd_xzmisc = (cat $(filter-out FORCE,$^) | \ # --------------------------------------------------------------------------- # Default sed regexp - multiline due to syntax constraints +# +# Use [:space:] because LLVM's integrated assembler inserts around +# the .ascii directive whereas GCC keeps the as-is. define sed-offsets - "/^->/{s:->#\(.*\):/* \1 */:; \ + 's:^[[:space:]]*\.ascii[[:space:]]*"\(.*\)".*:\1:; \ + /^->/{s:->#\(.*\):/* \1 */:; \ s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \ - s:->::; p;}" + s:->::; p;}' endef # Use filechk to avoid rebuilds when a header changes, but the resulting file -- cgit v1.2.3 From f107d7a43923a83d837b3ea3c7b7de58cd014bbd Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Thu, 16 Mar 2017 09:02:42 +0100 Subject: mtd: nand: Remove unused chip->write_page() hook The last/only user of the chip->write_page() hook (the Atmel NAND controller driver) has been reworked and is no longer specifying a custom ->write_page() implementation. Drop this hook before someone else start abusing it. 
Signed-off-by: Boris Brezillon Reviewed-by: Masahiro Yamada --- drivers/mtd/nand/nand_base.c | 12 +++++------- include/linux/mtd/nand.h | 4 ---- 2 files changed, 5 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index c4c3386e4c0a..36258e69a389 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2633,7 +2633,7 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, } /** - * nand_write_page - [REPLACEABLE] write one page + * nand_write_page - write one page * @mtd: MTD device structure * @chip: NAND chip descriptor * @offset: address offset within the page @@ -2836,9 +2836,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, /* We still need to erase leftover OOB data */ memset(chip->oob_poi, 0xff, mtd->oobsize); } - ret = chip->write_page(mtd, chip, column, bytes, wbuf, - oob_required, page, cached, - (ops->mode == MTD_OPS_RAW)); + + ret = nand_write_page(mtd, chip, column, bytes, wbuf, + oob_required, page, cached, + (ops->mode == MTD_OPS_RAW)); if (ret) break; @@ -4548,9 +4549,6 @@ int nand_scan_tail(struct mtd_info *mtd) } } - if (!chip->write_page) - chip->write_page = nand_write_page; - /* * Check ECC mode, default to software if 3byte/512byte hardware ECC is * selected and we have 256 byte pagesize fallback to software ECC diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index c7de017c7f4c..40657939797c 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -828,7 +828,6 @@ struct nand_manufacturer_ops { * @errstat: [OPTIONAL] hardware specific function to perform * additional error status checks (determine if errors are * correctable). - * @write_page: [REPLACEABLE] High-level page write function * @manufacturer: [INTERN] Contains manufacturer information */ @@ -854,9 +853,6 @@ struct nand_chip { int (*scan_bbt)(struct mtd_info *mtd); int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page); - int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, - uint32_t offset, int data_len, const uint8_t *buf, - int oob_required, int page, int cached, int raw); int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, int feature_addr, uint8_t *subfeature_para); int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip, -- cgit v1.2.3 From 07604686e808cd93d352172806a7828860f048f5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 30 Mar 2017 15:45:47 +0900 Subject: mtd: nand: relax ecc.read_page() return value for uncorrectable ECC The comment for ecc.read_page() requires that it should return "0 if bitflips uncorrectable". Actually, drivers could return positive values when uncorrectable bitflips occur. For example, nand_read_page_swecc() is the case. If ecc.correct() returns -EBADMSG for the first ECC sector, and a positive value for the second one, nand_read_page_swecc() returns a positive max_bitflips and increments ecc_stats.failed for the same page. The requirement can be relaxed by tweaking nand_do_read_ops(). Move the max_bitflips calculation below the retry. 
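For illustration (a simplified sketch, not the exact nand_read_page_swecc() code), the per-step correction loop shows why a positive return value can coexist with an uncorrectable sector:

	unsigned int max_bitflips = 0;
	int i, stat;

	for (i = 0; i < chip->ecc.steps; i++) {
		stat = chip->ecc.correct(mtd, p, ecc_code, ecc_calc);
		if (stat < 0)
			mtd->ecc_stats.failed++;	/* e.g. -EBADMSG on this step */
		else
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		p += chip->ecc.size;
		ecc_code += chip->ecc.bytes;
		ecc_calc += chip->ecc.bytes;
	}
	return max_bitflips;	/* may be positive even though a step failed */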
Signed-off-by: Masahiro Yamada Suggested-by: Boris Brezillon Signed-off-by: Boris Brezillon --- drivers/mtd/nand/nand_base.c | 3 +-- include/linux/mtd/nand.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 36258e69a389..de6c8045c85b 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1993,8 +1993,6 @@ read_retry: break; } - max_bitflips = max_t(unsigned int, max_bitflips, ret); - /* Transfer not aligned data */ if (use_bufpoi) { if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && @@ -2045,6 +2043,7 @@ read_retry: } buf += bytes; + max_bitflips = max_t(unsigned int, max_bitflips, ret); } else { memcpy(buf, chip->buffers->databuf + col, bytes); buf += bytes; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 40657939797c..9e0c93c44bef 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -516,7 +516,7 @@ static inline void nand_hw_control_init(struct nand_hw_control *nfc) * out-of-band data). * @read_page: function to read a page according to the ECC generator * requirements; returns maximum number of bitflips corrected in - * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error + * any single ECC step, -EIO hw error * @read_subpage: function to read parts of the page covered by ECC; * returns same as read_page() * @write_subpage: function to write parts of the page covered by ECC. -- cgit v1.2.3 From 477544c62a84d3bacd9f90ba75ffc16c04d78071 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 30 Mar 2017 17:15:05 +0900 Subject: mtd: nand: allow drivers to request minimum alignment for passed buffer In some cases, nand_do_{read,write}_ops is passed with unaligned ops->datbuf. Drivers using DMA will be unhappy about unaligned buffer. The new struct member, buf_align, represents the minimum alignment the driver require for the buffer. If the buffer passed from the upper MTD layer does not have enough alignment, nand_do_*_ops will use bufpoi. 
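As a usage illustration (driver and value are hypothetical), a controller that DMAs straight from the caller's buffer would request its minimum alignment before nand_scan():

	/* in the driver's probe routine */
	chip->options |= NAND_USE_BOUNCE_BUFFER;
	chip->buf_align = 4;	/* DMA engine needs 32-bit aligned buffers */

With that set, nand_do_{read,write}_ops() falls back to the chip's internal buffer (bufpoi) whenever the buffer coming from the MTD layer is either not virt_addr_valid() or not IS_ALIGNED() to buf_align; drivers that leave buf_align unset keep the old behaviour, since it defaults to 1.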
Signed-off-by: Masahiro Yamada Signed-off-by: Boris Brezillon --- drivers/mtd/nand/nand_base.c | 10 ++++++++-- include/linux/mtd/nand.h | 2 ++ 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index c796d0e4039a..ed49a1d634b0 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1954,7 +1954,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, if (!aligned) use_bufpoi = 1; else if (chip->options & NAND_USE_BOUNCE_BUFFER) - use_bufpoi = !virt_addr_valid(buf); + use_bufpoi = !virt_addr_valid(buf) || + !IS_ALIGNED((unsigned long)buf, + chip->buf_align); else use_bufpoi = 0; @@ -2810,7 +2812,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, if (part_pagewr) use_bufpoi = 1; else if (chip->options & NAND_USE_BOUNCE_BUFFER) - use_bufpoi = !virt_addr_valid(buf); + use_bufpoi = !virt_addr_valid(buf) || + !IS_ALIGNED((unsigned long)buf, + chip->buf_align); else use_bufpoi = 0; @@ -3429,6 +3433,8 @@ static void nand_set_defaults(struct nand_chip *chip) nand_hw_control_init(chip->controller); } + if (!chip->buf_align) + chip->buf_align = 1; } /* Sanitize ONFI strings so we can safely print them */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 9e0c93c44bef..8f67b1581683 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -755,6 +755,7 @@ struct nand_manufacturer_ops { * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buffers: buffer structure for read/write + * @buf_align: minimum buffer alignment required by a platform * @hwcontrol: platform-specific hardware control structure * @erase: [REPLACEABLE] erase function * @scan_bbt: [REPLACEABLE] function to scan bad block table @@ -905,6 +906,7 @@ struct nand_chip { struct nand_ecc_ctrl ecc; struct nand_buffers *buffers; + unsigned long buf_align; struct nand_hw_control hwcontrol; uint8_t *bbt; -- cgit v1.2.3 From 51f567777799c9d85a778302b9eb61cf15214a98 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 6 Apr 2017 22:36:31 -0400 Subject: nfsd: check for oversized NFSv2/v3 arguments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A client can append random data to the end of an NFSv2 or NFSv3 RPC call without our complaining; we'll just stop parsing at the end of the expected data and ignore the rest. Encoded arguments and replies are stored together in an array of pages, and if a call is too large it could leave inadequate space for the reply. This is normally OK because NFS RPC's typically have either short arguments and long replies (like READ) or long arguments and short replies (like WRITE). But a client that sends an incorrectly long reply can violate those assumptions. This was observed to cause crashes. So, insist that the argument not be any longer than we expect. Also, several operations increment rq_next_page in the decode routine before checking the argument size, which can leave rq_next_page pointing well past the end of the page array, causing trouble later in svc_free_pages. As followup we may also want to rewrite the encoding routines to check more carefully that they aren't running off the end of the page array. Reported-by: Tuomas Haanpää Reported-by: Ari Kauppi Cc: stable@vger.kernel.org Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs3xdr.c | 23 +++++++++++++++++------ fs/nfsd/nfsxdr.c | 13 ++++++++++--- include/linux/sunrpc/svc.h | 3 +-- 3 files changed, 28 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 452334694a5d..12feac6ee2fd 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -334,8 +334,11 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); - args->count = ntohl(*p++); + + if (!xdr_argsize_check(rqstp, p)) + return 0; + len = min(args->count, max_blocksize); /* set up the kvec */ @@ -349,7 +352,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, v++; } args->vlen = v; - return xdr_argsize_check(rqstp, p); + return 1; } int @@ -541,9 +544,11 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, p = decode_fh(p, &args->fh); if (!p) return 0; + if (!xdr_argsize_check(rqstp, p)) + return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); - return xdr_argsize_check(rqstp, p); + return 1; } int @@ -569,10 +574,14 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, args->verf = p; p += 2; args->dircount = ~0; args->count = ntohl(*p++); + + if (!xdr_argsize_check(rqstp, p)) + return 0; + args->count = min_t(u32, args->count, PAGE_SIZE); args->buffer = page_address(*(rqstp->rq_next_page++)); - return xdr_argsize_check(rqstp, p); + return 1; } int @@ -590,6 +599,9 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, args->dircount = ntohl(*p++); args->count = ntohl(*p++); + if (!xdr_argsize_check(rqstp, p)) + return 0; + len = args->count = min(args->count, max_blocksize); while (len > 0) { struct page *p = *(rqstp->rq_next_page++); @@ -597,8 +609,7 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, args->buffer = page_address(p); len -= PAGE_SIZE; } - - return xdr_argsize_check(rqstp, p); + return 1; } int diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index de07ff625777..6a4947a3f4fa 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c @@ -257,6 +257,9 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, len = args->count = ntohl(*p++); p++; /* totalcount - unused */ + if (!xdr_argsize_check(rqstp, p)) + return 0; + len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); /* set up somewhere to store response. 
@@ -272,7 +275,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, v++; } args->vlen = v; - return xdr_argsize_check(rqstp, p); + return 1; } int @@ -362,9 +365,11 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli p = decode_fh(p, &args->fh); if (!p) return 0; + if (!xdr_argsize_check(rqstp, p)) + return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); - return xdr_argsize_check(rqstp, p); + return 1; } int @@ -402,9 +407,11 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, args->cookie = ntohl(*p++); args->count = ntohl(*p++); args->count = min_t(u32, args->count, PAGE_SIZE); + if (!xdr_argsize_check(rqstp, p)) + return 0; args->buffer = page_address(*(rqstp->rq_next_page++)); - return xdr_argsize_check(rqstp, p); + return 1; } /* diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index e770abeed32d..6ef19cf658b4 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -336,8 +336,7 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p) { char *cp = (char *)p; struct kvec *vec = &rqstp->rq_arg.head[0]; - return cp >= (char*)vec->iov_base - && cp <= (char*)vec->iov_base + vec->iov_len; + return cp == (char *)vec->iov_base + vec->iov_len; } static inline int -- cgit v1.2.3 From 16719199a43f0740113041fb34a0854b1d7f2111 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Wed, 1 Mar 2017 03:12:03 +0300 Subject: uapi: fix linux/nfsd/cld.h userspace compilation errors Include and consistently use types it provides to fix the following linux/nfsd/cld.h userspace compilation errors: /usr/include/linux/nfsd/cld.h:40:2: error: unknown type name 'uint16_t' uint16_t cn_len; /* length of cm_id */ /usr/include/linux/nfsd/cld.h:46:2: error: unknown type name 'uint8_t' uint8_t cm_vers; /* upcall version */ /usr/include/linux/nfsd/cld.h:47:2: error: unknown type name 'uint8_t' uint8_t cm_cmd; /* upcall command */ /usr/include/linux/nfsd/cld.h:48:2: error: unknown type name 'int16_t' int16_t cm_status; /* return code */ /usr/include/linux/nfsd/cld.h:49:2: error: unknown type name 'uint32_t' uint32_t cm_xid; /* transaction id */ /usr/include/linux/nfsd/cld.h:51:3: error: unknown type name 'int64_t' int64_t cm_gracetime; /* grace period start time */ Signed-off-by: Dmitry V. Levin Signed-off-by: J. 
Bruce Fields --- include/uapi/linux/nfsd/cld.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/uapi/linux/nfsd/cld.h b/include/uapi/linux/nfsd/cld.h index f14a9ab06f1f..ec260274be0c 100644 --- a/include/uapi/linux/nfsd/cld.h +++ b/include/uapi/linux/nfsd/cld.h @@ -22,6 +22,8 @@ #ifndef _NFSD_CLD_H #define _NFSD_CLD_H +#include + /* latest upcall version available */ #define CLD_UPCALL_VERSION 1 @@ -37,18 +39,18 @@ enum cld_command { /* representation of long-form NFSv4 client ID */ struct cld_name { - uint16_t cn_len; /* length of cm_id */ + __u16 cn_len; /* length of cm_id */ unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */ } __attribute__((packed)); /* message struct for communication with userspace */ struct cld_msg { - uint8_t cm_vers; /* upcall version */ - uint8_t cm_cmd; /* upcall command */ - int16_t cm_status; /* return code */ - uint32_t cm_xid; /* transaction id */ + __u8 cm_vers; /* upcall version */ + __u8 cm_cmd; /* upcall command */ + __s16 cm_status; /* return code */ + __u32 cm_xid; /* transaction id */ union { - int64_t cm_gracetime; /* grace period start time */ + __s64 cm_gracetime; /* grace period start time */ struct cld_name cm_name; } __attribute__((packed)) cm_u; } __attribute__((packed)); -- cgit v1.2.3 From 17f5f7f506aaca985b95df7ef7fc2ff49c36a8e9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:05:36 -0400 Subject: svcrdma: Move send_wr to svc_rdma_op_ctxt Clean up: Move the ib_send_wr off the stack, and move common code to post a Send Work Request into a helper. This is a refactoring change only. Signed-off-by: Chuck Lever Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 4 ++ net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 11 +---- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 64 ++++++++++++++++++------------ 3 files changed, 44 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index b105f73e3ca2..287db5c179d8 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -85,6 +85,7 @@ struct svc_rdma_op_ctxt { enum dma_data_direction direction; int count; unsigned int mapped_sges; + struct ib_send_wr send_wr; struct ib_sge sge[RPCSVC_MAXPAGES]; struct page *pages[RPCSVC_MAXPAGES]; }; @@ -227,6 +228,9 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *, /* svc_rdma_sendto.c */ extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, struct svc_rdma_req_map *, bool); +extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma, + struct svc_rdma_op_ctxt *ctxt, + int num_sge, u32 inv_rkey); extern int svc_rdma_sendto(struct svc_rqst *); extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, int); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index ff1df40f0d26..f12f39c189c3 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -104,7 +104,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, struct xdr_buf *sndbuf = &rqst->rq_snd_buf; struct svc_rdma_op_ctxt *ctxt; struct svc_rdma_req_map *vec; - struct ib_send_wr send_wr; int ret; vec = svc_rdma_get_req_map(rdma); @@ -132,15 +131,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, } svc_rdma_count_mappings(rdma, ctxt); - memset(&send_wr, 0, sizeof(send_wr)); - ctxt->cqe.done = svc_rdma_wc_send; - send_wr.wr_cqe = 
&ctxt->cqe; - send_wr.sg_list = ctxt->sge; - send_wr.num_sge = 1; - send_wr.opcode = IB_WR_SEND; - send_wr.send_flags = IB_SEND_SIGNALED; - - ret = svc_rdma_send(rdma, &send_wr); + ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0); if (ret) { ret = -EIO; goto out_unmap; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 515221b16d09..f90b40d0932f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -435,6 +435,43 @@ out_err: return -EIO; } +/** + * svc_rdma_post_send_wr - Set up and post one Send Work Request + * @rdma: controlling transport + * @ctxt: op_ctxt for transmitting the Send WR + * @num_sge: number of SGEs to send + * @inv_rkey: R_key argument to Send With Invalidate, or zero + * + * Returns: + * %0 if the Send* was posted successfully, + * %-ENOTCONN if the connection was lost or dropped, + * %-EINVAL if there was a problem with the Send we built, + * %-ENOMEM if ib_post_send failed. + */ +int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma, + struct svc_rdma_op_ctxt *ctxt, int num_sge, + u32 inv_rkey) +{ + struct ib_send_wr *send_wr = &ctxt->send_wr; + + dprintk("svcrdma: posting Send WR with %u sge(s)\n", num_sge); + + send_wr->next = NULL; + ctxt->cqe.done = svc_rdma_wc_send; + send_wr->wr_cqe = &ctxt->cqe; + send_wr->sg_list = ctxt->sge; + send_wr->num_sge = num_sge; + send_wr->send_flags = IB_SEND_SIGNALED; + if (inv_rkey) { + send_wr->opcode = IB_WR_SEND_WITH_INV; + send_wr->ex.invalidate_rkey = inv_rkey; + } else { + send_wr->opcode = IB_WR_SEND; + } + + return svc_rdma_send(rdma, send_wr); +} + /* This function prepares the portion of the RPCRDMA message to be * sent in the RDMA_SEND. This function is called after data sent via * RDMA has already been transmitted. 
There are three cases: @@ -460,7 +497,6 @@ static int send_reply(struct svcxprt_rdma *rdma, u32 inv_rkey) { struct svc_rdma_op_ctxt *ctxt; - struct ib_send_wr send_wr; u32 xdr_off; int sge_no; int sge_bytes; @@ -524,19 +560,8 @@ static int send_reply(struct svcxprt_rdma *rdma, pr_err("svcrdma: Too many sges (%d)\n", sge_no); goto err; } - memset(&send_wr, 0, sizeof send_wr); - ctxt->cqe.done = svc_rdma_wc_send; - send_wr.wr_cqe = &ctxt->cqe; - send_wr.sg_list = ctxt->sge; - send_wr.num_sge = sge_no; - if (inv_rkey) { - send_wr.opcode = IB_WR_SEND_WITH_INV; - send_wr.ex.invalidate_rkey = inv_rkey; - } else - send_wr.opcode = IB_WR_SEND; - send_wr.send_flags = IB_SEND_SIGNALED; - ret = svc_rdma_send(rdma, &send_wr); + ret = svc_rdma_post_send_wr(rdma, ctxt, sge_no, inv_rkey); if (ret) goto err; @@ -652,7 +677,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, int status) { - struct ib_send_wr err_wr; struct page *p; struct svc_rdma_op_ctxt *ctxt; enum rpcrdma_errcode err; @@ -692,17 +716,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, } svc_rdma_count_mappings(xprt, ctxt); - /* Prepare SEND WR */ - memset(&err_wr, 0, sizeof(err_wr)); - ctxt->cqe.done = svc_rdma_wc_send; - err_wr.wr_cqe = &ctxt->cqe; - err_wr.sg_list = ctxt->sge; - err_wr.num_sge = 1; - err_wr.opcode = IB_WR_SEND; - err_wr.send_flags = IB_SEND_SIGNALED; - - /* Post It */ - ret = svc_rdma_send(xprt, &err_wr); + ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0); if (ret) { dprintk("svcrdma: Error %d posting send for protocol error\n", ret); -- cgit v1.2.3 From 6e6092ca305ad785c605d7e313727aad96c228a5 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:05:44 -0400 Subject: svcrdma: Add svc_rdma_map_reply_hdr() Introduce a helper to DMA-map a reply's transport header before sending it. This will in part replace the map vector cache. Signed-off-by: Chuck Lever Reviewed-by: Christoph Hellwig Signed-off-by: J. 
Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 3 ++ net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 36 ++++++------------ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 61 +++++++++++++++++++++++------- 3 files changed, 62 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 287db5c179d8..002a46d1faa1 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -228,6 +228,9 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *, /* svc_rdma_sendto.c */ extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, struct svc_rdma_req_map *, bool); +extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma, + struct svc_rdma_op_ctxt *ctxt, + __be32 *rdma_resp, unsigned int len); extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma, struct svc_rdma_op_ctxt *ctxt, int num_sge, u32 inv_rkey); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index f12f39c189c3..0305b33d482f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -101,50 +101,36 @@ out_notfound: static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) { - struct xdr_buf *sndbuf = &rqst->rq_snd_buf; struct svc_rdma_op_ctxt *ctxt; - struct svc_rdma_req_map *vec; int ret; - vec = svc_rdma_get_req_map(rdma); - ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false); - if (ret) + ctxt = svc_rdma_get_context(rdma); + + /* rpcrdma_bc_send_request builds the transport header and + * the backchannel RPC message in the same buffer. Thus only + * one SGE is needed to send both. + */ + ret = svc_rdma_map_reply_hdr(rdma, ctxt, rqst->rq_buffer, + rqst->rq_snd_buf.len); + if (ret < 0) goto out_err; ret = svc_rdma_repost_recv(rdma, GFP_NOIO); if (ret) goto out_err; - ctxt = svc_rdma_get_context(rdma); - ctxt->pages[0] = virt_to_page(rqst->rq_buffer); - ctxt->count = 1; - - ctxt->direction = DMA_TO_DEVICE; - ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey; - ctxt->sge[0].length = sndbuf->len; - ctxt->sge[0].addr = - ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0, - sndbuf->len, DMA_TO_DEVICE); - if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) { - ret = -EIO; - goto out_unmap; - } - svc_rdma_count_mappings(rdma, ctxt); - ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0); - if (ret) { - ret = -EIO; + if (ret) goto out_unmap; - } out_err: - svc_rdma_put_req_map(rdma, vec); dprintk("svcrdma: %s returns %d\n", __func__, ret); return ret; out_unmap: svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); + ret = -EIO; goto out_err; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index f90b40d0932f..a7dc71daa776 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -217,6 +217,49 @@ static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp, return 0; } +static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma, + struct svc_rdma_op_ctxt *ctxt, + unsigned int sge_no, + struct page *page, + unsigned int offset, + unsigned int len) +{ + struct ib_device *dev = rdma->sc_cm_id->device; + dma_addr_t dma_addr; + + dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE); + if (ib_dma_mapping_error(dev, dma_addr)) + return -EIO; + + ctxt->sge[sge_no].addr = dma_addr; + ctxt->sge[sge_no].length = len; + ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey; + 
svc_rdma_count_mappings(rdma, ctxt); + return 0; +} + +/** + * svc_rdma_map_reply_hdr - DMA map the transport header buffer + * @rdma: controlling transport + * @ctxt: op_ctxt for the Send WR + * @rdma_resp: buffer containing transport header + * @len: length of transport header + * + * Returns: + * %0 if the header is DMA mapped, + * %-EIO if DMA mapping failed. + */ +int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma, + struct svc_rdma_op_ctxt *ctxt, + __be32 *rdma_resp, + unsigned int len) +{ + ctxt->direction = DMA_TO_DEVICE; + ctxt->pages[0] = virt_to_page(rdma_resp); + ctxt->count = 1; + return svc_rdma_dma_map_page(rdma, ctxt, 0, ctxt->pages[0], 0, len); +} + /* Assumptions: * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE */ @@ -699,22 +742,14 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, err = ERR_VERS; length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); + /* Map transport header; no RPC message payload */ ctxt = svc_rdma_get_context(xprt); - ctxt->direction = DMA_TO_DEVICE; - ctxt->count = 1; - ctxt->pages[0] = p; - - /* Prepare SGE for local address */ - ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey; - ctxt->sge[0].length = length; - ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device, - p, 0, length, DMA_TO_DEVICE); - if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { - dprintk("svcrdma: Error mapping buffer for protocol error\n"); - svc_rdma_put_context(ctxt, 1); + ret = svc_rdma_map_reply_hdr(xprt, ctxt, &rmsgp->rm_xid, length); + if (ret) { + dprintk("svcrdma: Error %d mapping send for protocol error\n", + ret); return; } - svc_rdma_count_mappings(xprt, ctxt); ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0); if (ret) { -- cgit v1.2.3 From b623589dbacbc786c2fffc85113a1dc1a331e2ca Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:05:52 -0400 Subject: svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT The Send Queue depth is temporarily reduced to 1 SQE per credit. The new rdma_rw API does an internal computation, during QP creation, to increase the depth of the Send Queue to handle RDMA Read and Write operations. This change has to come before the NFSD code paths are updated to use the rdma_rw API. Without this patch, rdma_rw_init_qp() increases the size of the SQ too much, resulting in memory allocation failures during QP creation. Signed-off-by: Chuck Lever Reviewed-by: Christoph Hellwig Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 1 - net/sunrpc/xprtrdma/svc_rdma.c | 2 -- net/sunrpc/xprtrdma/svc_rdma_transport.c | 2 +- 3 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 002a46d1faa1..11d5aa123f17 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -182,7 +182,6 @@ struct svcxprt_rdma { /* The default ORD value is based on two outstanding full-size writes with a * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. 
*/ #define RPCRDMA_ORD (64/4) -#define RPCRDMA_SQ_DEPTH_MULT 8 #define RPCRDMA_MAX_REQUESTS 32 #define RPCRDMA_MAX_REQ_SIZE 4096 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c index c846ca9f1eba..912444174647 100644 --- a/net/sunrpc/xprtrdma/svc_rdma.c +++ b/net/sunrpc/xprtrdma/svc_rdma.c @@ -247,8 +247,6 @@ int svc_rdma_init(void) dprintk("SVCRDMA Module Init, register RPC RDMA transport\n"); dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord); dprintk("\tmax_requests : %u\n", svcrdma_max_requests); - dprintk("\tsq_depth : %u\n", - svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT); dprintk("\tmax_bc_requests : %u\n", svcrdma_max_bc_requests); dprintk("\tmax_inline : %d\n", svcrdma_max_req_size); diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index fc8f14c7bfec..e1097cc6d1eb 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -1014,7 +1014,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) svcrdma_max_bc_requests); newxprt->sc_rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests; - newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth; + newxprt->sc_sq_depth = newxprt->sc_rq_depth; atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth); if (!svc_rdma_prealloc_ctxts(newxprt)) -- cgit v1.2.3 From f13193f50b64e2e0c87706b838d6b9895626a892 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:06:16 -0400 Subject: svcrdma: Introduce local rdma_rw API helpers The plan is to replace the local bespoke code that constructs and posts RDMA Read and Write Work Requests with calls to the rdma_rw API. This shares code with other RDMA-enabled ULPs that manages the gory details of buffer registration and posting Work Requests. Some design notes: o The structure of RPC-over-RDMA transport headers is flexible, allowing multiple segments per Reply with arbitrary alignment, each with a unique R_key. Write and Send WRs continue to be built and posted in separate code paths. However, one whole chunk (with one or more RDMA segments apiece) gets exactly one ib_post_send and one work completion. o svc_xprt reference counting is modified, since a chain of rdma_rw_ctx structs generates one completion, no matter how many Write WRs are posted. o The current code builds the transport header as it is construct- ing Write WRs. I've replaced that with marshaling of transport header data items in a separate step. This is because the exact structure of client-provided segments may not align with the components of the server's reply xdr_buf, or the pages in the page list. Thus parts of each client-provided segment may be written at different points in the send path. Signed-off-by: Chuck Lever Signed-off-by: J. 
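
The design notes above can be condensed into the following sketch of the per-chunk posting pattern built on the core rdma_rw API: each RDMA segment gets its own rdma_rw context, all of their Write WRs are chained behind a single ib_cqe, and the whole chain goes out in one ib_post_send(), so one work completion covers the chunk. This is an editor's sketch, not part of the patch; the helper name and its argument arrays are illustrative, error cleanup via rdma_rw_ctx_destroy() is omitted, and the real implementation is the svc_rdma_rw.c code added below.

#include <rdma/ib_verbs.h>
#include <rdma/rw.h>

static int post_write_chunk_sketch(struct ib_qp *qp, u8 port_num,
				   struct ib_cqe *cqe,
				   struct rdma_rw_ctx *ctxs,
				   struct scatterlist **sgls, u32 *nents,
				   u64 *offsets, u32 *handles,
				   unsigned int nsegs)
{
	struct ib_send_wr *first_wr = NULL, *bad_wr;
	struct ib_cqe *chunk_cqe = cqe;
	unsigned int i;
	int ret;

	for (i = 0; i < nsegs; i++) {
		/* DMA-map (and, if needed, register) this segment's pages */
		ret = rdma_rw_ctx_init(&ctxs[i], qp, port_num, sgls[i],
				       nents[i], 0, offsets[i], handles[i],
				       DMA_TO_DEVICE);
		if (ret < 0)
			return ret;

		/* Chain this segment's Write WRs; only one WR in the
		 * whole chain ends up signaled with the chunk's cqe. */
		first_wr = rdma_rw_ctx_wrs(&ctxs[i], qp, port_num,
					   chunk_cqe, first_wr);
		chunk_cqe = NULL;
	}

	/* One post, one completion, for the entire chunk */
	return ib_post_send(qp, first_wr, &bad_wr);
}
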
Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 11 + net/sunrpc/Kconfig | 1 + net/sunrpc/xprtrdma/Makefile | 2 +- net/sunrpc/xprtrdma/svc_rdma_rw.c | 512 +++++++++++++++++++++++++++++++ net/sunrpc/xprtrdma/svc_rdma_transport.c | 4 + 5 files changed, 529 insertions(+), 1 deletion(-) create mode 100644 net/sunrpc/xprtrdma/svc_rdma_rw.c (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 11d5aa123f17..ca08671fb7e2 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -145,12 +145,15 @@ struct svcxprt_rdma { u32 sc_max_requests; /* Max requests */ u32 sc_max_bc_requests;/* Backward credits */ int sc_max_req_size; /* Size of each RQ WR buf */ + u8 sc_port_num; struct ib_pd *sc_pd; spinlock_t sc_ctxt_lock; struct list_head sc_ctxts; int sc_ctxt_used; + spinlock_t sc_rw_ctxt_lock; + struct list_head sc_rw_ctxts; spinlock_t sc_map_lock; struct list_head sc_maps; @@ -224,6 +227,14 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *, struct svc_rdma_op_ctxt *, int *, u32 *, u32, u32, u64, bool); +/* svc_rdma_rw.c */ +extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma); +extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, + __be32 *wr_ch, struct xdr_buf *xdr); +extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, + __be32 *rp_ch, bool writelist, + struct xdr_buf *xdr); + /* svc_rdma_sendto.c */ extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, struct svc_rdma_req_map *, bool); diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index 04ce2c0b660e..ac09ca803296 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig @@ -52,6 +52,7 @@ config SUNRPC_XPRT_RDMA tristate "RPC-over-RDMA transport" depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS default SUNRPC && INFINIBAND + select SG_POOL help This option allows the NFS client and server to use RDMA transports (InfiniBand, iWARP, or RoCE). diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile index ef19fa42c50f..c1ae8142ab73 100644 --- a/net/sunrpc/xprtrdma/Makefile +++ b/net/sunrpc/xprtrdma/Makefile @@ -4,5 +4,5 @@ rpcrdma-y := transport.o rpc_rdma.o verbs.o \ fmr_ops.o frwr_ops.o \ svc_rdma.o svc_rdma_backchannel.o svc_rdma_transport.o \ svc_rdma_marshal.o svc_rdma_sendto.o svc_rdma_recvfrom.o \ - module.o + svc_rdma_rw.o module.o rpcrdma-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel.o diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c new file mode 100644 index 000000000000..0cf620277693 --- /dev/null +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -0,0 +1,512 @@ +/* + * Copyright (c) 2016 Oracle. All rights reserved. + * + * Use the core R/W API to move RPC-over-RDMA Read and Write chunks. + */ + +#include +#include +#include + +#include + +#define RPCDBG_FACILITY RPCDBG_SVCXPRT + +/* Each R/W context contains state for one chain of RDMA Read or + * Write Work Requests. + * + * Each WR chain handles a single contiguous server-side buffer, + * because scatterlist entries after the first have to start on + * page alignment. xdr_buf iovecs cannot guarantee alignment. + * + * Each WR chain handles only one R_key. Each RPC-over-RDMA segment + * from a client may contain a unique R_key, so each WR chain moves + * up to one segment at a time. + * + * The scatterlist makes this data structure over 4KB in size. 
To + * make it less likely to fail, and to handle the allocation for + * smaller I/O requests without disabling bottom-halves, these + * contexts are created on demand, but cached and reused until the + * controlling svcxprt_rdma is destroyed. + */ +struct svc_rdma_rw_ctxt { + struct list_head rw_list; + struct rdma_rw_ctx rw_ctx; + int rw_nents; + struct sg_table rw_sg_table; + struct scatterlist rw_first_sgl[0]; +}; + +static inline struct svc_rdma_rw_ctxt * +svc_rdma_next_ctxt(struct list_head *list) +{ + return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt, + rw_list); +} + +static struct svc_rdma_rw_ctxt * +svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) +{ + struct svc_rdma_rw_ctxt *ctxt; + + spin_lock(&rdma->sc_rw_ctxt_lock); + + ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts); + if (ctxt) { + list_del(&ctxt->rw_list); + spin_unlock(&rdma->sc_rw_ctxt_lock); + } else { + spin_unlock(&rdma->sc_rw_ctxt_lock); + ctxt = kmalloc(sizeof(*ctxt) + + SG_CHUNK_SIZE * sizeof(struct scatterlist), + GFP_KERNEL); + if (!ctxt) + goto out; + INIT_LIST_HEAD(&ctxt->rw_list); + } + + ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl; + if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges, + ctxt->rw_sg_table.sgl)) { + kfree(ctxt); + ctxt = NULL; + } +out: + return ctxt; +} + +static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, + struct svc_rdma_rw_ctxt *ctxt) +{ + sg_free_table_chained(&ctxt->rw_sg_table, true); + + spin_lock(&rdma->sc_rw_ctxt_lock); + list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts); + spin_unlock(&rdma->sc_rw_ctxt_lock); +} + +/** + * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts + * @rdma: transport about to be destroyed + * + */ +void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) +{ + struct svc_rdma_rw_ctxt *ctxt; + + while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) { + list_del(&ctxt->rw_list); + kfree(ctxt); + } +} + +/* A chunk context tracks all I/O for moving one Read or Write + * chunk. This is a a set of rdma_rw's that handle data movement + * for all segments of one chunk. + * + * These are small, acquired with a single allocator call, and + * no more than one is needed per chunk. They are allocated on + * demand, and not cached. + */ +struct svc_rdma_chunk_ctxt { + struct ib_cqe cc_cqe; + struct svcxprt_rdma *cc_rdma; + struct list_head cc_rwctxts; + int cc_sqecount; + enum dma_data_direction cc_dir; +}; + +static void svc_rdma_cc_init(struct svcxprt_rdma *rdma, + struct svc_rdma_chunk_ctxt *cc, + enum dma_data_direction dir) +{ + cc->cc_rdma = rdma; + svc_xprt_get(&rdma->sc_xprt); + + INIT_LIST_HEAD(&cc->cc_rwctxts); + cc->cc_sqecount = 0; + cc->cc_dir = dir; +} + +static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc) +{ + struct svcxprt_rdma *rdma = cc->cc_rdma; + struct svc_rdma_rw_ctxt *ctxt; + + while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) { + list_del(&ctxt->rw_list); + + rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp, + rdma->sc_port_num, ctxt->rw_sg_table.sgl, + ctxt->rw_nents, cc->cc_dir); + svc_rdma_put_rw_ctxt(rdma, ctxt); + } + svc_xprt_put(&rdma->sc_xprt); +} + +/* State for sending a Write or Reply chunk. 
+ * - Tracks progress of writing one chunk over all its segments + * - Stores arguments for the SGL constructor functions + */ +struct svc_rdma_write_info { + /* write state of this chunk */ + unsigned int wi_seg_off; + unsigned int wi_seg_no; + unsigned int wi_nsegs; + __be32 *wi_segs; + + /* SGL constructor arguments */ + struct xdr_buf *wi_xdr; + unsigned char *wi_base; + unsigned int wi_next_off; + + struct svc_rdma_chunk_ctxt wi_cc; +}; + +static struct svc_rdma_write_info * +svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk) +{ + struct svc_rdma_write_info *info; + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return info; + + info->wi_seg_off = 0; + info->wi_seg_no = 0; + info->wi_nsegs = be32_to_cpup(++chunk); + info->wi_segs = ++chunk; + svc_rdma_cc_init(rdma, &info->wi_cc, DMA_TO_DEVICE); + return info; +} + +static void svc_rdma_write_info_free(struct svc_rdma_write_info *info) +{ + svc_rdma_cc_release(&info->wi_cc); + kfree(info); +} + +/** + * svc_rdma_write_done - Write chunk completion + * @cq: controlling Completion Queue + * @wc: Work Completion + * + * Pages under I/O are freed by a subsequent Send completion. + */ +static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct ib_cqe *cqe = wc->wr_cqe; + struct svc_rdma_chunk_ctxt *cc = + container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe); + struct svcxprt_rdma *rdma = cc->cc_rdma; + struct svc_rdma_write_info *info = + container_of(cc, struct svc_rdma_write_info, wi_cc); + + atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); + wake_up(&rdma->sc_send_wait); + + if (unlikely(wc->status != IB_WC_SUCCESS)) { + set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); + if (wc->status != IB_WC_WR_FLUSH_ERR) + pr_err("svcrdma: write ctx: %s (%u/0x%x)\n", + ib_wc_status_msg(wc->status), + wc->status, wc->vendor_err); + } + + svc_rdma_write_info_free(info); +} + +/* This function sleeps when the transport's Send Queue is congested. + * + * Assumptions: + * - If ib_post_send() succeeds, only one completion is expected, + * even if one or more WRs are flushed. This is true when posting + * an rdma_rw_ctx or when posting a single signaled WR. + */ +static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) +{ + struct svcxprt_rdma *rdma = cc->cc_rdma; + struct svc_xprt *xprt = &rdma->sc_xprt; + struct ib_send_wr *first_wr, *bad_wr; + struct list_head *tmp; + struct ib_cqe *cqe; + int ret; + + first_wr = NULL; + cqe = &cc->cc_cqe; + list_for_each(tmp, &cc->cc_rwctxts) { + struct svc_rdma_rw_ctxt *ctxt; + + ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list); + first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp, + rdma->sc_port_num, cqe, first_wr); + cqe = NULL; + } + + do { + if (atomic_sub_return(cc->cc_sqecount, + &rdma->sc_sq_avail) > 0) { + ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); + if (ret) + break; + return 0; + } + + atomic_inc(&rdma_stat_sq_starve); + atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); + wait_event(rdma->sc_send_wait, + atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount); + } while (1); + + pr_err("svcrdma: ib_post_send failed (%d)\n", ret); + set_bit(XPT_CLOSE, &xprt->xpt_flags); + + /* If even one was posted, there will be a completion. 
*/ + if (bad_wr != first_wr) + return 0; + + atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); + wake_up(&rdma->sc_send_wait); + return -ENOTCONN; +} + +/* Build and DMA-map an SGL that covers one kvec in an xdr_buf + */ +static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info, + unsigned int len, + struct svc_rdma_rw_ctxt *ctxt) +{ + struct scatterlist *sg = ctxt->rw_sg_table.sgl; + + sg_set_buf(&sg[0], info->wi_base, len); + info->wi_base += len; + + ctxt->rw_nents = 1; +} + +/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist. + */ +static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info, + unsigned int remaining, + struct svc_rdma_rw_ctxt *ctxt) +{ + unsigned int sge_no, sge_bytes, page_off, page_no; + struct xdr_buf *xdr = info->wi_xdr; + struct scatterlist *sg; + struct page **page; + + page_off = (info->wi_next_off + xdr->page_base) & ~PAGE_MASK; + page_no = (info->wi_next_off + xdr->page_base) >> PAGE_SHIFT; + page = xdr->pages + page_no; + info->wi_next_off += remaining; + sg = ctxt->rw_sg_table.sgl; + sge_no = 0; + do { + sge_bytes = min_t(unsigned int, remaining, + PAGE_SIZE - page_off); + sg_set_page(sg, *page, sge_bytes, page_off); + + remaining -= sge_bytes; + sg = sg_next(sg); + page_off = 0; + sge_no++; + page++; + } while (remaining); + + ctxt->rw_nents = sge_no; +} + +/* Construct RDMA Write WRs to send a portion of an xdr_buf containing + * an RPC Reply. + */ +static int +svc_rdma_build_writes(struct svc_rdma_write_info *info, + void (*constructor)(struct svc_rdma_write_info *info, + unsigned int len, + struct svc_rdma_rw_ctxt *ctxt), + unsigned int remaining) +{ + struct svc_rdma_chunk_ctxt *cc = &info->wi_cc; + struct svcxprt_rdma *rdma = cc->cc_rdma; + struct svc_rdma_rw_ctxt *ctxt; + __be32 *seg; + int ret; + + cc->cc_cqe.done = svc_rdma_write_done; + seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz; + do { + unsigned int write_len; + u32 seg_length, seg_handle; + u64 seg_offset; + + if (info->wi_seg_no >= info->wi_nsegs) + goto out_overflow; + + seg_handle = be32_to_cpup(seg); + seg_length = be32_to_cpup(seg + 1); + xdr_decode_hyper(seg + 2, &seg_offset); + seg_offset += info->wi_seg_off; + + write_len = min(remaining, seg_length - info->wi_seg_off); + ctxt = svc_rdma_get_rw_ctxt(rdma, + (write_len >> PAGE_SHIFT) + 2); + if (!ctxt) + goto out_noctx; + + constructor(info, write_len, ctxt); + ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, + rdma->sc_port_num, ctxt->rw_sg_table.sgl, + ctxt->rw_nents, 0, seg_offset, + seg_handle, DMA_TO_DEVICE); + if (ret < 0) + goto out_initerr; + + list_add(&ctxt->rw_list, &cc->cc_rwctxts); + cc->cc_sqecount += ret; + if (write_len == seg_length - info->wi_seg_off) { + seg += 4; + info->wi_seg_no++; + info->wi_seg_off = 0; + } else { + info->wi_seg_off += write_len; + } + remaining -= write_len; + } while (remaining); + + return 0; + +out_overflow: + dprintk("svcrdma: inadequate space in Write chunk (%u)\n", + info->wi_nsegs); + return -E2BIG; + +out_noctx: + dprintk("svcrdma: no R/W ctxs available\n"); + return -ENOMEM; + +out_initerr: + svc_rdma_put_rw_ctxt(rdma, ctxt); + pr_err("svcrdma: failed to map pagelist (%d)\n", ret); + return -EIO; +} + +/* Send one of an xdr_buf's kvecs by itself. To send a Reply + * chunk, the whole RPC Reply is written back to the client. + * This function writes either the head or tail of the xdr_buf + * containing the Reply. 
+ */ +static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info, + struct kvec *vec) +{ + info->wi_base = vec->iov_base; + return svc_rdma_build_writes(info, svc_rdma_vec_to_sg, + vec->iov_len); +} + +/* Send an xdr_buf's page list by itself. A Write chunk is + * just the page list. a Reply chunk is the head, page list, + * and tail. This function is shared between the two types + * of chunk. + */ +static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, + struct xdr_buf *xdr) +{ + info->wi_xdr = xdr; + info->wi_next_off = 0; + return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg, + xdr->page_len); +} + +/** + * svc_rdma_send_write_chunk - Write all segments in a Write chunk + * @rdma: controlling RDMA transport + * @wr_ch: Write chunk provided by client + * @xdr: xdr_buf containing the data payload + * + * Returns a non-negative number of bytes the chunk consumed, or + * %-E2BIG if the payload was larger than the Write chunk, + * %-ENOMEM if rdma_rw context pool was exhausted, + * %-ENOTCONN if posting failed (connection is lost), + * %-EIO if rdma_rw initialization failed (DMA mapping, etc). + */ +int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, + struct xdr_buf *xdr) +{ + struct svc_rdma_write_info *info; + int ret; + + if (!xdr->page_len) + return 0; + + info = svc_rdma_write_info_alloc(rdma, wr_ch); + if (!info) + return -ENOMEM; + + ret = svc_rdma_send_xdr_pagelist(info, xdr); + if (ret < 0) + goto out_err; + + ret = svc_rdma_post_chunk_ctxt(&info->wi_cc); + if (ret < 0) + goto out_err; + return xdr->page_len; + +out_err: + svc_rdma_write_info_free(info); + return ret; +} + +/** + * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk + * @rdma: controlling RDMA transport + * @rp_ch: Reply chunk provided by client + * @writelist: true if client provided a Write list + * @xdr: xdr_buf containing an RPC Reply + * + * Returns a non-negative number of bytes the chunk consumed, or + * %-E2BIG if the payload was larger than the Reply chunk, + * %-ENOMEM if rdma_rw context pool was exhausted, + * %-ENOTCONN if posting failed (connection is lost), + * %-EIO if rdma_rw initialization failed (DMA mapping, etc). + */ +int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, + bool writelist, struct xdr_buf *xdr) +{ + struct svc_rdma_write_info *info; + int consumed, ret; + + info = svc_rdma_write_info_alloc(rdma, rp_ch); + if (!info) + return -ENOMEM; + + ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]); + if (ret < 0) + goto out_err; + consumed = xdr->head[0].iov_len; + + /* Send the page list in the Reply chunk only if the + * client did not provide Write chunks. 
+ */ + if (!writelist && xdr->page_len) { + ret = svc_rdma_send_xdr_pagelist(info, xdr); + if (ret < 0) + goto out_err; + consumed += xdr->page_len; + } + + if (xdr->tail[0].iov_len) { + ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]); + if (ret < 0) + goto out_err; + consumed += xdr->tail[0].iov_len; + } + + ret = svc_rdma_post_chunk_ctxt(&info->wi_cc); + if (ret < 0) + goto out_err; + return consumed; + +out_err: + svc_rdma_write_info_free(info); + return ret; +} diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index e1097cc6d1eb..b25c50992a95 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -561,6 +561,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q); INIT_LIST_HEAD(&cma_xprt->sc_frmr_q); INIT_LIST_HEAD(&cma_xprt->sc_ctxts); + INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts); INIT_LIST_HEAD(&cma_xprt->sc_maps); init_waitqueue_head(&cma_xprt->sc_send_wait); @@ -568,6 +569,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, spin_lock_init(&cma_xprt->sc_rq_dto_lock); spin_lock_init(&cma_xprt->sc_frmr_q_lock); spin_lock_init(&cma_xprt->sc_ctxt_lock); + spin_lock_init(&cma_xprt->sc_rw_ctxt_lock); spin_lock_init(&cma_xprt->sc_map_lock); /* @@ -999,6 +1001,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) newxprt, newxprt->sc_cm_id); dev = newxprt->sc_cm_id->device; + newxprt->sc_port_num = newxprt->sc_cm_id->port_num; /* Qualify the transport resource defaults with the * capabilities of this particular device */ @@ -1248,6 +1251,7 @@ static void __svc_rdma_free(struct work_struct *work) } rdma_dealloc_frmr_q(rdma); + svc_rdma_destroy_rw_ctxts(rdma); svc_rdma_destroy_ctxts(rdma); svc_rdma_destroy_maps(rdma); -- cgit v1.2.3 From 9a6a180b7867ceceeeab88a6f011bac23174b939 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:06:25 -0400 Subject: svcrdma: Use rdma_rw API in RPC reply path The current svcrdma sendto code path posts one RDMA Write WR at a time. Each of these Writes typically carries a small number of pages (for instance, up to 30 pages for mlx4 devices). That means a 1MB NFS READ reply requires 9 ib_post_send() calls for the Write WRs, and one for the Send WR carrying the actual RPC Reply message. Instead, use the new rdma_rw API. The details of Write WR chain construction and memory registration are taken care of in the RDMA core. svcrdma can focus on the details of the RPC-over-RDMA protocol. This gives three main benefits: 1. All Write WRs for one RDMA segment are posted in a single chain. As few as one ib_post_send() for each Write chunk. 2. The Write path can now use FRWR to register the Write buffers. If the device's maximum page list depth is large, this means a single Write WR is needed for each RPC's Write chunk data. 3. The new code introduces support for RPCs that carry both a Write list and a Reply chunk. This combination can be used for an NFSv4 READ where the data payload is large, and thus is removed from the Payload Stream, but the Payload Stream is still larger than the inline threshold. Signed-off-by: Chuck Lever Signed-off-by: J. 
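
Before the large sendto diff that follows, here is a condensed sketch (editor's orientation aid, not part of the patch) of the reply path after this change, using the helpers introduced earlier in the series. Header allocation, posting a fresh Receive buffer, the backchannel case, and error recovery are omitted; the wrapper name is hypothetical, while all functions it calls appear in this series.

/* Assumes the static helpers defined in svc_rdma_sendto.c below. */
static int svc_rdma_sendto_sketch(struct svcxprt_rdma *rdma,
				  struct svc_rqst *rqstp,
				  __be32 *rdma_argp, __be32 *rdma_resp,
				  __be32 *wr_lst, __be32 *rp_ch)
{
	struct xdr_buf *xdr = &rqstp->rq_res;
	int ret;

	/* 1. Write chunk payload moves via the rdma_rw API; all Write
	 *    WRs for one chunk are posted as a single chain. */
	if (wr_lst) {
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
		if (ret < 0)
			return ret;
		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
	}

	/* 2. A Reply chunk can now coexist with a Write list
	 *    (e.g. a large NFSv4 READ with a long Payload Stream). */
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rp_ch,
						wr_lst != NULL, xdr);
		if (ret < 0)
			return ret;
		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
	}

	/* 3. One Send WR carries the transport header plus whatever
	 *    part of the RPC message is still sent inline. */
	return svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp,
				       wr_lst, rp_ch);
}
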
Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 1 - net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 6 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 696 ++++++++++++++--------------- net/sunrpc/xprtrdma/svc_rdma_transport.c | 2 + 4 files changed, 350 insertions(+), 355 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index ca08671fb7e2..599ee03ee3fb 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -212,7 +212,6 @@ extern int svc_rdma_xdr_decode_req(struct xdr_buf *); extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, struct rpcrdma_msg *, enum rpcrdma_errcode, __be32 *); -extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, __be32, __be64, u32); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 0305b33d482f..bf185b79c98f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -90,9 +90,9 @@ out_notfound: * Caller holds the connection's mutex and has already marshaled * the RPC/RDMA request. * - * This is similar to svc_rdma_reply, but takes an rpc_rqst - * instead, does not support chunks, and avoids blocking memory - * allocation. + * This is similar to svc_rdma_send_reply_msg, but takes a struct + * rpc_rqst instead, does not support chunks, and avoids blocking + * memory allocation. * * XXX: There is still an opportunity to block in svc_rdma_send() * if there are no SQ entries to post the Send. This may occur if diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 2eb3df698e11..ce62b78e5bc9 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2016 Oracle. All rights reserved. * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved. * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved. * @@ -40,6 +41,63 @@ * Author: Tom Tucker */ +/* Operation + * + * The main entry point is svc_rdma_sendto. This is called by the + * RPC server when an RPC Reply is ready to be transmitted to a client. + * + * The passed-in svc_rqst contains a struct xdr_buf which holds an + * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA + * transport header, post all Write WRs needed for this Reply, then post + * a Send WR conveying the transport header and the RPC message itself to + * the client. + * + * svc_rdma_sendto must fully transmit the Reply before returning, as + * the svc_rqst will be recycled as soon as sendto returns. Remaining + * resources referred to by the svc_rqst are also recycled at that time. + * Therefore any resources that must remain longer must be detached + * from the svc_rqst and released later. + * + * Page Management + * + * The I/O that performs Reply transmission is asynchronous, and may + * complete well after sendto returns. Thus pages under I/O must be + * removed from the svc_rqst before sendto returns. + * + * The logic here depends on Send Queue and completion ordering. Since + * the Send WR is always posted last, it will always complete last. Thus + * when it completes, it is guaranteed that all previous Write WRs have + * also completed. + * + * Write WRs are constructed and posted. 
Each Write segment gets its own + * svc_rdma_rw_ctxt, allowing the Write completion handler to find and + * DMA-unmap the pages under I/O for that Write segment. The Write + * completion handler does not release any pages. + * + * When the Send WR is constructed, it also gets its own svc_rdma_op_ctxt. + * The ownership of all of the Reply's pages are transferred into that + * ctxt, the Send WR is posted, and sendto returns. + * + * The svc_rdma_op_ctxt is presented when the Send WR completes. The + * Send completion handler finally releases the Reply's pages. + * + * This mechanism also assumes that completions on the transport's Send + * Completion Queue do not run in parallel. Otherwise a Write completion + * and Send completion running at the same time could release pages that + * are still DMA-mapped. + * + * Error Handling + * + * - If the Send WR is posted successfully, it will either complete + * successfully, or get flushed. Either way, the Send completion + * handler releases the Reply's pages. + * - If the Send WR cannot be not posted, the forward path releases + * the Reply's pages. + * + * This handles the case, without the use of page reference counting, + * where two different Write segments send portions of the same page. + */ + #include #include #include @@ -55,6 +113,133 @@ static u32 xdr_padsize(u32 len) return (len & 3) ? (4 - (len & 3)) : 0; } +/* Returns length of transport header, in bytes. + */ +static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp) +{ + unsigned int nsegs; + __be32 *p; + + p = rdma_resp; + + /* RPC-over-RDMA V1 replies never have a Read list. */ + p += rpcrdma_fixed_maxsz + 1; + + /* Skip Write list. */ + while (*p++ != xdr_zero) { + nsegs = be32_to_cpup(p++); + p += nsegs * rpcrdma_segment_maxsz; + } + + /* Skip Reply chunk. */ + if (*p++ != xdr_zero) { + nsegs = be32_to_cpup(p++); + p += nsegs * rpcrdma_segment_maxsz; + } + + return (unsigned long)p - (unsigned long)rdma_resp; +} + +/* One Write chunk is copied from Call transport header to Reply + * transport header. Each segment's length field is updated to + * reflect number of bytes consumed in the segment. + * + * Returns number of segments in this chunk. + */ +static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src, + unsigned int remaining) +{ + unsigned int i, nsegs; + u32 seg_len; + + /* Write list discriminator */ + *dst++ = *src++; + + /* number of segments in this chunk */ + nsegs = be32_to_cpup(src); + *dst++ = *src++; + + for (i = nsegs; i; i--) { + /* segment's RDMA handle */ + *dst++ = *src++; + + /* bytes returned in this segment */ + seg_len = be32_to_cpu(*src); + if (remaining >= seg_len) { + /* entire segment was consumed */ + *dst = *src; + remaining -= seg_len; + } else { + /* segment only partly filled */ + *dst = cpu_to_be32(remaining); + remaining = 0; + } + dst++; src++; + + /* segment's RDMA offset */ + *dst++ = *src++; + *dst++ = *src++; + } + + return nsegs; +} + +/* The client provided a Write list in the Call message. Fill in + * the segments in the first Write chunk in the Reply's transport + * header with the number of bytes consumed in each segment. + * Remaining chunks are returned unused. + * + * Assumptions: + * - Client has provided only one Write chunk + */ +static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch, + unsigned int consumed) +{ + unsigned int nsegs; + __be32 *p, *q; + + /* RPC-over-RDMA V1 replies never have a Read list. 
*/ + p = rdma_resp + rpcrdma_fixed_maxsz + 1; + + q = wr_ch; + while (*q != xdr_zero) { + nsegs = xdr_encode_write_chunk(p, q, consumed); + q += 2 + nsegs * rpcrdma_segment_maxsz; + p += 2 + nsegs * rpcrdma_segment_maxsz; + consumed = 0; + } + + /* Terminate Write list */ + *p++ = xdr_zero; + + /* Reply chunk discriminator; may be replaced later */ + *p = xdr_zero; +} + +/* The client provided a Reply chunk in the Call message. Fill in + * the segments in the Reply chunk in the Reply message with the + * number of bytes consumed in each segment. + * + * Assumptions: + * - Reply can always fit in the provided Reply chunk + */ +static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch, + unsigned int consumed) +{ + __be32 *p; + + /* Find the Reply chunk in the Reply's xprt header. + * RPC-over-RDMA V1 replies never have a Read list. + */ + p = rdma_resp + rpcrdma_fixed_maxsz + 1; + + /* Skip past Write list */ + while (*p++ != xdr_zero) + p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz; + + xdr_encode_write_chunk(p, rp_ch, consumed); +} + int svc_rdma_map_xdr(struct svcxprt_rdma *xprt, struct xdr_buf *xdr, struct svc_rdma_req_map *vec, @@ -123,45 +308,14 @@ int svc_rdma_map_xdr(struct svcxprt_rdma *xprt, return 0; } -static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt, - struct xdr_buf *xdr, - u32 xdr_off, size_t len, int dir) -{ - struct page *page; - dma_addr_t dma_addr; - if (xdr_off < xdr->head[0].iov_len) { - /* This offset is in the head */ - xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK; - page = virt_to_page(xdr->head[0].iov_base); - } else { - xdr_off -= xdr->head[0].iov_len; - if (xdr_off < xdr->page_len) { - /* This offset is in the page list */ - xdr_off += xdr->page_base; - page = xdr->pages[xdr_off >> PAGE_SHIFT]; - xdr_off &= ~PAGE_MASK; - } else { - /* This offset is in the tail */ - xdr_off -= xdr->page_len; - xdr_off += (unsigned long) - xdr->tail[0].iov_base & ~PAGE_MASK; - page = virt_to_page(xdr->tail[0].iov_base); - } - } - dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off, - min_t(size_t, PAGE_SIZE, len), dir); - return dma_addr; -} - /* Parse the RPC Call's transport header. */ -static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp, - struct rpcrdma_write_array **write, - struct rpcrdma_write_array **reply) +static void svc_rdma_get_write_arrays(__be32 *rdma_argp, + __be32 **write, __be32 **reply) { __be32 *p; - p = (__be32 *)&rmsgp->rm_body.rm_chunks[0]; + p = rdma_argp + rpcrdma_fixed_maxsz; /* Read list */ while (*p++ != xdr_zero) @@ -169,7 +323,7 @@ static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp, /* Write list */ if (*p != xdr_zero) { - *write = (struct rpcrdma_write_array *)p; + *write = p; while (*p++ != xdr_zero) p += 1 + be32_to_cpu(*p) * 4; } else { @@ -179,7 +333,7 @@ static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp, /* Reply chunk */ if (*p != xdr_zero) - *reply = (struct rpcrdma_write_array *)p; + *reply = p; else *reply = NULL; } @@ -210,6 +364,32 @@ static u32 svc_rdma_get_inv_rkey(__be32 *rdma_argp, return be32_to_cpup(p); } +/* ib_dma_map_page() is used here because svc_rdma_dma_unmap() + * is used during completion to DMA-unmap this memory, and + * it uses ib_dma_unmap_page() exclusively. 
+ */ +static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma, + struct svc_rdma_op_ctxt *ctxt, + unsigned int sge_no, + unsigned char *base, + unsigned int len) +{ + unsigned long offset = (unsigned long)base & ~PAGE_MASK; + struct ib_device *dev = rdma->sc_cm_id->device; + dma_addr_t dma_addr; + + dma_addr = ib_dma_map_page(dev, virt_to_page(base), + offset, len, DMA_TO_DEVICE); + if (ib_dma_mapping_error(dev, dma_addr)) + return -EIO; + + ctxt->sge[sge_no].addr = dma_addr; + ctxt->sge[sge_no].length = len; + ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey; + svc_rdma_count_mappings(rdma, ctxt); + return 0; +} + static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma, struct svc_rdma_op_ctxt *ctxt, unsigned int sge_no, @@ -253,222 +433,73 @@ int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma, return svc_rdma_dma_map_page(rdma, ctxt, 0, ctxt->pages[0], 0, len); } -/* Assumptions: - * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE +/* Load the xdr_buf into the ctxt's sge array, and DMA map each + * element as it is added. + * + * Returns the number of sge elements loaded on success, or + * a negative errno on failure. */ -static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, - u32 rmr, u64 to, - u32 xdr_off, int write_len, - struct svc_rdma_req_map *vec) +static int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_op_ctxt *ctxt, + struct xdr_buf *xdr, __be32 *wr_lst) { - struct ib_rdma_wr write_wr; - struct ib_sge *sge; - int xdr_sge_no; - int sge_no; - int sge_bytes; - int sge_off; - int bc; - struct svc_rdma_op_ctxt *ctxt; + unsigned int len, sge_no, remaining, page_off; + struct page **ppages; + unsigned char *base; + u32 xdr_pad; + int ret; - if (vec->count > RPCSVC_MAXPAGES) { - pr_err("svcrdma: Too many pages (%lu)\n", vec->count); - return -EIO; - } + sge_no = 1; - dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, " - "write_len=%d, vec->sge=%p, vec->count=%lu\n", - rmr, (unsigned long long)to, xdr_off, - write_len, vec->sge, vec->count); + ret = svc_rdma_dma_map_buf(rdma, ctxt, sge_no++, + xdr->head[0].iov_base, + xdr->head[0].iov_len); + if (ret < 0) + return ret; - ctxt = svc_rdma_get_context(xprt); - ctxt->direction = DMA_TO_DEVICE; - sge = ctxt->sge; - - /* Find the SGE associated with xdr_off */ - for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count; - xdr_sge_no++) { - if (vec->sge[xdr_sge_no].iov_len > bc) - break; - bc -= vec->sge[xdr_sge_no].iov_len; - } + /* If a Write chunk is present, the xdr_buf's page list + * is not included inline. However the Upper Layer may + * have added XDR padding in the tail buffer, and that + * should not be included inline. 
+ */ + if (wr_lst) { + base = xdr->tail[0].iov_base; + len = xdr->tail[0].iov_len; + xdr_pad = xdr_padsize(xdr->page_len); - sge_off = bc; - bc = write_len; - sge_no = 0; - - /* Copy the remaining SGE */ - while (bc != 0) { - sge_bytes = min_t(size_t, - bc, vec->sge[xdr_sge_no].iov_len-sge_off); - sge[sge_no].length = sge_bytes; - sge[sge_no].addr = - dma_map_xdr(xprt, &rqstp->rq_res, xdr_off, - sge_bytes, DMA_TO_DEVICE); - xdr_off += sge_bytes; - if (ib_dma_mapping_error(xprt->sc_cm_id->device, - sge[sge_no].addr)) - goto err; - svc_rdma_count_mappings(xprt, ctxt); - sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey; - ctxt->count++; - sge_off = 0; - sge_no++; - xdr_sge_no++; - if (xdr_sge_no > vec->count) { - pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no); - goto err; + if (len && xdr_pad) { + base += xdr_pad; + len -= xdr_pad; } - bc -= sge_bytes; - if (sge_no == xprt->sc_max_sge) - break; - } - - /* Prepare WRITE WR */ - memset(&write_wr, 0, sizeof write_wr); - ctxt->cqe.done = svc_rdma_wc_write; - write_wr.wr.wr_cqe = &ctxt->cqe; - write_wr.wr.sg_list = &sge[0]; - write_wr.wr.num_sge = sge_no; - write_wr.wr.opcode = IB_WR_RDMA_WRITE; - write_wr.wr.send_flags = IB_SEND_SIGNALED; - write_wr.rkey = rmr; - write_wr.remote_addr = to; - - /* Post It */ - atomic_inc(&rdma_stat_write); - if (svc_rdma_send(xprt, &write_wr.wr)) - goto err; - return write_len - bc; - err: - svc_rdma_unmap_dma(ctxt); - svc_rdma_put_context(ctxt, 0); - return -EIO; -} -noinline -static int send_write_chunks(struct svcxprt_rdma *xprt, - struct rpcrdma_write_array *wr_ary, - struct rpcrdma_msg *rdma_resp, - struct svc_rqst *rqstp, - struct svc_rdma_req_map *vec) -{ - u32 xfer_len = rqstp->rq_res.page_len; - int write_len; - u32 xdr_off; - int chunk_off; - int chunk_no; - int nchunks; - struct rpcrdma_write_array *res_ary; - int ret; - - res_ary = (struct rpcrdma_write_array *) - &rdma_resp->rm_body.rm_chunks[1]; - - /* Write chunks start at the pagelist */ - nchunks = be32_to_cpu(wr_ary->wc_nchunks); - for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0; - xfer_len && chunk_no < nchunks; - chunk_no++) { - struct rpcrdma_segment *arg_ch; - u64 rs_offset; - - arg_ch = &wr_ary->wc_array[chunk_no].wc_target; - write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length)); - - /* Prepare the response chunk given the length actually - * written */ - xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset); - svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no, - arg_ch->rs_handle, - arg_ch->rs_offset, - write_len); - chunk_off = 0; - while (write_len) { - ret = send_write(xprt, rqstp, - be32_to_cpu(arg_ch->rs_handle), - rs_offset + chunk_off, - xdr_off, - write_len, - vec); - if (ret <= 0) - goto out_err; - chunk_off += ret; - xdr_off += ret; - xfer_len -= ret; - write_len -= ret; - } + goto tail; } - /* Update the req with the number of chunks actually used */ - svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no); - return rqstp->rq_res.page_len; + ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); + page_off = xdr->page_base & ~PAGE_MASK; + remaining = xdr->page_len; + while (remaining) { + len = min_t(u32, PAGE_SIZE - page_off, remaining); -out_err: - pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret); - return -EIO; -} - -noinline -static int send_reply_chunks(struct svcxprt_rdma *xprt, - struct rpcrdma_write_array *rp_ary, - struct rpcrdma_msg *rdma_resp, - struct svc_rqst *rqstp, - struct svc_rdma_req_map *vec) -{ - u32 xfer_len = rqstp->rq_res.len; - int write_len; - u32 xdr_off; - int chunk_no; - 
int chunk_off; - int nchunks; - struct rpcrdma_segment *ch; - struct rpcrdma_write_array *res_ary; - int ret; + ret = svc_rdma_dma_map_page(rdma, ctxt, sge_no++, + *ppages++, page_off, len); + if (ret < 0) + return ret; - /* XXX: need to fix when reply lists occur with read-list and or - * write-list */ - res_ary = (struct rpcrdma_write_array *) - &rdma_resp->rm_body.rm_chunks[2]; - - /* xdr offset starts at RPC message */ - nchunks = be32_to_cpu(rp_ary->wc_nchunks); - for (xdr_off = 0, chunk_no = 0; - xfer_len && chunk_no < nchunks; - chunk_no++) { - u64 rs_offset; - ch = &rp_ary->wc_array[chunk_no].wc_target; - write_len = min(xfer_len, be32_to_cpu(ch->rs_length)); - - /* Prepare the reply chunk given the length actually - * written */ - xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset); - svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no, - ch->rs_handle, ch->rs_offset, - write_len); - chunk_off = 0; - while (write_len) { - ret = send_write(xprt, rqstp, - be32_to_cpu(ch->rs_handle), - rs_offset + chunk_off, - xdr_off, - write_len, - vec); - if (ret <= 0) - goto out_err; - chunk_off += ret; - xdr_off += ret; - xfer_len -= ret; - write_len -= ret; - } + remaining -= len; + page_off = 0; } - /* Update the req with the number of chunks actually used */ - svc_rdma_xdr_encode_reply_array(res_ary, chunk_no); - return rqstp->rq_res.len; + base = xdr->tail[0].iov_base; + len = xdr->tail[0].iov_len; +tail: + if (len) { + ret = svc_rdma_dma_map_buf(rdma, ctxt, sge_no++, base, len); + if (ret < 0) + return ret; + } -out_err: - pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret); - return -EIO; + return sge_no - 1; } /* The svc_rqst and all resources it owns are released as soon as @@ -525,90 +556,66 @@ int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma, return svc_rdma_send(rdma, send_wr); } -/* This function prepares the portion of the RPCRDMA message to be - * sent in the RDMA_SEND. This function is called after data sent via - * RDMA has already been transmitted. There are three cases: - * - The RPCRDMA header, RPC header, and payload are all sent in a - * single RDMA_SEND. This is the "inline" case. - * - The RPCRDMA header and some portion of the RPC header and data - * are sent via this RDMA_SEND and another portion of the data is - * sent via RDMA. - * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC - * header and data are all transmitted via RDMA. - * In all three cases, this function prepares the RPCRDMA header in - * sge[0], the 'type' parameter indicates the type to place in the - * RPCRDMA header, and the 'byte_count' field indicates how much of - * the XDR to include in this RDMA_SEND. NB: The offset of the payload - * to send is zero in the XDR. +/* Prepare the portion of the RPC Reply that will be transmitted + * via RDMA Send. The RPC-over-RDMA transport header is prepared + * in sge[0], and the RPC xdr_buf is prepared in following sges. + * + * Depending on whether a Write list or Reply chunk is present, + * the server may send all, a portion of, or none of the xdr_buf. + * In the latter case, only the transport header (sge[0]) is + * transmitted. + * + * RDMA Send is the last step of transmitting an RPC reply. Pages + * involved in the earlier RDMA Writes are here transferred out + * of the rqstp and into the ctxt's page array. These pages are + * DMA unmapped by each Write completion, but the subsequent Send + * completion finally releases these pages. + * + * Assumptions: + * - The Reply's transport header will never be larger than a page. 
*/ -static int send_reply(struct svcxprt_rdma *rdma, - struct svc_rqst *rqstp, - struct page *page, - struct rpcrdma_msg *rdma_resp, - struct svc_rdma_req_map *vec, - int byte_count, - u32 inv_rkey) +static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, + __be32 *rdma_argp, __be32 *rdma_resp, + struct svc_rqst *rqstp, + __be32 *wr_lst, __be32 *rp_ch) { struct svc_rdma_op_ctxt *ctxt; - u32 xdr_off; - int sge_no; - int sge_bytes; - int ret = -EIO; + u32 inv_rkey; + int ret; + + dprintk("svcrdma: sending %s reply: head=%zu, pagelen=%u, tail=%zu\n", + (rp_ch ? "RDMA_NOMSG" : "RDMA_MSG"), + rqstp->rq_res.head[0].iov_len, + rqstp->rq_res.page_len, + rqstp->rq_res.tail[0].iov_len); - /* Prepare the context */ ctxt = svc_rdma_get_context(rdma); - ctxt->direction = DMA_TO_DEVICE; - ctxt->pages[0] = page; - ctxt->count = 1; - /* Prepare the SGE for the RPCRDMA Header */ - ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey; - ctxt->sge[0].length = - svc_rdma_xdr_get_reply_hdr_len((__be32 *)rdma_resp); - ctxt->sge[0].addr = - ib_dma_map_page(rdma->sc_cm_id->device, page, 0, - ctxt->sge[0].length, DMA_TO_DEVICE); - if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) + ret = svc_rdma_map_reply_hdr(rdma, ctxt, rdma_resp, + svc_rdma_reply_hdr_len(rdma_resp)); + if (ret < 0) goto err; - svc_rdma_count_mappings(rdma, ctxt); - - ctxt->direction = DMA_TO_DEVICE; - /* Map the payload indicated by 'byte_count' */ - xdr_off = 0; - for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) { - sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count); - byte_count -= sge_bytes; - ctxt->sge[sge_no].addr = - dma_map_xdr(rdma, &rqstp->rq_res, xdr_off, - sge_bytes, DMA_TO_DEVICE); - xdr_off += sge_bytes; - if (ib_dma_mapping_error(rdma->sc_cm_id->device, - ctxt->sge[sge_no].addr)) + if (!rp_ch) { + ret = svc_rdma_map_reply_msg(rdma, ctxt, + &rqstp->rq_res, wr_lst); + if (ret < 0) goto err; - svc_rdma_count_mappings(rdma, ctxt); - ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey; - ctxt->sge[sge_no].length = sge_bytes; - } - if (byte_count != 0) { - pr_err("svcrdma: Could not map %d bytes\n", byte_count); - goto err; } svc_rdma_save_io_pages(rqstp, ctxt); - if (sge_no > rdma->sc_max_sge) { - pr_err("svcrdma: Too many sges (%d)\n", sge_no); - goto err; - } - - ret = svc_rdma_post_send_wr(rdma, ctxt, sge_no, inv_rkey); + inv_rkey = 0; + if (rdma->sc_snd_w_inv) + inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_lst, rp_ch); + ret = svc_rdma_post_send_wr(rdma, ctxt, 1 + ret, inv_rkey); if (ret) goto err; return 0; - err: +err: + pr_err("svcrdma: failed to post Send WR (%d)\n", ret); svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); return ret; @@ -618,41 +625,36 @@ void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp) { } +/** + * svc_rdma_sendto - Transmit an RPC reply + * @rqstp: processed RPC request, reply XDR already in ::rq_res + * + * Any resources still associated with @rqstp are released upon return. + * If no reply message was possible, the connection is closed. + * + * Returns: + * %0 if an RPC reply has been successfully posted, + * %-ENOMEM if a resource shortage occurred (connection is lost), + * %-ENOTCONN if posting failed (connection is lost). 
+ */ int svc_rdma_sendto(struct svc_rqst *rqstp) { struct svc_xprt *xprt = rqstp->rq_xprt; struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); - struct rpcrdma_msg *rdma_argp; - struct rpcrdma_msg *rdma_resp; - struct rpcrdma_write_array *wr_ary, *rp_ary; - int ret; - int inline_bytes; + __be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch; + struct xdr_buf *xdr = &rqstp->rq_res; struct page *res_page; - struct svc_rdma_req_map *vec; - u32 inv_rkey; - __be32 *p; - - dprintk("svcrdma: sending response for rqstp=%p\n", rqstp); + int ret; - /* Get the RDMA request header. The receive logic always - * places this at the start of page 0. + /* Find the call's chunk lists to decide how to send the reply. + * Receive places the Call's xprt header at the start of page 0. */ rdma_argp = page_address(rqstp->rq_pages[0]); - svc_rdma_get_write_arrays(rdma_argp, &wr_ary, &rp_ary); + svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch); - inv_rkey = 0; - if (rdma->sc_snd_w_inv) - inv_rkey = svc_rdma_get_inv_rkey(&rdma_argp->rm_xid, - (__be32 *)wr_ary, - (__be32 *)rp_ary); - - /* Build an req vec for the XDR */ - vec = svc_rdma_get_req_map(rdma); - ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL); - if (ret) - goto err0; - inline_bytes = rqstp->rq_res.len; + dprintk("svcrdma: preparing response for XID 0x%08x\n", + be32_to_cpup(rdma_argp)); /* Create the RDMA response header. xprt->xpt_mutex, * acquired in svc_send(), serializes RPC replies. The @@ -666,54 +668,46 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) goto err0; rdma_resp = page_address(res_page); - p = &rdma_resp->rm_xid; - *p++ = rdma_argp->rm_xid; - *p++ = rdma_argp->rm_vers; + p = rdma_resp; + *p++ = *rdma_argp; + *p++ = *(rdma_argp + 1); *p++ = rdma->sc_fc_credits; - *p++ = rp_ary ? rdma_nomsg : rdma_msg; + *p++ = rp_ch ? rdma_nomsg : rdma_msg; /* Start with empty chunks */ *p++ = xdr_zero; *p++ = xdr_zero; *p = xdr_zero; - /* Send any write-chunk data and build resp write-list */ - if (wr_ary) { - ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec); + if (wr_lst) { + /* XXX: Presume the client sent only one Write chunk */ + ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr); if (ret < 0) goto err1; - inline_bytes -= ret + xdr_padsize(ret); + svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret); } - - /* Send any reply-list data and update resp reply-list */ - if (rp_ary) { - ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec); + if (rp_ch) { + ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr); if (ret < 0) goto err1; - inline_bytes -= ret; + svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret); } - /* Post a fresh Receive buffer _before_ sending the reply */ ret = svc_rdma_post_recv(rdma, GFP_KERNEL); if (ret) goto err1; - - ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec, - inline_bytes, inv_rkey); + ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp, + wr_lst, rp_ch); if (ret < 0) goto err0; - - svc_rdma_put_req_map(rdma, vec); - dprintk("svcrdma: send_reply returns %d\n", ret); - return ret; + return 0; err1: put_page(res_page); err0: - svc_rdma_put_req_map(rdma, vec); pr_err("svcrdma: Could not send reply, err=%d. 
Closing transport.\n", ret); - set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); + set_bit(XPT_CLOSE, &xprt->xpt_flags); return -ENOTCONN; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index b25c50992a95..237c377c1e06 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -1053,6 +1053,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) memset(&qp_attr, 0, sizeof qp_attr); qp_attr.event_handler = qp_event_handler; qp_attr.qp_context = &newxprt->sc_xprt; + qp_attr.port_num = newxprt->sc_cm_id->port_num; + qp_attr.cap.max_rdma_ctxs = newxprt->sc_max_requests; qp_attr.cap.max_send_wr = newxprt->sc_sq_depth; qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth; qp_attr.cap.max_send_sge = newxprt->sc_max_sge; -- cgit v1.2.3 From 6b19cc5ca2f78ebc88f5d39ba6a94197bb392fcc Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:06:33 -0400 Subject: svcrdma: Clean up RDMA_ERROR path Now that svc_rdma_sendto has been renovated, svc_rdma_send_error can be refactored to reduce code duplication and remove C structure- based XDR encoding. It is also relocated to the source file that contains its only caller. This is a refactoring change only. Signed-off-by: Chuck Lever Reviewed-by: Sagi Grimberg Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/rpc_rdma.h | 3 ++ include/linux/sunrpc/svc_rdma.h | 5 ---- net/sunrpc/xprtrdma/svc_rdma_marshal.c | 19 ------------ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 52 ++++++++++++++++++++++++++++++++- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 43 --------------------------- 5 files changed, 54 insertions(+), 68 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 245fc59b7324..b7e85b341a54 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -143,6 +143,9 @@ enum rpcrdma_proc { #define rdma_done cpu_to_be32(RDMA_DONE) #define rdma_error cpu_to_be32(RDMA_ERROR) +#define err_vers cpu_to_be32(ERR_VERS) +#define err_chunk cpu_to_be32(ERR_CHUNK) + /* * Private extension to RPC-over-RDMA Version One. * Message passed during RDMA-CM connection set-up. 
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 599ee03ee3fb..a770d200f607 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -209,9 +209,6 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, /* svc_rdma_marshal.c */ extern int svc_rdma_xdr_decode_req(struct xdr_buf *); -extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, - struct rpcrdma_msg *, - enum rpcrdma_errcode, __be32 *); extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, __be32, __be64, u32); @@ -244,8 +241,6 @@ extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma, struct svc_rdma_op_ctxt *ctxt, int num_sge, u32 inv_rkey); extern int svc_rdma_sendto(struct svc_rqst *); -extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, - int); /* svc_rdma_transport.c */ extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *); diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c index 1c4aabf0f657..ae58a897eca0 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c +++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c @@ -167,25 +167,6 @@ out_inval: return -EINVAL; } -int svc_rdma_xdr_encode_error(struct svcxprt_rdma *xprt, - struct rpcrdma_msg *rmsgp, - enum rpcrdma_errcode err, __be32 *va) -{ - __be32 *startp = va; - - *va++ = rmsgp->rm_xid; - *va++ = rmsgp->rm_vers; - *va++ = xprt->sc_fc_credits; - *va++ = rdma_error; - *va++ = cpu_to_be32(err); - if (err == ERR_VERS) { - *va++ = rpcrdma_version; - *va++ = rpcrdma_version; - } - - return (int)((unsigned long)va - (unsigned long)startp); -} - /** * svc_rdma_xdr_get_reply_hdr_length - Get length of Reply transport header * @rdma_resp: buffer containing Reply transport header diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index f7b2daf72a86..7435cb666f42 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -558,6 +558,56 @@ static void rdma_read_complete(struct svc_rqst *rqstp, rqstp->rq_arg.buflen = head->arg.buflen; } +static void svc_rdma_send_error(struct svcxprt_rdma *xprt, + __be32 *rdma_argp, int status) +{ + struct svc_rdma_op_ctxt *ctxt; + __be32 *p, *err_msgp; + unsigned int length; + struct page *page; + int ret; + + ret = svc_rdma_repost_recv(xprt, GFP_KERNEL); + if (ret) + return; + + page = alloc_page(GFP_KERNEL); + if (!page) + return; + err_msgp = page_address(page); + + p = err_msgp; + *p++ = *rdma_argp; + *p++ = *(rdma_argp + 1); + *p++ = xprt->sc_fc_credits; + *p++ = rdma_error; + if (status == -EPROTONOSUPPORT) { + *p++ = err_vers; + *p++ = rpcrdma_version; + *p++ = rpcrdma_version; + } else { + *p++ = err_chunk; + } + length = (unsigned long)p - (unsigned long)err_msgp; + + /* Map transport header; no RPC message payload */ + ctxt = svc_rdma_get_context(xprt); + ret = svc_rdma_map_reply_hdr(xprt, ctxt, err_msgp, length); + if (ret) { + dprintk("svcrdma: Error %d mapping send for protocol error\n", + ret); + return; + } + + ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0); + if (ret) { + dprintk("svcrdma: Error %d posting send for protocol error\n", + ret); + svc_rdma_unmap_dma(ctxt); + svc_rdma_put_context(ctxt, 1); + } +} + /* By convention, backchannel calls arrive via rdma_msg type * messages, and never populate the chunk lists. 
This makes * the RPC/RDMA header small and fixed in size, so it is @@ -686,7 +736,7 @@ complete: return ret; out_err: - svc_rdma_send_error(rdma_xprt, rmsgp, ret); + svc_rdma_send_error(rdma_xprt, &rmsgp->rm_xid, ret); svc_rdma_put_context(ctxt, 0); return 0; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index ce62b78e5bc9..0b646e8f23c7 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -710,46 +710,3 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) set_bit(XPT_CLOSE, &xprt->xpt_flags); return -ENOTCONN; } - -void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, - int status) -{ - struct page *p; - struct svc_rdma_op_ctxt *ctxt; - enum rpcrdma_errcode err; - __be32 *va; - int length; - int ret; - - ret = svc_rdma_repost_recv(xprt, GFP_KERNEL); - if (ret) - return; - - p = alloc_page(GFP_KERNEL); - if (!p) - return; - va = page_address(p); - - /* XDR encode an error reply */ - err = ERR_CHUNK; - if (status == -EPROTONOSUPPORT) - err = ERR_VERS; - length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); - - /* Map transport header; no RPC message payload */ - ctxt = svc_rdma_get_context(xprt); - ret = svc_rdma_map_reply_hdr(xprt, ctxt, &rmsgp->rm_xid, length); - if (ret) { - dprintk("svcrdma: Error %d mapping send for protocol error\n", - ret); - return; - } - - ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0); - if (ret) { - dprintk("svcrdma: Error %d posting send for protocol error\n", - ret); - svc_rdma_unmap_dma(ctxt); - svc_rdma_put_context(ctxt, 1); - } -} -- cgit v1.2.3 From f5821c76b2c9c2fb98b276c0bf6a101bfe9050a3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:06:49 -0400 Subject: svcrdma: Clean up RPC-over-RDMA backchannel reply processing Replace C structure-based XDR decoding with pointer arithmetic. Pointer arithmetic is considered more portable. Signed-off-by: Chuck Lever Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 2 +- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 18 ++++++++++++++---- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 27 +++++++++++++++------------ 3 files changed, 30 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index a770d200f607..44d642bbfce6 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -204,7 +204,7 @@ static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma, /* svc_rdma_backchannel.c */ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, - struct rpcrdma_msg *rmsgp, + __be32 *rdma_resp, struct xdr_buf *rcvbuf); /* svc_rdma_marshal.c */ diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index bf185b79c98f..c676ed0efb5a 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -12,7 +12,17 @@ #undef SVCRDMA_BACKCHANNEL_DEBUG -int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct rpcrdma_msg *rmsgp, +/** + * svc_rdma_handle_bc_reply - Process incoming backchannel reply + * @xprt: controlling backchannel transport + * @rdma_resp: pointer to incoming transport header + * @rcvbuf: XDR buffer into which to decode the reply + * + * Returns: + * %0 if @rcvbuf is filled in, xprt_complete_rqst called, + * %-EAGAIN if server should call ->recvfrom again. 
+ */ +int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, struct xdr_buf *rcvbuf) { struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); @@ -27,13 +37,13 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct rpcrdma_msg *rmsgp, p = (__be32 *)src->iov_base; len = src->iov_len; - xid = rmsgp->rm_xid; + xid = *rdma_resp; #ifdef SVCRDMA_BACKCHANNEL_DEBUG pr_info("%s: xid=%08x, length=%zu\n", __func__, be32_to_cpu(xid), len); pr_info("%s: RPC/RDMA: %*ph\n", - __func__, (int)RPCRDMA_HDRLEN_MIN, rmsgp); + __func__, (int)RPCRDMA_HDRLEN_MIN, rdma_resp); pr_info("%s: RPC: %*ph\n", __func__, (int)len, p); #endif @@ -53,7 +63,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, struct rpcrdma_msg *rmsgp, goto out_unlock; memcpy(dst->iov_base, p, len); - credits = be32_to_cpu(rmsgp->rm_credit); + credits = be32_to_cpup(rdma_resp + 2); if (credits == 0) credits = 1; /* don't deadlock */ else if (credits > r_xprt->rx_buf.rb_bc_max_requests) diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 7435cb666f42..27a99bf5b1a6 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -613,28 +613,30 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt, * the RPC/RDMA header small and fixed in size, so it is * straightforward to check the RPC header's direction field. */ -static bool -svc_rdma_is_backchannel_reply(struct svc_xprt *xprt, struct rpcrdma_msg *rmsgp) +static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt, + __be32 *rdma_resp) { - __be32 *p = (__be32 *)rmsgp; + __be32 *p; if (!xprt->xpt_bc_xprt) return false; - if (rmsgp->rm_type != rdma_msg) + p = rdma_resp + 3; + if (*p++ != rdma_msg) return false; - if (rmsgp->rm_body.rm_chunks[0] != xdr_zero) + + if (*p++ != xdr_zero) return false; - if (rmsgp->rm_body.rm_chunks[1] != xdr_zero) + if (*p++ != xdr_zero) return false; - if (rmsgp->rm_body.rm_chunks[2] != xdr_zero) + if (*p++ != xdr_zero) return false; - /* sanity */ - if (p[7] != rmsgp->rm_xid) + /* XID sanity */ + if (*p++ != *rdma_resp) return false; /* call direction */ - if (p[8] == cpu_to_be32(RPC_CALL)) + if (*p == cpu_to_be32(RPC_CALL)) return false; return true; @@ -700,8 +702,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) goto out_drop; rqstp->rq_xprt_hlen = ret; - if (svc_rdma_is_backchannel_reply(xprt, rmsgp)) { - ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, rmsgp, + if (svc_rdma_is_backchannel_reply(xprt, &rmsgp->rm_xid)) { + ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, + &rmsgp->rm_xid, &rqstp->rq_arg); svc_rdma_put_context(ctxt, 0); if (ret) -- cgit v1.2.3 From ded8d19641a605232ab48f5d27f542648beba3cc Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:06:57 -0400 Subject: svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt The sge array in struct svc_rdma_op_ctxt is no longer used for sending RDMA Write WRs. It need only accommodate the construction of Send and Receive WRs. The maximum inline size is the largest payload it needs to handle now. Signed-off-by: Chuck Lever Reviewed-by: Sagi Grimberg Signed-off-by: J. 
Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 9 +++++++-- net/sunrpc/xprtrdma/svc_rdma.c | 6 +++--- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 44d642bbfce6..e84b77556784 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -48,6 +48,12 @@ #include #define SVCRDMA_DEBUG +/* Default and maximum inline threshold sizes */ +enum { + RPCRDMA_DEF_INLINE_THRESH = 4096, + RPCRDMA_MAX_INLINE_THRESH = 65536 +}; + /* RPC/RDMA parameters and stats */ extern unsigned int svcrdma_ord; extern unsigned int svcrdma_max_requests; @@ -86,7 +92,7 @@ struct svc_rdma_op_ctxt { int count; unsigned int mapped_sges; struct ib_send_wr send_wr; - struct ib_sge sge[RPCSVC_MAXPAGES]; + struct ib_sge sge[1 + RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE]; struct page *pages[RPCSVC_MAXPAGES]; }; @@ -186,7 +192,6 @@ struct svcxprt_rdma { * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ #define RPCRDMA_ORD (64/4) #define RPCRDMA_MAX_REQUESTS 32 -#define RPCRDMA_MAX_REQ_SIZE 4096 /* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our * current NFSv4.1 implementation supports one backchannel slot. diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c index 912444174647..a4a8f6989ee7 100644 --- a/net/sunrpc/xprtrdma/svc_rdma.c +++ b/net/sunrpc/xprtrdma/svc_rdma.c @@ -58,9 +58,9 @@ unsigned int svcrdma_max_requests = RPCRDMA_MAX_REQUESTS; unsigned int svcrdma_max_bc_requests = RPCRDMA_MAX_BC_REQUESTS; static unsigned int min_max_requests = 4; static unsigned int max_max_requests = 16384; -unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE; -static unsigned int min_max_inline = 4096; -static unsigned int max_max_inline = 65536; +unsigned int svcrdma_max_req_size = RPCRDMA_DEF_INLINE_THRESH; +static unsigned int min_max_inline = RPCRDMA_DEF_INLINE_THRESH; +static unsigned int max_max_inline = RPCRDMA_MAX_INLINE_THRESH; atomic_t rdma_stat_recv; atomic_t rdma_stat_read; -- cgit v1.2.3 From 68cc4636bbbca89b9fedcf46d8b6bee444fc5e4e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:07:05 -0400 Subject: svcrdma: Remove unused RDMA Write completion handler Clean up. All RDMA Write completions are now handled by svc_rdma_wc_write_ctx. Signed-off-by: Chuck Lever Signed-off-by: J. 
Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 1 - net/sunrpc/xprtrdma/svc_rdma_transport.c | 18 ------------------ 2 files changed, 19 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index e84b77556784..f58c5349beb7 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -249,7 +249,6 @@ extern int svc_rdma_sendto(struct svc_rqst *); /* svc_rdma_transport.c */ extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *); -extern void svc_rdma_wc_write(struct ib_cq *, struct ib_wc *); extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *); extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *); extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *); diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 237c377c1e06..1597ca8ba3c0 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -473,24 +473,6 @@ void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) svc_rdma_put_context(ctxt, 1); } -/** - * svc_rdma_wc_write - Invoked by RDMA provider for each polled Write WC - * @cq: completion queue - * @wc: completed WR - * - */ -void svc_rdma_wc_write(struct ib_cq *cq, struct ib_wc *wc) -{ - struct ib_cqe *cqe = wc->wr_cqe; - struct svc_rdma_op_ctxt *ctxt; - - svc_rdma_send_wc_common_put(cq, wc, "write"); - - ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe); - svc_rdma_unmap_dma(ctxt); - svc_rdma_put_context(ctxt, 0); -} - /** * svc_rdma_wc_reg - Invoked by RDMA provider for each polled FASTREG WC * @cq: completion queue -- cgit v1.2.3 From 2cf32924c68a22783e6f630e1b5345a80aa1a376 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:07:13 -0400 Subject: svcrdma: Remove the req_map cache req_maps are no longer used by the send path and can thus be removed. Signed-off-by: Chuck Lever Reviewed-by: Sagi Grimberg Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 34 +------------ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 68 -------------------------- net/sunrpc/xprtrdma/svc_rdma_transport.c | 84 -------------------------------- 3 files changed, 1 insertion(+), 185 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index f58c5349beb7..479bb7f65233 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -96,23 +96,6 @@ struct svc_rdma_op_ctxt { struct page *pages[RPCSVC_MAXPAGES]; }; -/* - * NFS_ requests are mapped on the client side by the chunk lists in - * the RPCRDMA header. During the fetching of the RPC from the client - * and the writing of the reply to the client, the memory in the - * client and the memory in the server must be mapped as contiguous - * vaddr/len for access by the hardware. These data strucures keep - * these mappings. - * - * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the - * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the - * 'ch' field maps the read-list of the RPCRDMA header to the 'sge' - * mapping of the reply. 
- */ -struct svc_rdma_chunk_sge { - int start; /* sge no for this chunk */ - int count; /* sge count for this chunk */ -}; struct svc_rdma_fastreg_mr { struct ib_mr *mr; struct scatterlist *sg; @@ -121,15 +104,7 @@ struct svc_rdma_fastreg_mr { enum dma_data_direction direction; struct list_head frmr_list; }; -struct svc_rdma_req_map { - struct list_head free; - unsigned long count; - union { - struct kvec sge[RPCSVC_MAXPAGES]; - struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES]; - unsigned long lkey[RPCSVC_MAXPAGES]; - }; -}; + #define RDMACTXT_F_LAST_CTXT 2 #define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */ @@ -160,8 +135,6 @@ struct svcxprt_rdma { int sc_ctxt_used; spinlock_t sc_rw_ctxt_lock; struct list_head sc_rw_ctxts; - spinlock_t sc_map_lock; - struct list_head sc_maps; struct list_head sc_rq_dto_q; spinlock_t sc_rq_dto_lock; @@ -237,8 +210,6 @@ extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, struct xdr_buf *xdr); /* svc_rdma_sendto.c */ -extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, - struct svc_rdma_req_map *, bool); extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma, struct svc_rdma_op_ctxt *ctxt, __be32 *rdma_resp, unsigned int len); @@ -259,9 +230,6 @@ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt); -extern struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *); -extern void svc_rdma_put_req_map(struct svcxprt_rdma *, - struct svc_rdma_req_map *); extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *); extern void svc_rdma_put_frmr(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *); diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index e514f6864a93..1736337f3a55 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -240,74 +240,6 @@ static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch, xdr_encode_write_chunk(p, rp_ch, consumed); } -int svc_rdma_map_xdr(struct svcxprt_rdma *xprt, - struct xdr_buf *xdr, - struct svc_rdma_req_map *vec, - bool write_chunk_present) -{ - int sge_no; - u32 sge_bytes; - u32 page_bytes; - u32 page_off; - int page_no; - - if (xdr->len != - (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) { - pr_err("svcrdma: %s: XDR buffer length error\n", __func__); - return -EIO; - } - - /* Skip the first sge, this is for the RPCRDMA header */ - sge_no = 1; - - /* Head SGE */ - vec->sge[sge_no].iov_base = xdr->head[0].iov_base; - vec->sge[sge_no].iov_len = xdr->head[0].iov_len; - sge_no++; - - /* pages SGE */ - page_no = 0; - page_bytes = xdr->page_len; - page_off = xdr->page_base; - while (page_bytes) { - vec->sge[sge_no].iov_base = - page_address(xdr->pages[page_no]) + page_off; - sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off)); - page_bytes -= sge_bytes; - vec->sge[sge_no].iov_len = sge_bytes; - - sge_no++; - page_no++; - page_off = 0; /* reset for next time through loop */ - } - - /* Tail SGE */ - if (xdr->tail[0].iov_len) { - unsigned char *base = xdr->tail[0].iov_base; - size_t len = xdr->tail[0].iov_len; - u32 xdr_pad = xdr_padsize(xdr->page_len); - - if (write_chunk_present && xdr_pad) { - base += xdr_pad; - len -= xdr_pad; - } - - if (len) { - vec->sge[sge_no].iov_base = base; - vec->sge[sge_no].iov_len = 
len; - sge_no++; - } - } - - dprintk("svcrdma: %s: sge_no %d page_no %d " - "page_base %u page_len %u head_len %zu tail_len %zu\n", - __func__, sge_no, page_no, xdr->page_base, xdr->page_len, - xdr->head[0].iov_len, xdr->tail[0].iov_len); - - vec->count = sge_no; - return 0; -} - /* Parse the RPC Call's transport header. */ static void svc_rdma_get_write_arrays(__be32 *rdma_argp, diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 1597ca8ba3c0..a9d9cb1ba4c6 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -272,85 +272,6 @@ static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt) } } -static struct svc_rdma_req_map *alloc_req_map(gfp_t flags) -{ - struct svc_rdma_req_map *map; - - map = kmalloc(sizeof(*map), flags); - if (map) - INIT_LIST_HEAD(&map->free); - return map; -} - -static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt) -{ - unsigned int i; - - /* One for each receive buffer on this connection. */ - i = xprt->sc_max_requests; - - while (i--) { - struct svc_rdma_req_map *map; - - map = alloc_req_map(GFP_KERNEL); - if (!map) { - dprintk("svcrdma: No memory for request map\n"); - return false; - } - list_add(&map->free, &xprt->sc_maps); - } - return true; -} - -struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *xprt) -{ - struct svc_rdma_req_map *map = NULL; - - spin_lock(&xprt->sc_map_lock); - if (list_empty(&xprt->sc_maps)) - goto out_empty; - - map = list_first_entry(&xprt->sc_maps, - struct svc_rdma_req_map, free); - list_del_init(&map->free); - spin_unlock(&xprt->sc_map_lock); - -out: - map->count = 0; - return map; - -out_empty: - spin_unlock(&xprt->sc_map_lock); - - /* Pre-allocation amount was incorrect */ - map = alloc_req_map(GFP_NOIO); - if (map) - goto out; - - WARN_ONCE(1, "svcrdma: empty request map list?\n"); - return NULL; -} - -void svc_rdma_put_req_map(struct svcxprt_rdma *xprt, - struct svc_rdma_req_map *map) -{ - spin_lock(&xprt->sc_map_lock); - list_add(&map->free, &xprt->sc_maps); - spin_unlock(&xprt->sc_map_lock); -} - -static void svc_rdma_destroy_maps(struct svcxprt_rdma *xprt) -{ - while (!list_empty(&xprt->sc_maps)) { - struct svc_rdma_req_map *map; - - map = list_first_entry(&xprt->sc_maps, - struct svc_rdma_req_map, free); - list_del(&map->free); - kfree(map); - } -} - /* QP event handler */ static void qp_event_handler(struct ib_event *event, void *context) { @@ -544,7 +465,6 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, INIT_LIST_HEAD(&cma_xprt->sc_frmr_q); INIT_LIST_HEAD(&cma_xprt->sc_ctxts); INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts); - INIT_LIST_HEAD(&cma_xprt->sc_maps); init_waitqueue_head(&cma_xprt->sc_send_wait); spin_lock_init(&cma_xprt->sc_lock); @@ -552,7 +472,6 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, spin_lock_init(&cma_xprt->sc_frmr_q_lock); spin_lock_init(&cma_xprt->sc_ctxt_lock); spin_lock_init(&cma_xprt->sc_rw_ctxt_lock); - spin_lock_init(&cma_xprt->sc_map_lock); /* * Note that this implies that the underlying transport support @@ -1004,8 +923,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) if (!svc_rdma_prealloc_ctxts(newxprt)) goto errout; - if (!svc_rdma_prealloc_maps(newxprt)) - goto errout; /* * Limit ORD based on client limit, local device limit, and @@ -1237,7 +1154,6 @@ static void __svc_rdma_free(struct work_struct *work) rdma_dealloc_frmr_q(rdma); svc_rdma_destroy_rw_ctxts(rdma); svc_rdma_destroy_ctxts(rdma); - 
svc_rdma_destroy_maps(rdma); /* Destroy the QP if present (not a listener) */ if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) -- cgit v1.2.3 From dadf3e435debb85dfcf28c157012047153a21a97 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 9 Apr 2017 13:07:21 -0400 Subject: svcrdma: Clean out old XDR encoders Clean up: These have been replaced and are no longer used. Signed-off-by: Chuck Lever Reviewed-by: Sagi Grimberg Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svc_rdma.h | 4 -- net/sunrpc/xprtrdma/svc_rdma_marshal.c | 70 ---------------------------------- 2 files changed, 74 deletions(-) (limited to 'include') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 479bb7f65233..f3787d800ba4 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -187,10 +187,6 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, /* svc_rdma_marshal.c */ extern int svc_rdma_xdr_decode_req(struct xdr_buf *); -extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); -extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, - __be32, __be64, u32); -extern unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp); /* svc_rdma_recvfrom.c */ extern int svc_rdma_recvfrom(struct svc_rqst *); diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c index ae58a897eca0..bdcf7d85a3dc 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c +++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c @@ -166,73 +166,3 @@ out_inval: dprintk("svcrdma: failed to parse transport header\n"); return -EINVAL; } - -/** - * svc_rdma_xdr_get_reply_hdr_length - Get length of Reply transport header - * @rdma_resp: buffer containing Reply transport header - * - * Returns length of transport header, in bytes. - */ -unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp) -{ - unsigned int nsegs; - __be32 *p; - - p = rdma_resp; - - /* RPC-over-RDMA V1 replies never have a Read list. */ - p += rpcrdma_fixed_maxsz + 1; - - /* Skip Write list. */ - while (*p++ != xdr_zero) { - nsegs = be32_to_cpup(p++); - p += nsegs * rpcrdma_segment_maxsz; - } - - /* Skip Reply chunk. 
*/ - if (*p++ != xdr_zero) { - nsegs = be32_to_cpup(p++); - p += nsegs * rpcrdma_segment_maxsz; - } - - return (unsigned long)p - (unsigned long)rdma_resp; -} - -void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *rmsgp, int chunks) -{ - struct rpcrdma_write_array *ary; - - /* no read-list */ - rmsgp->rm_body.rm_chunks[0] = xdr_zero; - - /* write-array discrim */ - ary = (struct rpcrdma_write_array *) - &rmsgp->rm_body.rm_chunks[1]; - ary->wc_discrim = xdr_one; - ary->wc_nchunks = cpu_to_be32(chunks); - - /* write-list terminator */ - ary->wc_array[chunks].wc_target.rs_handle = xdr_zero; - - /* reply-array discriminator */ - ary->wc_array[chunks].wc_target.rs_length = xdr_zero; -} - -void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary, - int chunks) -{ - ary->wc_discrim = xdr_one; - ary->wc_nchunks = cpu_to_be32(chunks); -} - -void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary, - int chunk_no, - __be32 rs_handle, - __be64 rs_offset, - u32 write_len) -{ - struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target; - seg->rs_handle = rs_handle; - seg->rs_offset = rs_offset; - seg->rs_length = cpu_to_be32(write_len); -} -- cgit v1.2.3 From c373fff7bd252ec36e8a895c58a584088f1d38bc Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 26 Apr 2017 12:26:22 -0400 Subject: NFSv4: Don't special case "launder" If the client receives a fatal server error from nfs_pageio_add_request(), then we should always truncate the page on which the error occurred. Signed-off-by: Trond Myklebust --- fs/nfs/file.c | 2 +- fs/nfs/write.c | 27 +++++++++++---------------- include/linux/nfs_fs.h | 14 +------------- 3 files changed, 13 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/fs/nfs/file.c b/fs/nfs/file.c index bebed885b6e4..5713eb32a45e 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -482,7 +482,7 @@ static int nfs_launder_page(struct page *page) inode->i_ino, (long long)page_offset(page)); nfs_fscache_wait_on_page_write(nfsi, page); - return nfs_wb_launder_page(inode, page); + return nfs_wb_page(inode, page); } static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 2e654940478f..59e21cc0a266 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -586,8 +586,7 @@ nfs_error_is_fatal_on_server(int err) * May return an error if the user signalled nfs_wait_on_request(). */ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, - struct page *page, bool nonblock, - bool launder) + struct page *page, bool nonblock) { struct nfs_page *req; int ret = 0; @@ -610,13 +609,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, if (!nfs_pageio_add_request(pgio, req)) { ret = pgio->pg_error; /* - * Remove the problematic req upon fatal errors - * in launder case, while other dirty pages can - * still be around until they get flushed. 
+ * Remove the problematic req upon fatal errors on the server */ if (nfs_error_is_fatal(ret)) { nfs_context_set_write_error(req->wb_context, ret); - if (launder) + if (nfs_error_is_fatal_on_server(ret)) goto out_launder; } nfs_redirty_request(req); @@ -632,13 +629,12 @@ out_launder: } static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, - struct nfs_pageio_descriptor *pgio, bool launder) + struct nfs_pageio_descriptor *pgio) { int ret; nfs_pageio_cond_complete(pgio, page_index(page)); - ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE, - launder); + ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); if (ret == -EAGAIN) { redirty_page_for_writepage(wbc, page); ret = 0; @@ -650,8 +646,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, * Write an mmapped page to the server. */ static int nfs_writepage_locked(struct page *page, - struct writeback_control *wbc, - bool launder) + struct writeback_control *wbc) { struct nfs_pageio_descriptor pgio; struct inode *inode = page_file_mapping(page)->host; @@ -660,7 +655,7 @@ static int nfs_writepage_locked(struct page *page, nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); nfs_pageio_init_write(&pgio, inode, 0, false, &nfs_async_write_completion_ops); - err = nfs_do_writepage(page, wbc, &pgio, launder); + err = nfs_do_writepage(page, wbc, &pgio); nfs_pageio_complete(&pgio); if (err < 0) return err; @@ -673,7 +668,7 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc) { int ret; - ret = nfs_writepage_locked(page, wbc, false); + ret = nfs_writepage_locked(page, wbc); unlock_page(page); return ret; } @@ -682,7 +677,7 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control * { int ret; - ret = nfs_do_writepage(page, wbc, data, false); + ret = nfs_do_writepage(page, wbc, data); unlock_page(page); return ret; } @@ -2013,7 +2008,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) /* * Write back all requests on one page - we do this before reading it. 
*/ -int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder) +int nfs_wb_page(struct inode *inode, struct page *page) { loff_t range_start = page_file_offset(page); loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1); @@ -2030,7 +2025,7 @@ int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder) for (;;) { wait_on_page_writeback(page); if (clear_page_dirty_for_io(page)) { - ret = nfs_writepage_locked(page, &wbc, launder); + ret = nfs_writepage_locked(page, &wbc); if (ret < 0) goto out_error; continue; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 9aa044e76820..bb0eb2c9acca 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -500,24 +500,12 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned */ extern int nfs_sync_inode(struct inode *inode); extern int nfs_wb_all(struct inode *inode); -extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder); +extern int nfs_wb_page(struct inode *inode, struct page *page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); extern void nfs_commit_free(struct nfs_commit_data *data); -static inline int -nfs_wb_launder_page(struct inode *inode, struct page *page) -{ - return nfs_wb_single_page(inode, page, true); -} - -static inline int -nfs_wb_page(struct inode *inode, struct page *page) -{ - return nfs_wb_single_page(inode, page, false); -} - static inline int nfs_have_writebacks(struct inode *inode) { -- cgit v1.2.3 From c7e88067c1ae89e7bcbed070fb2c4e30bc39b51f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 18 Apr 2017 16:01:46 -0700 Subject: srcu: Exact tracking of srcu_data structures containing callbacks The current Tree SRCU implementation schedules a workqueue for every srcu_data covered by a given leaf srcu_node structure having callbacks, even if only one of those srcu_data structures actually contains callbacks. This is clearly inefficient for workloads that don't feature callbacks everywhere all the time. This commit therefore adds an array of masks that are used by the leaf srcu_node structures to track exactly which srcu_data structures contain callbacks. Signed-off-by: Paul E. McKenney Tested-by: Mike Galbraith --- include/linux/srcutree.h | 4 ++++ kernel/rcu/srcutree.c | 29 +++++++++++++++++++++++------ 2 files changed, 27 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 0400e211aa44..94515ff226fb 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -47,6 +47,8 @@ struct srcu_data { struct delayed_work work; /* Context for CB invoking. */ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ struct srcu_node *mynode; /* Leaf srcu_node. */ + unsigned long grpmask; /* Mask for leaf srcu_node */ + /* ->srcu_data_have_cbs[]. */ int cpu; struct srcu_struct *sp; }; @@ -59,6 +61,8 @@ struct srcu_node { unsigned long srcu_have_cbs[4]; /* GP seq for children */ /* having CBs, but only */ /* is > ->srcu_gq_seq. */ + unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */ + /* have CBs for given GP? */ struct srcu_node *srcu_parent; /* Next up in tree. */ int grplo; /* Least CPU for node. */ int grphi; /* Biggest CPU for node. 
*/ diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 9ecf0acc18eb..1c2c1004b3b1 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -66,8 +66,12 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) /* Each pass through this loop initializes one srcu_node structure. */ rcu_for_each_node_breadth_first(sp, snp) { spin_lock_init(&snp->lock); - for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) + WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != + ARRAY_SIZE(snp->srcu_data_have_cbs)); + for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) { snp->srcu_have_cbs[i] = 0; + snp->srcu_data_have_cbs[i] = 0; + } snp->grplo = -1; snp->grphi = -1; if (snp == &sp->node[0]) { @@ -107,6 +111,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) sdp->cpu = cpu; INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); sdp->sp = sp; + sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); if (is_static) continue; @@ -434,16 +439,21 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) /* * Schedule callback invocation for all srcu_data structures associated - * with the specified srcu_node structure, if possible, on the corresponding - * CPUs. + * with the specified srcu_node structure that have callbacks for the + * just-completed grace period, the one corresponding to idx. If possible, + * schedule this invocation on the corresponding CPUs. */ -static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp) +static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, + unsigned long mask) { int cpu; - for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) + for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { + if (!(mask & (1 << (cpu - snp->grplo)))) + continue; srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL); + } } /* @@ -461,6 +471,7 @@ static void srcu_gp_end(struct srcu_struct *sp) unsigned long gpseq; int idx; int idxnext; + unsigned long mask; struct srcu_node *snp; /* Prevent more than one additional grace period. */ @@ -486,10 +497,12 @@ static void srcu_gp_end(struct srcu_struct *sp) cbs = snp->srcu_have_cbs[idx] == gpseq; snp->srcu_have_cbs[idx] = gpseq; rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); + mask = snp->srcu_data_have_cbs[idx]; + snp->srcu_data_have_cbs[idx] = 0; spin_unlock_irq(&snp->lock); if (cbs) { smp_mb(); /* GP end before CB invocation. */ - srcu_schedule_cbs_snp(sp, snp); + srcu_schedule_cbs_snp(sp, snp, mask); } } @@ -536,6 +549,8 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, spin_lock_irqsave(&snp->lock, flags); if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { snp_seq = snp->srcu_have_cbs[idx]; + if (snp == sdp->mynode && snp_seq == s) + snp->srcu_data_have_cbs[idx] |= sdp->grpmask; spin_unlock_irqrestore(&snp->lock, flags); if (snp == sdp->mynode && snp_seq != s) { smp_mb(); /* CBs after GP! */ @@ -544,6 +559,8 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, return; } snp->srcu_have_cbs[idx] = s; + if (snp == sdp->mynode) + snp->srcu_data_have_cbs[idx] |= sdp->grpmask; spin_unlock_irqrestore(&snp->lock, flags); } -- cgit v1.2.3 From 7f6733c3c648ddd6cf459c1b80ad388a95452955 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 18 Apr 2017 17:17:35 -0700 Subject: srcu: Make rcutorture writer stalls print SRCU GP state In the past, SRCU was simple enough that there was little point in making the rcutorture writer stall messages print the SRCU grace-period number state. 
With the advent of Tree SRCU, this has changed. This commit therefore makes Classic, Tiny, and Tree SRCU report this state to rcutorture as needed. Signed-off-by: Paul E. McKenney Tested-by: Mike Galbraith --- include/linux/srcuclassic.h | 14 ++++++++++++++ include/linux/srcutiny.h | 12 ++++++++++++ include/linux/srcutree.h | 4 ++++ kernel/rcu/rcutorture.c | 8 +++++--- kernel/rcu/srcutree.c | 13 +++++++++++++ kernel/rcu/tree.c | 12 ++++-------- 6 files changed, 52 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/srcuclassic.h b/include/linux/srcuclassic.h index 41cf99930f34..5753f7322262 100644 --- a/include/linux/srcuclassic.h +++ b/include/linux/srcuclassic.h @@ -98,4 +98,18 @@ void synchronize_srcu_expedited(struct srcu_struct *sp); void srcu_barrier(struct srcu_struct *sp); unsigned long srcu_batches_completed(struct srcu_struct *sp); +static inline void srcutorture_get_gp_data(enum rcutorture_type test_type, + struct srcu_struct *sp, int *flags, + unsigned long *gpnum, + unsigned long *completed) +{ + if (test_type != SRCU_FLAVOR) + return; + *flags = 0; + *completed = sp->completed; + *gpnum = *completed; + if (sp->batch_queue.head || sp->batch_check0.head || sp->batch_check0.head) + (*gpnum)++; +} + #endif diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 4f284e4f4d8c..42311ee0334f 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -78,4 +78,16 @@ static inline unsigned long srcu_batches_completed(struct srcu_struct *sp) return 0; } +static inline void srcutorture_get_gp_data(enum rcutorture_type test_type, + struct srcu_struct *sp, int *flags, + unsigned long *gpnum, + unsigned long *completed) +{ + if (test_type != SRCU_FLAVOR) + return; + *flags = 0; + *completed = sp->srcu_gp_seq; + *gpnum = *completed; +} + #endif diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 94515ff226fb..3865717df124 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -140,4 +140,8 @@ void synchronize_srcu_expedited(struct srcu_struct *sp); void srcu_barrier(struct srcu_struct *sp); unsigned long srcu_batches_completed(struct srcu_struct *sp); +void srcutorture_get_gp_data(enum rcutorture_type test_type, + struct srcu_struct *sp, int *flags, + unsigned long *gpnum, unsigned long *completed); + #endif diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index e9d4527cdd43..ae6e574d4cf5 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1360,12 +1360,14 @@ rcu_torture_stats_print(void) cur_ops->stats(); if (rtcv_snap == rcu_torture_current_version && rcu_torture_current != NULL) { - int __maybe_unused flags; - unsigned long __maybe_unused gpnum; - unsigned long __maybe_unused completed; + int __maybe_unused flags = 0; + unsigned long __maybe_unused gpnum = 0; + unsigned long __maybe_unused completed = 0; rcutorture_get_gp_data(cur_ops->ttype, &flags, &gpnum, &completed); + srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, + &flags, &gpnum, &completed); wtp = READ_ONCE(writer_task); pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx\n", rcu_torture_writer_state_getname(), diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 1c2c1004b3b1..72b6cce5f591 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -1011,3 +1011,16 @@ void process_srcu(struct work_struct *work) srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) ? 
0 : SRCU_INTERVAL); } EXPORT_SYMBOL_GPL(process_srcu); + +void srcutorture_get_gp_data(enum rcutorture_type test_type, + struct srcu_struct *sp, int *flags, + unsigned long *gpnum, + unsigned long *completed) +{ + if (test_type != SRCU_FLAVOR) + return; + *flags = 0; + *completed = rcu_seq_ctr(sp->srcu_gp_seq); + *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed); +} +EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 23aa02587d0f..91fff49d5869 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -704,15 +704,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, default: break; } - if (rsp != NULL) { - *flags = READ_ONCE(rsp->gp_flags); - *gpnum = READ_ONCE(rsp->gpnum); - *completed = READ_ONCE(rsp->completed); + if (rsp == NULL) return; - } - *flags = 0; - *gpnum = 0; - *completed = 0; + *flags = READ_ONCE(rsp->gp_flags); + *gpnum = READ_ONCE(rsp->gpnum); + *completed = READ_ONCE(rsp->completed); } EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); -- cgit v1.2.3 From bfd20f1cc85010d2f2d77e544da05cd8c149ba9b Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 26 Apr 2017 09:18:35 -0700 Subject: x86, iommu/vt-d: Add an option to disable Intel IOMMU force on The IOMMU harms performance significantly when we run very fast networking workloads; our case is a 40Gb networking XDP test. Software overhead is almost negligible, but it's the IOTLB miss (based on our analysis) which kills the performance. We observed the same performance issue even with software passthrough (identity mapping); only the hardware passthrough survives. The pps with iommu (with software passthrough) is only about ~30% of that without it. This is a limitation in hardware based on our observation, so we'd like to disable the IOMMU force on, but we do want to use TBOOT and we can sacrifice the DMA security bought by IOMMU. I must admit I know nothing about TBOOT, but the TBOOT guys (cc-ed) think not enabling the IOMMU is totally ok. So introduce a new boot option to disable the force on. It's kind of silly that we need to run into intel_iommu_init even without force on, but we need to disable the TBOOT PMR registers. For systems without the boot option, nothing is changed. Signed-off-by: Shaohua Li Signed-off-by: Joerg Roedel --- Documentation/admin-guide/kernel-parameters.txt | 9 +++++++++ arch/x86/kernel/tboot.c | 3 +++ drivers/iommu/intel-iommu.c | 18 ++++++++++++++++++ include/linux/dma_remapping.h | 1 + 4 files changed, 31 insertions(+) (limited to 'include') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 2ba45caabada..17135bfade6a 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1578,6 +1578,15 @@ extended tables themselves, and also PASID support. With this option set, extended tables will not be used even on hardware which claims to support them. + tboot_noforce [Default Off] + Do not force the Intel IOMMU enabled under tboot. + By default, tboot will force Intel IOMMU on, which + could harm performance of some high-throughput + devices like 40GBit network cards, even if identity + mapping is enabled. + Note that using this option lowers the security + provided by tboot because it makes the system + vulnerable to DMA attacks. intel_idle.max_cstate= [KNL,HW,ACPI,X86] 0 disables intel_idle and fall back on acpi_idle.
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index b868fa1b812b..edbdfe6ab60a 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -510,6 +510,9 @@ int tboot_force_iommu(void) if (!tboot_enabled()) return 0; + if (!intel_iommu_tboot_noforce) + return 1; + if (no_iommu || swiotlb || dmar_disabled) pr_warning("Forcing Intel-IOMMU to enabled\n"); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 5f08ba13972b..b0ced1c13713 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -183,6 +183,7 @@ static int rwbf_quirk; * (used when kernel is launched w/ TXT) */ static int force_on = 0; +int intel_iommu_tboot_noforce; /* * 0: Present @@ -607,6 +608,10 @@ static int __init intel_iommu_setup(char *str) "Intel-IOMMU: enable pre-production PASID support\n"); intel_iommu_pasid28 = 1; iommu_identity_mapping |= IDENTMAP_GFX; + } else if (!strncmp(str, "tboot_noforce", 13)) { + printk(KERN_INFO + "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); + intel_iommu_tboot_noforce = 1; } str += strcspn(str, ","); @@ -4850,6 +4855,19 @@ int __init intel_iommu_init(void) } if (no_iommu || dmar_disabled) { + /* + * We exit the function here to ensure IOMMU's remapping and + * mempool aren't setup, which means that the IOMMU's PMRs + * won't be disabled via the call to init_dmars(). So disable + * it explicitly here. The PMRs were setup by tboot prior to + * calling SENTER, but the kernel is expected to reset/tear + * down the PMRs. + */ + if (intel_iommu_tboot_noforce) { + for_each_iommu(iommu, drhd) + iommu_disable_protect_mem_regions(iommu); + } + /* + * Make sure the IOMMUs are switched off, even when we * boot into a kexec kernel and the previous kernel left diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index 187c10299722..90884072fa73 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h @@ -39,6 +39,7 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); extern int dmar_disabled; extern int intel_iommu_enabled; +extern int intel_iommu_tboot_noforce; #else static inline int iommu_calculate_agaw(struct intel_iommu *iommu) { -- cgit v1.2.3 From b7ecf663c75eed1e764f57281f9508c49c18516e Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 21 Apr 2017 12:47:40 +0200 Subject: ACPI / bus: Introduce a list of ids for "always present" devices Several Bay / Cherry Trail devices (all of which ship with Windows 10) hide the LPSS PWM controller in ACPI; typically the _STA method looks like this: Method (_STA, 0, NotSerialized) // _STA: Status { If (OSID == One) { Return (Zero) } Return (0x0F) } Here OSID is some dark magic seen in all Cherry Trail ACPI tables that makes the machine behave differently depending on which OS it *thinks* it is booting. It gets set in a number of ways which we cannot control; on some newer machines it is simply hardcoded to "One", aka win10. This causes the PWM controller to get hidden, which means Linux cannot control the backlight level on cht based tablets / laptops. Since loading the driver for this does no harm (the only in-kernel user of it is the i915 driver, which only uses it when it needs it), this commit makes acpi_bus_get_status() always set status to ACPI_STA_DEFAULT for the LPSS PWM device, fixing the lack of backlight control. Signed-off-by: Hans de Goede [ rjw: Rename the new file to utils.c ] Signed-off-by: Rafael J.
Wysocki --- drivers/acpi/Makefile | 1 + drivers/acpi/bus.c | 5 +++ drivers/acpi/x86/utils.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++ include/acpi/acpi_bus.h | 9 +++++ 4 files changed, 100 insertions(+) create mode 100644 drivers/acpi/x86/utils.c (limited to 'include') diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index d94f92f88ca1..2a81d278bcf3 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -50,6 +50,7 @@ acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o acpi-y += sysfs.o acpi-y += property.o acpi-$(CONFIG_X86) += acpi_cmos_rtc.o +acpi-$(CONFIG_X86) += x86/utils.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 34fbe027e73a..784bda663d16 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -114,6 +114,11 @@ int acpi_bus_get_status(struct acpi_device *device) acpi_status status; unsigned long long sta; + if (acpi_device_always_present(device)) { + acpi_set_device_status(device, ACPI_STA_DEFAULT); + return 0; + } + status = acpi_bus_get_status_handle(device->handle, &sta); if (ACPI_FAILURE(status)) return -ENODEV; diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c new file mode 100644 index 000000000000..c8e90ef4485f --- /dev/null +++ b/drivers/acpi/x86/utils.c @@ -0,0 +1,85 @@ +/* + * X86 ACPI Utility Functions + * + * Copyright (C) 2017 Hans de Goede + * + * Based on various non upstream patches to support the CHT Whiskey Cove PMIC: + * Copyright (C) 2013-2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include "../internal.h" + +/* + * Some ACPI devices are hidden (status == 0x0) in recent BIOS-es because + * some recent Windows drivers bind to one device but poke at multiple + * devices at the same time, so the others get hidden. + * We work around this by always reporting ACPI_STA_DEFAULT for these + * devices. Note this MUST only be done for devices where this is safe. + * + * This forcing of devices to be present is limited to specific CPU (SoC) + * models both to avoid potentially causing trouble on other models and + * because some HIDs are re-used on different SoCs for completely + * different devices. + */ +struct always_present_id { + struct acpi_device_id hid[2]; + struct x86_cpu_id cpu_ids[2]; + const char *uid; +}; + +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +#define ENTRY(hid, uid, cpu_models) { \ + { { hid, }, {} }, \ + { cpu_models, {} }, \ + uid, \ +} + +static const struct always_present_id always_present_ids[] = { + /* + * Bay / Cherry Trail PWM directly poked by GPU driver in win10, + * but Linux uses a separate PWM driver, harmless if not used. 
+ */ + ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1)), + ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)), +}; + +bool acpi_device_always_present(struct acpi_device *adev) +{ + u32 *status = (u32 *)&adev->status; + u32 old_status = *status; + bool ret = false; + unsigned int i; + + /* acpi_match_device_ids checks status, so set it to default */ + *status = ACPI_STA_DEFAULT; + for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) { + if (acpi_match_device_ids(adev, always_present_ids[i].hid)) + continue; + + if (!adev->pnp.unique_id || + strcmp(adev->pnp.unique_id, always_present_ids[i].uid)) + continue; + + if (!x86_match_cpu(always_present_ids[i].cpu_ids)) + continue; + + if (old_status != ACPI_STA_DEFAULT) /* Log only once */ + dev_info(&adev->dev, + "Device [%s] is in always present list\n", + adev->pnp.bus_id); + + ret = true; + break; + } + *status = old_status; + + return ret; +} diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index ef0ae8aaa567..b86b90489140 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -586,6 +586,15 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); int acpi_disable_wakeup_device_power(struct acpi_device *dev); +#ifdef CONFIG_X86 +bool acpi_device_always_present(struct acpi_device *adev); +#else +static inline bool acpi_device_always_present(struct acpi_device *adev) +{ + return false; +} +#endif + #ifdef CONFIG_PM acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, void (*work_func)(struct work_struct *work)); -- cgit v1.2.3 From bb1e23e66e6237ff7a1824b37366540a89149c33 Mon Sep 17 00:00:00 2001 From: Lv Zheng Date: Wed, 26 Apr 2017 16:18:49 +0800 Subject: ACPICA: iasl: Fix IORT SMMU GSI disassembling ACPICA commit 637b88de24a78c20478728d9d66632b06fcaa5bf If the IORT template is compiled and then iort.aml binary disassembled to iort.dsl, SMMUv1 node lists incorrect offset for SMMU_Nsg_cfg_irpt Interrupt: [0ECh 0236 8] SMMU_Nsg_irpt Interrupt : 0000000000000000 [0ECh 0236 8] SMMU_Nsg_cfg_irpt Interrupt : 0000000000000000 This is because iasl hasn't implemented SMMU GSI decoding yet. This patch fixes this issue by preparing structures for decoding IORT SMMU GSI. ACPICA BZ 1340, reported by Alexei Fedorov, fixed by Lv Zheng. Link: https://github.com/acpica/acpica/commit/637b88de Link: https://bugs.acpica.org/show_bug.cgi?id=1340 Reported-by: Alexei Fedorov Signed-off-by: Lv Zheng Signed-off-by: Bob Moore Signed-off-by: Rafael J. Wysocki --- include/acpi/actbl2.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include') diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 7aee9fb3bd1f..83666cb3ec2c 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -783,6 +783,15 @@ struct acpi_iort_smmu { #define ACPI_IORT_SMMU_DVM_SUPPORTED (1) #define ACPI_IORT_SMMU_COHERENT_WALK (1<<1) +/* Global interrupt format */ + +struct acpi_iort_smmu_gsi { + u32 nsg_irpt; + u32 nsg_irpt_flags; + u32 nsg_cfg_irpt; + u32 nsg_cfg_irpt_flags; +}; + struct acpi_iort_smmu_v3 { u64 base_address; /* SMMUv3 base address */ u32 flags; -- cgit v1.2.3 From 1e9a038b7fe9a8c10ef1238f4e695d5fbe0dd594 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Mon, 24 Apr 2017 16:02:09 -0700 Subject: srcu: Expedited grace periods with reduced memory contention Commit f60d231a87c5 ("srcu: Crude control of expedited grace periods") introduced a per-srcu_struct atomic counter to track outstanding requests for grace periods. This works, but represents a memory-contention bottleneck. This commit therefore uses the srcu_node combining tree to remove this bottleneck. This commit adds new ->srcu_gp_seq_needed_exp fields to the srcu_data, srcu_node, and srcu_struct structures, which track the farthest-in-the-future grace period that must be expedited, which in turn requires that all nearer-term grace periods also be expedited. Requests for expediting start with the srcu_data structure, run up through the srcu_node tree, and end at the srcu_struct structure. Note that it may be necessary to expedite a grace period that just now started, and this is handled by a new srcu_funnel_exp_start() function, which is invoked when the grace period itself is already in its way, but when that grace period was not marked as expedited. A new srcu_get_delay() function returns zero if there is at least one expedited SRCU grace period in flight, or SRCU_INTERVAL otherwise. This function is used to calculate delays: Normal grace periods are allowed to extend in order to cover more requests with a given grace-period computation, which decreases per-request overhead. Signed-off-by: Paul E. McKenney Tested-by: Mike Galbraith --- include/linux/srcutree.h | 4 +- kernel/rcu/srcutree.c | 135 +++++++++++++++++++++++++++++++++-------------- 2 files changed, 98 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 3865717df124..86df48d3e97b 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -43,6 +43,7 @@ struct srcu_data { spinlock_t lock ____cacheline_internodealigned_in_smp; struct rcu_segcblist srcu_cblist; /* List of callbacks.*/ unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ + unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ bool srcu_cblist_invoking; /* Invoking these CBs? */ struct delayed_work work; /* Context for CB invoking. */ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ @@ -63,6 +64,7 @@ struct srcu_node { /* is > ->srcu_gq_seq. */ unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */ /* have CBs for given GP? */ + unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ struct srcu_node *srcu_parent; /* Next up in tree. */ int grplo; /* Least CPU for node. */ int grphi; /* Biggest CPU for node. */ @@ -81,7 +83,7 @@ struct srcu_struct { unsigned int srcu_idx; /* Current rdr array element. */ unsigned long srcu_gp_seq; /* Grace-period seq #. */ unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */ - atomic_t srcu_exp_cnt; /* # ongoing expedited GPs. */ + unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */ unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */ struct mutex srcu_barrier_mutex; /* Serialize barrier ops. 
*/ diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 72b6cce5f591..4b98e6f45166 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -72,6 +72,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) snp->srcu_have_cbs[i] = 0; snp->srcu_data_have_cbs[i] = 0; } + snp->srcu_gp_seq_needed_exp = 0; snp->grplo = -1; snp->grphi = -1; if (snp == &sp->node[0]) { @@ -102,6 +103,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) rcu_segcblist_init(&sdp->srcu_cblist); sdp->srcu_cblist_invoking = false; sdp->srcu_gp_seq_needed = sp->srcu_gp_seq; + sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq; sdp->mynode = &snp_first[cpu / levelspread[level]]; for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { if (snp->grplo < 0) @@ -135,7 +137,6 @@ static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) mutex_init(&sp->srcu_gp_mutex); sp->srcu_idx = 0; sp->srcu_gp_seq = 0; - atomic_set(&sp->srcu_exp_cnt, 0); sp->srcu_barrier_seq = 0; mutex_init(&sp->srcu_barrier_mutex); atomic_set(&sp->srcu_barrier_cpu_cnt, 0); @@ -143,6 +144,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) if (!is_static) sp->sda = alloc_percpu(struct srcu_data); init_srcu_struct_nodes(sp, is_static); + sp->srcu_gp_seq_needed_exp = 0; smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ return sp->sda ? 0 : -ENOMEM; } @@ -307,6 +309,18 @@ static bool srcu_readers_active(struct srcu_struct *sp) #define SRCU_INTERVAL 1 +/* + * Return grace-period delay, zero if there are expedited grace + * periods pending, SRCU_INTERVAL otherwise. + */ +static unsigned long srcu_get_delay(struct srcu_struct *sp) +{ + if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq), + READ_ONCE(sp->srcu_gp_seq_needed_exp))) + return 0; + return SRCU_INTERVAL; +} + /** * cleanup_srcu_struct - deconstruct a sleep-RCU structure * @sp: structure to clean up. @@ -318,7 +332,8 @@ void cleanup_srcu_struct(struct srcu_struct *sp) { int cpu; - WARN_ON_ONCE(atomic_read(&sp->srcu_exp_cnt)); + if (WARN_ON(!srcu_get_delay(sp))) + return; /* Leakage unless caller handles error. */ if (WARN_ON(srcu_readers_active(sp))) return; /* Leakage unless caller handles error. */ flush_delayed_work(&sp->work); @@ -444,15 +459,14 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) * schedule this invocation on the corresponding CPUs. */ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, - unsigned long mask) + unsigned long mask, unsigned long delay) { int cpu; for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { if (!(mask & (1 << (cpu - snp->grplo)))) continue; - srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), - atomic_read(&sp->srcu_exp_cnt) ? 
0 : SRCU_INTERVAL); + srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay); } } @@ -467,6 +481,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, */ static void srcu_gp_end(struct srcu_struct *sp) { + unsigned long cbdelay; bool cbs; unsigned long gpseq; int idx; @@ -481,8 +496,11 @@ static void srcu_gp_end(struct srcu_struct *sp) spin_lock_irq(&sp->gp_lock); idx = rcu_seq_state(sp->srcu_gp_seq); WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); + cbdelay = srcu_get_delay(sp); rcu_seq_end(&sp->srcu_gp_seq); gpseq = rcu_seq_current(&sp->srcu_gp_seq); + if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq)) + sp->srcu_gp_seq_needed_exp = gpseq; spin_unlock_irq(&sp->gp_lock); mutex_unlock(&sp->srcu_gp_mutex); /* A new grace period can start at this point. But only one. */ @@ -497,12 +515,14 @@ static void srcu_gp_end(struct srcu_struct *sp) cbs = snp->srcu_have_cbs[idx] == gpseq; snp->srcu_have_cbs[idx] = gpseq; rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); + if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq)) + snp->srcu_gp_seq_needed_exp = gpseq; mask = snp->srcu_data_have_cbs[idx]; snp->srcu_data_have_cbs[idx] = 0; spin_unlock_irq(&snp->lock); if (cbs) { smp_mb(); /* GP end before CB invocation. */ - srcu_schedule_cbs_snp(sp, snp, mask); + srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); } } @@ -517,15 +537,43 @@ static void srcu_gp_end(struct srcu_struct *sp) srcu_gp_start(sp); spin_unlock_irq(&sp->gp_lock); /* Throttle expedited grace periods: Should be rare! */ - srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) && - rcu_seq_ctr(gpseq) & 0xf - ? 0 - : SRCU_INTERVAL); + srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff + ? 0 : SRCU_INTERVAL); } else { spin_unlock_irq(&sp->gp_lock); } } +/* + * Funnel-locking scheme to scalably mediate many concurrent expedited + * grace-period requests. This function is invoked for the first known + * expedited request for a grace period that has already been requested, + * but without expediting. To start a completely new grace period, + * whether expedited or not, use srcu_funnel_gp_start() instead. + */ +static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, + unsigned long s) +{ + unsigned long flags; + + for (; snp != NULL; snp = snp->srcu_parent) { + if (rcu_seq_done(&sp->srcu_gp_seq, s) || + ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s)) + return; + spin_lock_irqsave(&snp->lock, flags); + if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) { + spin_unlock_irqrestore(&snp->lock, flags); + return; + } + WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); + spin_unlock_irqrestore(&snp->lock, flags); + } + spin_lock_irqsave(&sp->gp_lock, flags); + if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) + sp->srcu_gp_seq_needed_exp = s; + spin_unlock_irqrestore(&sp->gp_lock, flags); +} + /* * Funnel-locking scheme to scalably mediate many concurrent grace-period * requests. The winner has to do the work of actually starting grace @@ -533,9 +581,8 @@ static void srcu_gp_end(struct srcu_struct *sp) * number is recorded on at least their leaf srcu_node structure, or they * must take steps to invoke their own callbacks. 
*/ -static void srcu_funnel_gp_start(struct srcu_struct *sp, - struct srcu_data *sdp, - unsigned long s) +static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, + unsigned long s, bool do_norm) { unsigned long flags; int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs); @@ -554,13 +601,20 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, spin_unlock_irqrestore(&snp->lock, flags); if (snp == sdp->mynode && snp_seq != s) { smp_mb(); /* CBs after GP! */ - srcu_schedule_cbs_sdp(sdp, 0); + srcu_schedule_cbs_sdp(sdp, do_norm + ? SRCU_INTERVAL + : 0); + return; } + if (!do_norm) + srcu_funnel_exp_start(sp, snp, s); return; } snp->srcu_have_cbs[idx] = s; if (snp == sdp->mynode) snp->srcu_data_have_cbs[idx] |= sdp->grpmask; + if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s)) + snp->srcu_gp_seq_needed_exp = s; spin_unlock_irqrestore(&snp->lock, flags); } @@ -573,6 +627,8 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, */ smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/ } + if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) + sp->srcu_gp_seq_needed_exp = s; /* If grace period not already done and none in progress, start it. */ if (!rcu_seq_done(&sp->srcu_gp_seq, s) && @@ -580,9 +636,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); srcu_gp_start(sp); queue_delayed_work(system_power_efficient_wq, &sp->work, - atomic_read(&sp->srcu_exp_cnt) - ? 0 - : SRCU_INTERVAL); + srcu_get_delay(sp)); } spin_unlock_irqrestore(&sp->gp_lock, flags); } @@ -597,7 +651,7 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) for (;;) { if (srcu_readers_active_idx_check(sp, idx)) return true; - if (--trycount + !!atomic_read(&sp->srcu_exp_cnt) <= 0) + if (--trycount + !srcu_get_delay(sp) <= 0) return false; udelay(SRCU_RETRY_CHECK_DELAY); } @@ -650,10 +704,11 @@ static void srcu_flip(struct srcu_struct *sp) * srcu_read_lock(), and srcu_read_unlock() that are all passed the same * srcu_struct structure. */ -void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, - rcu_callback_t func) +void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, + rcu_callback_t func, bool do_norm) { unsigned long flags; + bool needexp = false; bool needgp = false; unsigned long s; struct srcu_data *sdp; @@ -672,16 +727,28 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, sdp->srcu_gp_seq_needed = s; needgp = true; } + if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { + sdp->srcu_gp_seq_needed_exp = s; + needexp = true; + } spin_unlock_irqrestore(&sdp->lock, flags); if (needgp) - srcu_funnel_gp_start(sp, sdp, s); + srcu_funnel_gp_start(sp, sdp, s, do_norm); + else if (needexp) + srcu_funnel_exp_start(sp, sdp->mynode, s); +} + +void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, + rcu_callback_t func) +{ + __call_srcu(sp, rhp, func, true); } EXPORT_SYMBOL_GPL(call_srcu); /* * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). 
*/ -static void __synchronize_srcu(struct srcu_struct *sp) +static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) { struct rcu_synchronize rcu; @@ -697,7 +764,7 @@ static void __synchronize_srcu(struct srcu_struct *sp) check_init_srcu_struct(sp); init_completion(&rcu.completion); init_rcu_head_on_stack(&rcu.head); - call_srcu(sp, &rcu.head, wakeme_after_rcu); + __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); wait_for_completion(&rcu.completion); destroy_rcu_head_on_stack(&rcu.head); } @@ -714,18 +781,7 @@ static void __synchronize_srcu(struct srcu_struct *sp) */ void synchronize_srcu_expedited(struct srcu_struct *sp) { - bool do_norm = rcu_gp_is_normal(); - - check_init_srcu_struct(sp); - if (!do_norm) { - atomic_inc(&sp->srcu_exp_cnt); - smp_mb__after_atomic(); /* increment before GP. */ - } - __synchronize_srcu(sp); - if (!do_norm) { - smp_mb__before_atomic(); /* GP before decrement. */ - WARN_ON_ONCE(atomic_dec_return(&sp->srcu_exp_cnt) < 0); - } + __synchronize_srcu(sp, rcu_gp_is_normal()); } EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); @@ -773,7 +829,7 @@ void synchronize_srcu(struct srcu_struct *sp) if (rcu_gp_is_expedited()) synchronize_srcu_expedited(sp); else - __synchronize_srcu(sp); + __synchronize_srcu(sp, true); } EXPORT_SYMBOL_GPL(synchronize_srcu); @@ -1008,14 +1064,13 @@ void process_srcu(struct work_struct *work) sp = container_of(work, struct srcu_struct, work.work); srcu_advance_state(sp); - srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL); + srcu_reschedule(sp, srcu_get_delay(sp)); } EXPORT_SYMBOL_GPL(process_srcu); void srcutorture_get_gp_data(enum rcutorture_type test_type, - struct srcu_struct *sp, int *flags, - unsigned long *gpnum, - unsigned long *completed) + struct srcu_struct *sp, int *flags, + unsigned long *gpnum, unsigned long *completed) { if (test_type != SRCU_FLAVOR) return; -- cgit v1.2.3 From 22607d66bbc3e81140d3bcf08894f4378eb36428 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 25 Apr 2017 14:03:11 -0700 Subject: srcu: Specify auto-expedite holdoff time On small systems, in the absence of readers, expedited SRCU grace periods can complete in less than a microsecond. This means that an eight-CPU system can have all CPUs doing synchronize_srcu() in a tight loop and almost always expedite. This might actually be desirable in some situations, but in general it is a good way to needlessly burn CPU cycles. And in those situations where it is desirable, your friend is the function synchronize_srcu_expedited(). For other situations, this commit adds a kernel parameter that specifies a holdoff between completing the last SRCU grace period and auto-expediting the next. If the next grace period starts before the holdoff expires, auto-expediting is disabled. The holdoff is 50 microseconds by default, and can be tuned to the desired number of nanoseconds. A value of zero disables auto-expediting. Signed-off-by: Paul E. 
McKenney Tested-by: Mike Galbraith --- Documentation/admin-guide/kernel-parameters.txt | 8 ++++++++ include/linux/srcutree.h | 1 + kernel/rcu/srcutree.c | 18 +++++++++++++++++- 3 files changed, 26 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index facc20a3f962..4a4b9266c4de 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3779,6 +3779,14 @@ spia_pedr= spia_peddr= + srcutree.exp_holdoff [KNL] + Specifies how many nanoseconds must elapse + since the end of the last SRCU grace period for + a given srcu_struct until the next normal SRCU + grace period will be considered for automatic + expediting. Set to zero to disable automatic + expediting. + stacktrace [FTRACE] Enabled the stack tracer on boot up. diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 86df48d3e97b..32e86d85fd11 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -84,6 +84,7 @@ struct srcu_struct { unsigned long srcu_gp_seq; /* Grace-period seq #. */ unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ + unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */ struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */ unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */ struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */ diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 2286e06fd159..74c283f9d15e 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -34,10 +34,14 @@ #include #include #include +#include #include #include "rcu.h" +ulong exp_holdoff = 50 * 1000; /* Holdoff (ns) for auto-expediting. */ +module_param(exp_holdoff, ulong, 0444); + static void srcu_invoke_callbacks(struct work_struct *work); static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); @@ -145,6 +149,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) sp->sda = alloc_percpu(struct srcu_data); init_srcu_struct_nodes(sp, is_static); sp->srcu_gp_seq_needed_exp = 0; + sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ return sp->sda ? 0 : -ENOMEM; } @@ -498,6 +503,7 @@ static void srcu_gp_end(struct srcu_struct *sp) idx = rcu_seq_state(sp->srcu_gp_seq); WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); cbdelay = srcu_get_delay(sp); + sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); rcu_seq_end(&sp->srcu_gp_seq); gpseq = rcu_seq_current(&sp->srcu_gp_seq); if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq)) @@ -700,9 +706,10 @@ static void srcu_flip(struct srcu_struct *sp) */ static bool srcu_might_be_idle(struct srcu_struct *sp) { + unsigned long curseq; unsigned long flags; struct srcu_data *sdp; - unsigned long curseq; + unsigned long t; /* If the local srcu_data structure has callbacks, not idle. */ local_irq_save(flags); @@ -718,6 +725,15 @@ static bool srcu_might_be_idle(struct srcu_struct *sp) * Exact information would require acquiring locks, which would * kill scalability, hence the probabalistic nature of the probe. */ + + /* First, see if enough time has passed since the last GP. */ + t = ktime_get_mono_fast_ns(); + if (exp_holdoff == 0 || + time_in_range_open(t, sp->srcu_last_gp_end, + sp->srcu_last_gp_end + exp_holdoff)) + return false; /* Too soon after last GP. 
*/ + + /* Next, check for probable idleness. */ curseq = rcu_seq_current(&sp->srcu_gp_seq); smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed))) -- cgit v1.2.3 From e8245c1b1a3bb8474f91c69ccd13637d3589bb2c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 26 Apr 2017 15:34:06 +0200 Subject: iommu: Include device.h in iommu.h We make use of 'struct device' in iommu.h, so include device.h to make it available explicitly. Re-order the other headers while at it. Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 6a6de187ddc0..3b4fe4b79d20 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -19,11 +19,13 @@ #ifndef __LINUX_IOMMU_H #define __LINUX_IOMMU_H +#include +#include +#include #include #include #include -#include -#include + #include #define IOMMU_READ (1 << 0) -- cgit v1.2.3 From 207c6e36f122ebb1164d611c9f34f128313f47d5 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 26 Apr 2017 15:39:28 +0200 Subject: iommu: Move report_iommu_fault() to iommu.c The function is in no fast-path, there is no need for it to be static inline in a header file. This also removes the need to include iommu trace-points in iommu.h. Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 42 ++++++++++++++++++++++++++++++++++++++++++ include/linux/iommu.h | 41 ++--------------------------------------- 2 files changed, 44 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9170fd498f46..4cb5792cbc21 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1655,6 +1655,48 @@ void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) } EXPORT_SYMBOL_GPL(iommu_domain_window_disable); +/** + * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework + * @domain: the iommu domain where the fault has happened + * @dev: the device where the fault has happened + * @iova: the faulting address + * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) + * + * This function should be called by the low-level IOMMU implementations + * whenever IOMMU faults happen, to allow high-level users, that are + * interested in such events, to know about them. + * + * This event may be useful for several possible use cases: + * - mere logging of the event + * - dynamic TLB/PTE loading + * - if restarting of the faulting device is required + * + * Returns 0 on success and an appropriate error code otherwise (if dynamic + * PTE/TLB loading will one day be supported, implementations will be able + * to tell whether it succeeded or not according to this return value). + * + * Specifically, -ENOSYS is returned if a fault handler isn't installed + * (though fault handlers can also return -ENOSYS, in case they want to + * elicit the default behavior of the IOMMU drivers). + */ +int report_iommu_fault(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags) +{ + int ret = -ENOSYS; + + /* + * if upper layers showed interest and installed a fault handler, + * invoke it. 
+ */ + if (domain->handler) + ret = domain->handler(domain, dev, iova, flags, + domain->handler_token); + + trace_io_page_fault(dev, iova, flags); + return ret; +} +EXPORT_SYMBOL_GPL(report_iommu_fault); + static int __init iommu_init(void) { iommu_group_kset = kset_create_and_add("iommu_groups", diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 3b4fe4b79d20..abaa0ca848bc 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -330,46 +330,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t offset, u64 size, int prot); extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); -/** - * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework - * @domain: the iommu domain where the fault has happened - * @dev: the device where the fault has happened - * @iova: the faulting address - * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) - * - * This function should be called by the low-level IOMMU implementations - * whenever IOMMU faults happen, to allow high-level users, that are - * interested in such events, to know about them. - * - * This event may be useful for several possible use cases: - * - mere logging of the event - * - dynamic TLB/PTE loading - * - if restarting of the faulting device is required - * - * Returns 0 on success and an appropriate error code otherwise (if dynamic - * PTE/TLB loading will one day be supported, implementations will be able - * to tell whether it succeeded or not according to this return value). - * - * Specifically, -ENOSYS is returned if a fault handler isn't installed - * (though fault handlers can also return -ENOSYS, in case they want to - * elicit the default behavior of the IOMMU drivers). - */ -static inline int report_iommu_fault(struct iommu_domain *domain, - struct device *dev, unsigned long iova, int flags) -{ - int ret = -ENOSYS; - /* - * if upper layers showed interest and installed a fault handler, - * invoke it. - */ - if (domain->handler) - ret = domain->handler(domain, dev, iova, flags, - domain->handler_token); - - trace_io_page_fault(dev, iova, flags); - return ret; -} +extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags); static inline size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, -- cgit v1.2.3 From 5af50993850a48ba749b122173d789ea90976c72 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 5 Apr 2017 17:54:56 +1000 Subject: KVM: PPC: Book3S HV: Native usage of the XIVE interrupt controller This patch makes KVM capable of using the XIVE interrupt controller to provide the standard PAPR "XICS" style hypercalls. It is necessary for proper operations when the host uses XIVE natively. This has been lightly tested on an actual system, including PCI pass-through with a TG3 device. 
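A minimal sketch of the dispatch pattern this patch relies on throughout: wherever KVM previously called straight into the XICS emulation, it now tests xive_enabled() at runtime and routes the request either to the legacy XICS code or to the new XIVE backend. The kvmppc_xics_*/kvmppc_xive_* pairs and xive_enabled() come from the patch below; the wrapper name and the error message here are illustrative only, not part of the patch.

	/* Sketch of the xive_enabled() dispatch used for the XICS-style services. */
	static int set_source_config(struct kvm *kvm, u32 irq, u32 server, u32 priority)
	{
		int rc;

		if (xive_enabled())
			rc = kvmppc_xive_set_xive(kvm, irq, server, priority);
		else
			rc = kvmppc_xics_set_xive(kvm, irq, server, priority);
		if (rc)
			pr_err("failed to configure source %u: %d\n", irq, rc);
		return rc;
	}

The same shape appears in book3s.c for kvm_set_irq() and in book3s_rtas.c for the set-xive, get-xive, int-on and int-off RTAS services later in this patch.
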
Signed-off-by: Benjamin Herrenschmidt [mpe: Cleanup pr_xxx(), unsplit pr_xxx() strings, etc., fix build failures by adding KVM_XIVE which depends on KVM_XICS and XIVE, and adding empty stubs for the kvm_xive_xxx() routines, fixup subject, integrate fixes from Paul for building PR=y HV=n] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/kvm_book3s_asm.h | 2 + arch/powerpc/include/asm/kvm_host.h | 28 +- arch/powerpc/include/asm/kvm_ppc.h | 74 ++ arch/powerpc/include/asm/xive.h | 9 +- arch/powerpc/kernel/asm-offsets.c | 10 + arch/powerpc/kvm/Kconfig | 5 + arch/powerpc/kvm/Makefile | 4 +- arch/powerpc/kvm/book3s.c | 75 +- arch/powerpc/kvm/book3s_hv.c | 51 +- arch/powerpc/kvm/book3s_hv_builtin.c | 103 ++ arch/powerpc/kvm/book3s_hv_rm_xics.c | 10 +- arch/powerpc/kvm/book3s_hv_rm_xive.c | 47 + arch/powerpc/kvm/book3s_hv_rmhandlers.S | 62 +- arch/powerpc/kvm/book3s_rtas.c | 21 +- arch/powerpc/kvm/book3s_xics.c | 35 +- arch/powerpc/kvm/book3s_xics.h | 7 + arch/powerpc/kvm/book3s_xive.c | 1893 +++++++++++++++++++++++++++++ arch/powerpc/kvm/book3s_xive.h | 256 ++++ arch/powerpc/kvm/book3s_xive_template.c | 503 ++++++++ arch/powerpc/kvm/irq.h | 1 + arch/powerpc/kvm/powerpc.c | 17 +- arch/powerpc/platforms/powernv/opal.c | 1 + arch/powerpc/sysdev/xive/common.c | 142 ++- arch/powerpc/sysdev/xive/native.c | 86 +- include/linux/kvm_host.h | 1 - virt/kvm/kvm_main.c | 4 - 26 files changed, 3358 insertions(+), 89 deletions(-) create mode 100644 arch/powerpc/kvm/book3s_hv_rm_xive.c create mode 100644 arch/powerpc/kvm/book3s_xive.c create mode 100644 arch/powerpc/kvm/book3s_xive.h create mode 100644 arch/powerpc/kvm/book3s_xive_template.c (limited to 'include') diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0593d9479f74..b148496ffe36 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -111,6 +111,8 @@ struct kvmppc_host_state { struct kvm_vcpu *kvm_vcpu; struct kvmppc_vcore *kvm_vcore; void __iomem *xics_phys; + void __iomem *xive_tima_phys; + void __iomem *xive_tima_virt; u32 saved_xirr; u64 dabr; u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 7bba8f415627..5a8ab4a758f1 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -205,6 +205,12 @@ struct kvmppc_spapr_tce_table { /* XICS components, defined in book3s_xics.c */ struct kvmppc_xics; struct kvmppc_icp; +extern struct kvm_device_ops kvm_xics_ops; + +/* XIVE components, defined in book3s_xive.c */ +struct kvmppc_xive; +struct kvmppc_xive_vcpu; +extern struct kvm_device_ops kvm_xive_ops; struct kvmppc_passthru_irqmap; @@ -293,6 +299,7 @@ struct kvm_arch { #endif #ifdef CONFIG_KVM_XICS struct kvmppc_xics *xics; + struct kvmppc_xive *xive; struct kvmppc_passthru_irqmap *pimap; #endif struct kvmppc_ops *kvm_ops; @@ -421,7 +428,7 @@ struct kvmppc_passthru_irqmap { #define KVMPPC_IRQ_DEFAULT 0 #define KVMPPC_IRQ_MPIC 1 -#define KVMPPC_IRQ_XICS 2 +#define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */ #define MMIO_HPTE_CACHE_SIZE 4 @@ -443,6 +450,21 @@ struct mmio_hpte_cache { struct openpic; +/* W0 and W1 of a XIVE thread management context */ +union xive_tma_w01 { + struct { + u8 nsr; + u8 cppr; + u8 ipb; + u8 lsmfb; + u8 ack; + u8 inc; + u8 age; + u8 pipr; + }; + __be64 w01; +}; + struct kvm_vcpu_arch { ulong host_stack; u32 host_pid; @@ -688,6 +710,10 @@ struct kvm_vcpu_arch { struct openpic 
*mpic; /* KVM_IRQ_MPIC */ #ifdef CONFIG_KVM_XICS struct kvmppc_icp *icp; /* XICS presentation controller */ + struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */ + __be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */ + u32 xive_pushed; /* Is the VP pushed on the physical CPU ? */ + union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */ #endif #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index c3877992eff9..ed52b13d9ffb 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -225,6 +225,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp); extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu); extern void kvmppc_rtas_tokens_free(struct kvm *kvm); + extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority); extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, @@ -412,6 +413,14 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr; } +static inline void kvmppc_set_xive_tima(int cpu, + unsigned long phys_addr, + void __iomem *virt_addr) +{ + paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr; + paca[cpu].kvm_hstate.xive_tima_virt = virt_addr; +} + static inline u32 kvmppc_get_xics_latch(void) { u32 xirr; @@ -442,6 +451,11 @@ static inline void __init kvm_cma_reserve(void) static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) {} +static inline void kvmppc_set_xive_tima(int cpu, + unsigned long phys_addr, + void __iomem *virt_addr) +{} + static inline u32 kvmppc_get_xics_latch(void) { return 0; @@ -492,6 +506,10 @@ extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr, struct kvmppc_irq_map *irq_map, struct kvmppc_passthru_irqmap *pimap, bool *again); + +extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, + int level, bool line_status); + extern int h_ipi_redirect; #else static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap( @@ -509,6 +527,60 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) { return 0; } #endif +#ifdef CONFIG_KVM_XIVE +/* + * Below the first "xive" is the "eXternal Interrupt Virtualization Engine" + * ie. P9 new interrupt controller, while the second "xive" is the legacy + * "eXternal Interrupt Vector Entry" which is the configuration of an + * interrupt on the "xics" interrupt controller on P8 and earlier. Those + * two function consume or produce a legacy "XIVE" state from the + * new "XIVE" interrupt controller. 
+ */ +extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, + u32 priority); +extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, + u32 *priority); +extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq); +extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq); +extern void kvmppc_xive_init_module(void); +extern void kvmppc_xive_exit_module(void); + +extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 cpu); +extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu); +extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc); +extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc); +extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu); +extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval); + +extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, + int level, bool line_status); +#else +static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, + u32 priority) { return -1; } +static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, + u32 *priority) { return -1; } +static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; } +static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; } +static inline void kvmppc_xive_init_module(void) { } +static inline void kvmppc_xive_exit_module(void) { } + +static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; } +static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { } +static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc) { return -ENODEV; } +static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc) { return -ENODEV; } +static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; } +static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; } + +static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, + int level, bool line_status) { return -ENODEV; } +#endif /* CONFIG_KVM_XIVE */ + /* * Prototypes for functions called only from assembler code. * Having prototypes reduces sparse errors. 
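The #else stubs above exist so that common code can reference the kvmppc_xive_*() helpers without wrapping every call site in #ifdef CONFIG_KVM_XIVE: when XIVE support is compiled out, xive_enabled() callers fall back to the XICS path, and the stubs return benign error codes (-1, -EBUSY, -ENODEV, -ENOENT) if they are reached anyway. A small sketch under that assumption; the caller function is hypothetical:

	/* Hypothetical call site: compiles with or without CONFIG_KVM_XIVE. */
	static int example_source_on(struct kvm *kvm, u32 irq)
	{
		if (xive_enabled())
			return kvmppc_xive_int_on(kvm, irq);	/* static inline stub returns -1 if XIVE is compiled out */
		return kvmppc_xics_int_on(kvm, irq);
	}
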
@@ -546,6 +618,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, unsigned long slb_v, unsigned int status, bool data); unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu); +unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu); +unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server); int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr); int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 3cdbeaeac397..c8a822acf962 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -99,7 +99,6 @@ struct xive_q { #define XIVE_ESB_SET_PQ_01 0xd00 #define XIVE_ESB_SET_PQ_10 0xe00 #define XIVE_ESB_SET_PQ_11 0xf00 -#define XIVE_ESB_MASK XIVE_ESB_SET_PQ_01 #define XIVE_ESB_VAL_P 0x2 #define XIVE_ESB_VAL_Q 0x1 @@ -136,11 +135,11 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio, __be32 *qpage, u32 order, bool can_escalate); extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio); -extern bool __xive_irq_trigger(struct xive_irq_data *xd); -extern bool __xive_irq_retrigger(struct xive_irq_data *xd); -extern void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd); - +extern void xive_native_sync_source(u32 hw_irq); extern bool is_xive_irq(struct irq_chip *chip); +extern int xive_native_enable_vp(u32 vp_id); +extern int xive_native_disable_vp(u32 vp_id); +extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id); #else diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 4367e7df51a1..1822187813dc 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -630,6 +630,8 @@ int main(void) HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); + HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys); + HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt); HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); HSTATE_FIELD(HSTATE_PTID, ptid); @@ -715,6 +717,14 @@ int main(void) OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6); #endif +#ifdef CONFIG_KVM_XICS + DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu, + arch.xive_saved_state)); + DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu, + arch.xive_cam_word)); + DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed)); +#endif + #ifdef CONFIG_KVM_EXIT_TIMING OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu); OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl); diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 029be26b5a17..b9d66e53b773 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -196,6 +196,11 @@ config KVM_XICS Specification) interrupt controller architecture used on IBM POWER (pSeries) servers. 
+config KVM_XIVE + bool + default y + depends on KVM_XICS && PPC_XIVE_NATIVE && KVM_BOOK3S_HV_POSSIBLE + source drivers/vhost/Kconfig endif # VIRTUALIZATION diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index b87ccde2137a..d91a2604c496 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -74,7 +74,7 @@ kvm-hv-y += \ book3s_64_mmu_radix.o kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \ - book3s_hv_rm_xics.o + book3s_hv_rm_xics.o book3s_hv_rm_xive.o ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ @@ -89,6 +89,8 @@ endif kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \ book3s_xics.o +kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o + kvm-book3s_64-module-objs := \ $(common-objs-y) \ book3s.o \ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index aedacefd961d..cb8009cd688d 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "book3s.h" #include "trace.h" @@ -578,11 +579,14 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, break; #ifdef CONFIG_KVM_XICS case KVM_REG_PPC_ICP_STATE: - if (!vcpu->arch.icp) { + if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { r = -ENXIO; break; } - *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu)); + if (xive_enabled()) + *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu)); + else + *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu)); break; #endif /* CONFIG_KVM_XICS */ case KVM_REG_PPC_FSCR: @@ -648,12 +652,14 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, #endif /* CONFIG_VSX */ #ifdef CONFIG_KVM_XICS case KVM_REG_PPC_ICP_STATE: - if (!vcpu->arch.icp) { + if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { r = -ENXIO; break; } - r = kvmppc_xics_set_icp(vcpu, - set_reg_val(id, *val)); + if (xive_enabled()) + r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val)); + else + r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val)); break; #endif /* CONFIG_KVM_XICS */ case KVM_REG_PPC_FSCR: @@ -924,6 +930,50 @@ int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall) return kvm->arch.kvm_ops->hcall_implemented(hcall); } +#ifdef CONFIG_KVM_XICS +int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, + bool line_status) +{ + if (xive_enabled()) + return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level, + line_status); + else + return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level, + line_status); +} + +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi, + level, line_status); +} +static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, + bool line_status) +{ + return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status); +} + +int kvm_irq_map_gsi(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *entries, int gsi) +{ + entries->gsi = gsi; + entries->type = KVM_IRQ_ROUTING_IRQCHIP; + entries->set = kvmppc_book3s_set_irq; + entries->irqchip.irqchip = 0; + entries->irqchip.pin = gsi; + return 1; +} + +int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) +{ + return pin; +} + +#endif /* CONFIG_KVM_XICS */ + static int kvmppc_book3s_init(void) { int r; @@ -934,12 +984,25 @@ static int kvmppc_book3s_init(void) #ifdef CONFIG_KVM_BOOK3S_32_HANDLER r = kvmppc_book3s_init_pr(); #endif - 
return r; +#ifdef CONFIG_KVM_XICS +#ifdef CONFIG_KVM_XIVE + if (xive_enabled()) { + kvmppc_xive_init_module(); + kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS); + } else +#endif + kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS); +#endif + return r; } static void kvmppc_book3s_exit(void) { +#ifdef CONFIG_KVM_XICS + if (xive_enabled()) + kvmppc_xive_exit_module(); +#endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER kvmppc_book3s_exit_pr(); #endif diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index fadb75abfe37..128efb42ec4e 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -67,6 +67,7 @@ #include #include #include +#include #include "book3s.h" @@ -837,6 +838,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) case H_IPOLL: case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) { + if (xive_enabled()) { + ret = H_NOT_AVAILABLE; + return RESUME_GUEST; + } ret = kvmppc_xics_hcall(vcpu, req); break; } @@ -2947,8 +2952,12 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) r = kvmppc_book3s_hv_page_fault(run, vcpu, vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); - } else if (r == RESUME_PASSTHROUGH) - r = kvmppc_xics_rm_complete(vcpu, 0); + } else if (r == RESUME_PASSTHROUGH) { + if (WARN_ON(xive_enabled())) + r = H_SUCCESS; + else + r = kvmppc_xics_rm_complete(vcpu, 0); + } } while (is_kvmppc_resume_guest(r)); out: @@ -3400,10 +3409,20 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) /* * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed) * Set HVICE bit to enable hypervisor virtualization interrupts. + * Set HEIC to prevent OS interrupts to go to hypervisor (should + * be unnecessary but better safe than sorry in case we re-enable + * EE in HV mode with this LPCR still set) */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { lpcr &= ~LPCR_VPM0; - lpcr |= LPCR_HVICE; + lpcr |= LPCR_HVICE | LPCR_HEIC; + + /* + * If xive is enabled, we route 0x500 interrupts directly + * to the guest. + */ + if (xive_enabled()) + lpcr |= LPCR_LPES; } /* @@ -3533,7 +3552,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) struct kvmppc_irq_map *irq_map; struct kvmppc_passthru_irqmap *pimap; struct irq_chip *chip; - int i; + int i, rc = 0; if (!kvm_irq_bypass) return 1; @@ -3558,10 +3577,10 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) /* * For now, we only support interrupts for which the EOI operation * is an OPAL call followed by a write to XIRR, since that's - * what our real-mode EOI code does. 
+ * what our real-mode EOI code does, or a XIVE interrupt */ chip = irq_data_get_irq_chip(&desc->irq_data); - if (!chip || !is_pnv_opal_msi(chip)) { + if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) { pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n", host_irq, guest_gsi); mutex_unlock(&kvm->lock); @@ -3603,7 +3622,12 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) if (i == pimap->n_mapped) pimap->n_mapped++; - kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq); + if (xive_enabled()) + rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc); + else + kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq); + if (rc) + irq_map->r_hwirq = 0; mutex_unlock(&kvm->lock); @@ -3614,7 +3638,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) { struct irq_desc *desc; struct kvmppc_passthru_irqmap *pimap; - int i; + int i, rc = 0; if (!kvm_irq_bypass) return 0; @@ -3641,9 +3665,12 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) return -ENODEV; } - kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); + if (xive_enabled()) + rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc); + else + kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); - /* invalidate the entry */ + /* invalidate the entry (what do do on error from the above ?) */ pimap->mapped[i].r_hwirq = 0; /* @@ -3652,7 +3679,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) */ mutex_unlock(&kvm->lock); - return 0; + return rc; } static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons, @@ -3930,7 +3957,7 @@ static int kvmppc_book3s_init_hv(void) * indirectly, via OPAL. */ #ifdef CONFIG_SMP - if (!get_paca()->kvm_hstate.xics_phys) { + if (!xive_enabled() && !get_paca()->kvm_hstate.xics_phys) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index a752e29977e0..846b40cb3a62 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -32,6 +32,24 @@ #define KVM_CMA_CHUNK_ORDER 18 +#include "book3s_xics.h" +#include "book3s_xive.h" + +/* + * The XIVE module will populate these when it loads + */ +unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu); +unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server); +int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr); +int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr); +int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr); +EXPORT_SYMBOL_GPL(__xive_vm_h_xirr); +EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll); +EXPORT_SYMBOL_GPL(__xive_vm_h_ipi); +EXPORT_SYMBOL_GPL(__xive_vm_h_cppr); +EXPORT_SYMBOL_GPL(__xive_vm_h_eoi); + /* * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206) * should be power of 2. @@ -210,6 +228,7 @@ void kvmhv_rm_send_ipi(int cpu) __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); return; } + /* On POWER8 for IPIs to threads in the same core, use msgsnd. 
*/ if (cpu_has_feature(CPU_FTR_ARCH_207S) && cpu_first_thread_sibling(cpu) == @@ -406,6 +425,9 @@ static long kvmppc_read_one_intr(bool *again) u8 host_ipi; int64_t rc; + if (xive_enabled()) + return 1; + /* see if a host IPI is pending */ host_ipi = local_paca->kvm_hstate.host_ipi; if (host_ipi) @@ -490,3 +512,84 @@ static long kvmppc_read_one_intr(bool *again) return kvmppc_check_passthru(xisr, xirr, again); } + +#ifdef CONFIG_KVM_XICS +static inline bool is_rm(void) +{ + return !(mfmsr() & MSR_DR); +} + +unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_xirr(vcpu); + if (unlikely(!__xive_vm_h_xirr)) + return H_NOT_AVAILABLE; + return __xive_vm_h_xirr(vcpu); + } else + return xics_rm_h_xirr(vcpu); +} + +unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) +{ + vcpu->arch.gpr[5] = get_tb(); + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_xirr(vcpu); + if (unlikely(!__xive_vm_h_xirr)) + return H_NOT_AVAILABLE; + return __xive_vm_h_xirr(vcpu); + } else + return xics_rm_h_xirr(vcpu); +} + +unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_ipoll(vcpu, server); + if (unlikely(!__xive_vm_h_ipoll)) + return H_NOT_AVAILABLE; + return __xive_vm_h_ipoll(vcpu, server); + } else + return H_TOO_HARD; +} + +int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_ipi(vcpu, server, mfrr); + if (unlikely(!__xive_vm_h_ipi)) + return H_NOT_AVAILABLE; + return __xive_vm_h_ipi(vcpu, server, mfrr); + } else + return xics_rm_h_ipi(vcpu, server, mfrr); +} + +int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_cppr(vcpu, cppr); + if (unlikely(!__xive_vm_h_cppr)) + return H_NOT_AVAILABLE; + return __xive_vm_h_cppr(vcpu, cppr); + } else + return xics_rm_h_cppr(vcpu, cppr); +} + +int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_eoi(vcpu, xirr); + if (unlikely(!__xive_vm_h_eoi)) + return H_NOT_AVAILABLE; + return __xive_vm_h_eoi(vcpu, xirr); + } else + return xics_rm_h_eoi(vcpu, xirr); +} +#endif /* CONFIG_KVM_XICS */ diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 3a1a463a039a..f8068801ac36 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -485,7 +485,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, } -unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) +unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; @@ -523,8 +523,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) return check_too_hard(xics, icp); } -int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, - unsigned long mfrr) +int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; @@ -610,7 +610,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, return check_too_hard(xics, this_icp); } -int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) +int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics 
*xics = vcpu->kvm->arch.xics; @@ -730,7 +730,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq) return check_too_hard(xics, icp); } -int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) +int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) { struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c new file mode 100644 index 000000000000..abf5f01b6eb1 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c @@ -0,0 +1,47 @@ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "book3s_xive.h" + +/* XXX */ +#include +//#define DBG(fmt...) udbg_printf(fmt) +#define DBG(fmt...) do { } while(0) + +static inline void __iomem *get_tima_phys(void) +{ + return local_paca->kvm_hstate.xive_tima_phys; +} + +#undef XIVE_RUNTIME_CHECKS +#define X_PFX xive_rm_ +#define X_STATIC +#define X_STAT_PFX stat_rm_ +#define __x_tima get_tima_phys() +#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page)) +#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page)) +#define __x_readb __raw_rm_readb +#define __x_writeb __raw_rm_writeb +#define __x_readw __raw_rm_readw +#define __x_readq __raw_rm_readq +#define __x_writeq __raw_rm_writeq + +#include "book3s_xive_template.c" diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 7c6477d1840a..bdb3f76ceb6b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -30,6 +30,7 @@ #include #include #include +#include #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) @@ -970,6 +971,23 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) cmpwi r3, 512 /* 1 microsecond */ blt hdec_soon +#ifdef CONFIG_KVM_XICS + /* We are entering the guest on that thread, push VCPU to XIVE */ + ld r10, HSTATE_XIVE_TIMA_PHYS(r13) + cmpldi cr0, r10, r0 + beq no_xive + ld r11, VCPU_XIVE_SAVED_STATE(r4) + li r9, TM_QW1_OS + stdcix r11,r9,r10 + eieio + lwz r11, VCPU_XIVE_CAM_WORD(r4) + li r9, TM_QW1_OS + TM_WORD2 + stwcix r11,r9,r10 + li r9, 1 + stw r9, VCPU_XIVE_PUSHED(r4) +no_xive: +#endif /* CONFIG_KVM_XICS */ + deliver_guest_interrupt: ld r6, VCPU_CTR(r4) ld r7, VCPU_XER(r4) @@ -1307,6 +1325,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) blt deliver_guest_interrupt guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ +#ifdef CONFIG_KVM_XICS + /* We are exiting, pull the VP from the XIVE */ + lwz r0, VCPU_XIVE_PUSHED(r9) + cmpwi cr0, r0, 0 + beq 1f + li r7, TM_SPC_PULL_OS_CTX + li r6, TM_QW1_OS + mfmsr r0 + andi. r0, r0, MSR_IR /* in real mode? 
*/ + beq 2f + ld r10, HSTATE_XIVE_TIMA_VIRT(r13) + cmpldi cr0, r10, 0 + beq 1f + /* First load to pull the context, we ignore the value */ + lwzx r11, r7, r10 + eieio + /* Second load to recover the context state (Words 0 and 1) */ + ldx r11, r6, r10 + b 3f +2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) + cmpldi cr0, r10, 0 + beq 1f + /* First load to pull the context, we ignore the value */ + lwzcix r11, r7, r10 + eieio + /* Second load to recover the context state (Words 0 and 1) */ + ldcix r11, r6, r10 +3: std r11, VCPU_XIVE_SAVED_STATE(r9) + /* Fixup some of the state for the next load */ + li r10, 0 + li r0, 0xff + stw r10, VCPU_XIVE_PUSHED(r9) + stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) + stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) +1: +#endif /* CONFIG_KVM_XICS */ /* Save more register state */ mfdar r6 mfdsisr r7 @@ -2011,7 +2065,7 @@ hcall_real_table: .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table - .long 0 /* 0x70 - H_IPOLL */ + .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table #else .long 0 /* 0x64 - H_EOI */ @@ -2181,7 +2235,11 @@ hcall_real_table: .long 0 /* 0x2f0 */ .long 0 /* 0x2f4 */ .long 0 /* 0x2f8 */ - .long 0 /* 0x2fc */ +#ifdef CONFIG_KVM_XICS + .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table +#else + .long 0 /* 0x2fc - H_XIRR_X*/ +#endif .long DOTSYM(kvmppc_h_random) - hcall_real_table .globl hcall_real_table_end hcall_real_table_end: diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 20528701835b..2d3b2b1cc272 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -16,6 +16,7 @@ #include #include #include +#include #ifdef CONFIG_KVM_XICS static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) @@ -32,7 +33,10 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) server = be32_to_cpu(args->args[1]); priority = be32_to_cpu(args->args[2]); - rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); + if (xive_enabled()) + rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority); + else + rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); if (rc) rc = -3; out: @@ -52,7 +56,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) irq = be32_to_cpu(args->args[0]); server = priority = 0; - rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); + if (xive_enabled()) + rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority); + else + rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); if (rc) { rc = -3; goto out; @@ -76,7 +83,10 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) irq = be32_to_cpu(args->args[0]); - rc = kvmppc_xics_int_off(vcpu->kvm, irq); + if (xive_enabled()) + rc = kvmppc_xive_int_off(vcpu->kvm, irq); + else + rc = kvmppc_xics_int_off(vcpu->kvm, irq); if (rc) rc = -3; out: @@ -95,7 +105,10 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) irq = be32_to_cpu(args->args[0]); - rc = kvmppc_xics_int_on(vcpu->kvm, irq); + if (xive_enabled()) + rc = kvmppc_xive_int_on(vcpu->kvm, irq); + else + rc = kvmppc_xics_int_on(vcpu->kvm, irq); if (rc) rc = -3; out: diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index ef4fd528c193..e6829c415bc8 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -1307,8 +1307,8 @@ static int xics_set_source(struct 
kvmppc_xics *xics, long irq, u64 addr) return 0; } -int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, - bool line_status) +int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, + bool line_status) { struct kvmppc_xics *xics = kvm->arch.xics; @@ -1317,14 +1317,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, return ics_deliver_irq(xics, irq, level); } -int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry, - struct kvm *kvm, int irq_source_id, - int level, bool line_status) -{ - return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi, - level, line_status); -} - static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xics *xics = dev->private; @@ -1458,29 +1450,6 @@ void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; } -static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e, - struct kvm *kvm, int irq_source_id, int level, - bool line_status) -{ - return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status); -} - -int kvm_irq_map_gsi(struct kvm *kvm, - struct kvm_kernel_irq_routing_entry *entries, int gsi) -{ - entries->gsi = gsi; - entries->type = KVM_IRQ_ROUTING_IRQCHIP; - entries->set = xics_set_irq; - entries->irqchip.irqchip = 0; - entries->irqchip.pin = gsi; - return 1; -} - -int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) -{ - return pin; -} - void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq, unsigned long host_irq) { diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h index ec5474cf70c6..453c9e518c19 100644 --- a/arch/powerpc/kvm/book3s_xics.h +++ b/arch/powerpc/kvm/book3s_xics.h @@ -10,6 +10,7 @@ #ifndef _KVM_PPC_BOOK3S_XICS_H #define _KVM_PPC_BOOK3S_XICS_H +#ifdef CONFIG_KVM_XICS /* * We use a two-level tree to store interrupt source information. * There are up to 1024 ICS nodes, each of which can represent @@ -144,5 +145,11 @@ static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics, return ics; } +extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu); +extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr); +extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); +extern int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr); +#endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c new file mode 100644 index 000000000000..7807ee17af4b --- /dev/null +++ b/arch/powerpc/kvm/book3s_xive.c @@ -0,0 +1,1893 @@ +/* + * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) "xive-kvm: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "book3s_xive.h" + + +/* + * Virtual mode variants of the hcalls for use on radix/radix + * with AIL. They require the VCPU's VP to be "pushed" + * + * We still instanciate them here because we use some of the + * generated utility functions as well in this file. 
+ */ +#define XIVE_RUNTIME_CHECKS +#define X_PFX xive_vm_ +#define X_STATIC static +#define X_STAT_PFX stat_vm_ +#define __x_tima xive_tima +#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) +#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) +#define __x_readb __raw_readb +#define __x_writeb __raw_writeb +#define __x_readw __raw_readw +#define __x_readq __raw_readq +#define __x_writeq __raw_writeq + +#include "book3s_xive_template.c" + +/* + * We leave a gap of a couple of interrupts in the queue to + * account for the IPI and additional safety guard. + */ +#define XIVE_Q_GAP 2 + +/* + * This is a simple trigger for a generic XIVE IRQ. This must + * only be called for interrupts that support a trigger page + */ +static bool xive_irq_trigger(struct xive_irq_data *xd) +{ + /* This should be only for MSIs */ + if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) + return false; + + /* Those interrupts should always have a trigger page */ + if (WARN_ON(!xd->trig_mmio)) + return false; + + out_be64(xd->trig_mmio, 0); + + return true; +} + +static irqreturn_t xive_esc_irq(int irq, void *data) +{ + struct kvm_vcpu *vcpu = data; + + /* We use the existing H_PROD mechanism to wake up the target */ + vcpu->arch.prodded = 1; + smp_mb(); + if (vcpu->arch.ceded) + kvmppc_fast_vcpu_kick(vcpu); + + return IRQ_HANDLED; +} + +static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_q *q = &xc->queues[prio]; + char *name = NULL; + int rc; + + /* Already there ? */ + if (xc->esc_virq[prio]) + return 0; + + /* Hook up the escalation interrupt */ + xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); + if (!xc->esc_virq[prio]) { + pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n", + prio, xc->server_num); + return -EIO; + } + + /* + * Future improvement: start with them disabled + * and handle DD2 and later scheme of merged escalation + * interrupts + */ + name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d", + vcpu->kvm->arch.lpid, xc->server_num, prio); + if (!name) { + pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n", + prio, xc->server_num); + rc = -ENOMEM; + goto error; + } + rc = request_irq(xc->esc_virq[prio], xive_esc_irq, + IRQF_NO_THREAD, name, vcpu); + if (rc) { + pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n", + prio, xc->server_num); + goto error; + } + xc->esc_virq_names[prio] = name; + return 0; +error: + irq_dispose_mapping(xc->esc_virq[prio]); + xc->esc_virq[prio] = 0; + kfree(name); + return rc; +} + +static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvmppc_xive *xive = xc->xive; + struct xive_q *q = &xc->queues[prio]; + void *qpage; + int rc; + + if (WARN_ON(q->qpage)) + return 0; + + /* Allocate the queue and retrieve infos on current node for now */ + qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); + if (!qpage) { + pr_err("Failed to allocate queue %d for VCPU %d\n", + prio, xc->server_num); + return -ENOMEM;; + } + memset(qpage, 0, 1 << xive->q_order); + + /* + * Reconfigure the queue. This will set q->qpage only once the + * queue is fully configured. 
This is a requirement for prio 0 + * as we will stop doing EOIs for every IPI as soon as we observe + * qpage being non-NULL, and instead will only EOI when we receive + * corresponding queue 0 entries + */ + rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, + xive->q_order, true); + if (rc) + pr_err("Failed to configure queue %d for VCPU %d\n", + prio, xc->server_num); + return rc; +} + +/* Called with kvm_lock held */ +static int xive_check_provisioning(struct kvm *kvm, u8 prio) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvm_vcpu *vcpu; + int i, rc; + + lockdep_assert_held(&kvm->lock); + + /* Already provisioned ? */ + if (xive->qmap & (1 << prio)) + return 0; + + pr_devel("Provisioning prio... %d\n", prio); + + /* Provision each VCPU and enable escalations */ + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!vcpu->arch.xive_vcpu) + continue; + rc = xive_provision_queue(vcpu, prio); + if (rc == 0) + xive_attach_escalation(vcpu, prio); + if (rc) + return rc; + } + + /* Order previous stores and mark it as provisioned */ + mb(); + xive->qmap |= (1 << prio); + return 0; +} + +static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio) +{ + struct kvm_vcpu *vcpu; + struct kvmppc_xive_vcpu *xc; + struct xive_q *q; + + /* Locate target server */ + vcpu = kvmppc_xive_find_server(kvm, server); + if (!vcpu) { + pr_warn("%s: Can't find server %d\n", __func__, server); + return; + } + xc = vcpu->arch.xive_vcpu; + if (WARN_ON(!xc)) + return; + + q = &xc->queues[prio]; + atomic_inc(&q->pending_count); +} + +static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_q *q; + u32 max; + + if (WARN_ON(!xc)) + return -ENXIO; + if (!xc->valid) + return -ENXIO; + + q = &xc->queues[prio]; + if (WARN_ON(!q->qpage)) + return -ENXIO; + + /* Calculate max number of interrupts in that queue. */ + max = (q->msk + 1) - XIVE_Q_GAP; + return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; +} + +static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio) +{ + struct kvm_vcpu *vcpu; + int i, rc; + + /* Locate target server */ + vcpu = kvmppc_xive_find_server(kvm, *server); + if (!vcpu) { + pr_devel("Can't find server %d\n", *server); + return -EINVAL; + } + + pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); + + /* Try pick it */ + rc = xive_try_pick_queue(vcpu, prio); + if (rc == 0) + return rc; + + pr_devel(" .. failed, looking up candidate...\n"); + + /* Failed, pick another VCPU */ + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!vcpu->arch.xive_vcpu) + continue; + rc = xive_try_pick_queue(vcpu, prio); + if (rc == 0) { + *server = vcpu->arch.xive_vcpu->server_num; + pr_devel(" found on 0x%x/%d\n", *server, prio); + return rc; + } + } + pr_devel(" no available target !\n"); + + /* No available target ! */ + return -EBUSY; +} + +static u8 xive_lock_and_mask(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + struct kvmppc_xive_irq_state *state) +{ + struct xive_irq_data *xd; + u32 hw_num; + u8 old_prio; + u64 val; + + /* + * Take the lock, set masked, try again if racing + * with H_EOI + */ + for (;;) { + arch_spin_lock(&sb->lock); + old_prio = state->guest_priority; + state->guest_priority = MASKED; + mb(); + if (!state->in_eoi) + break; + state->guest_priority = old_prio; + arch_spin_unlock(&sb->lock); + } + + /* No change ? 
Bail */ + if (old_prio == MASKED) + return old_prio; + + /* Get the right irq */ + kvmppc_xive_select_irq(state, &hw_num, &xd); + + /* + * If the interrupt is marked as needing masking via + * firmware, we do it here. Firmware masking however + * is "lossy", it won't return the old p and q bits + * and won't set the interrupt to a state where it will + * record queued ones. If this is an issue we should do + * lazy masking instead. + * + * For now, we work around this in unmask by forcing + * an interrupt whenever we unmask a non-LSI via FW + * (if ever). + */ + if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { + xive_native_configure_irq(hw_num, + xive->vp_base + state->act_server, + MASKED, state->number); + /* set old_p so we can track if an H_EOI was done */ + state->old_p = true; + state->old_q = false; + } else { + /* Set PQ to 10, return old P and old Q and remember them */ + val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); + state->old_p = !!(val & 2); + state->old_q = !!(val & 1); + + /* + * Synchronize hardware to sensure the queues are updated + * when masking + */ + xive_native_sync_source(hw_num); + } + + return old_prio; +} + +static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb, + struct kvmppc_xive_irq_state *state) +{ + /* + * Take the lock try again if racing with H_EOI + */ + for (;;) { + arch_spin_lock(&sb->lock); + if (!state->in_eoi) + break; + arch_spin_unlock(&sb->lock); + } +} + +static void xive_finish_unmask(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + struct kvmppc_xive_irq_state *state, + u8 prio) +{ + struct xive_irq_data *xd; + u32 hw_num; + + /* If we aren't changing a thing, move on */ + if (state->guest_priority != MASKED) + goto bail; + + /* Get the right irq */ + kvmppc_xive_select_irq(state, &hw_num, &xd); + + /* + * See command in xive_lock_and_mask() concerning masking + * via firmware. + */ + if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { + xive_native_configure_irq(hw_num, + xive->vp_base + state->act_server, + state->act_priority, state->number); + /* If an EOI is needed, do it here */ + if (!state->old_p) + xive_vm_source_eoi(hw_num, xd); + /* If this is not an LSI, force a trigger */ + if (!(xd->flags & OPAL_XIVE_IRQ_LSI)) + xive_irq_trigger(xd); + goto bail; + } + + /* Old Q set, set PQ to 11 */ + if (state->old_q) + xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); + + /* + * If not old P, then perform an "effective" EOI, + * on the source. This will handle the cases where + * FW EOI is needed. + */ + if (!state->old_p) + xive_vm_source_eoi(hw_num, xd); + + /* Synchronize ordering and mark unmasked */ + mb(); +bail: + state->guest_priority = prio; +} + +/* + * Target an interrupt to a given server/prio, this will fallback + * to another server if necessary and perform the HW targetting + * updates as needed + * + * NOTE: Must be called with the state lock held + */ +static int xive_target_interrupt(struct kvm *kvm, + struct kvmppc_xive_irq_state *state, + u32 server, u8 prio) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + u32 hw_num; + int rc; + + /* + * This will return a tentative server and actual + * priority. The count for that new target will have + * already been incremented. + */ + rc = xive_select_target(kvm, &server, prio); + + /* + * We failed to find a target ? Not much we can do + * at least until we support the GIQ. + */ + if (rc) + return rc; + + /* + * Increment the old queue pending count if there + * was one so that the old queue count gets adjusted later + * when observed to be empty. 
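+	 *
+	 * (The counter bumped by xive_inc_q_pending() is folded back in the
+	 * queue scan loop of book3s_xive_template.c: once a scan finds that
+	 * queue empty, pending_count is atomically moved into a decrement of
+	 * q->count, so the occupancy accounting stays balanced after a
+	 * retarget.)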
+ */ + if (state->act_priority != MASKED) + xive_inc_q_pending(kvm, + state->act_server, + state->act_priority); + /* + * Update state and HW + */ + state->act_priority = prio; + state->act_server = server; + + /* Get the right irq */ + kvmppc_xive_select_irq(state, &hw_num, NULL); + + return xive_native_configure_irq(hw_num, + xive->vp_base + server, + prio, state->number); +} + +/* + * Targetting rules: In order to avoid losing track of + * pending interrupts accross mask and unmask, which would + * allow queue overflows, we implement the following rules: + * + * - Unless it was never enabled (or we run out of capacity) + * an interrupt is always targetted at a valid server/queue + * pair even when "masked" by the guest. This pair tends to + * be the last one used but it can be changed under some + * circumstances. That allows us to separate targetting + * from masking, we only handle accounting during (re)targetting, + * this also allows us to let an interrupt drain into its target + * queue after masking, avoiding complex schemes to remove + * interrupts out of remote processor queues. + * + * - When masking, we set PQ to 10 and save the previous value + * of P and Q. + * + * - When unmasking, if saved Q was set, we set PQ to 11 + * otherwise we leave PQ to the HW state which will be either + * 10 if nothing happened or 11 if the interrupt fired while + * masked. Effectively we are OR'ing the previous Q into the + * HW Q. + * + * Then if saved P is clear, we do an effective EOI (Q->P->Trigger) + * which will unmask the interrupt and shoot a new one if Q was + * set. + * + * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11, + * effectively meaning an H_EOI from the guest is still expected + * for that interrupt). + * + * - If H_EOI occurs while masked, we clear the saved P. + * + * - When changing target, we account on the new target and + * increment a separate "pending" counter on the old one. + * This pending counter will be used to decrement the old + * target's count when its queue has been observed empty. + */ + +int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, + u32 priority) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u8 new_act_prio; + int rc = 0; + u16 idx; + + if (!xive) + return -ENODEV; + + pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", + irq, server, priority); + + /* First, check provisioning of queues */ + if (priority != MASKED) + rc = xive_check_provisioning(xive->kvm, + xive_prio_from_guest(priority)); + if (rc) { + pr_devel(" provisioning failure %d !\n", rc); + return rc; + } + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + /* + * We first handle masking/unmasking since the locking + * might need to be retried due to EOIs, we'll handle + * targetting changes later. These functions will return + * with the SB lock held. + * + * xive_lock_and_mask() will also set state->guest_priority + * but won't otherwise change other fields of the state. + * + * xive_lock_for_unmask will not actually unmask, this will + * be done later by xive_finish_unmask() once the targetting + * has been done, so we don't try to unmask an interrupt + * that hasn't yet been targetted. + */ + if (priority == MASKED) + xive_lock_and_mask(xive, sb, state); + else + xive_lock_for_unmask(sb, state); + + + /* + * Then we handle targetting. 
+ * + * First calculate a new "actual priority" + */ + new_act_prio = state->act_priority; + if (priority != MASKED) + new_act_prio = xive_prio_from_guest(priority); + + pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n", + new_act_prio, state->act_server, state->act_priority); + + /* + * Then check if we actually need to change anything, + * + * The condition for re-targetting the interrupt is that + * we have a valid new priority (new_act_prio is not 0xff) + * and either the server or the priority changed. + * + * Note: If act_priority was ff and the new priority is + * also ff, we don't do anything and leave the interrupt + * untargetted. An attempt of doing an int_on on an + * untargetted interrupt will fail. If that is a problem + * we could initialize interrupts with valid default + */ + + if (new_act_prio != MASKED && + (state->act_server != server || + state->act_priority != new_act_prio)) + rc = xive_target_interrupt(kvm, state, server, new_act_prio); + + /* + * Perform the final unmasking of the interrupt source + * if necessary + */ + if (priority != MASKED) + xive_finish_unmask(xive, sb, state, priority); + + /* + * Finally Update saved_priority to match. Only int_on/off + * set this field to a different value. + */ + state->saved_priority = priority; + + arch_spin_unlock(&sb->lock); + return rc; +} + +int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, + u32 *priority) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + if (!xive) + return -ENODEV; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + arch_spin_lock(&sb->lock); + *server = state->guest_server; + *priority = state->guest_priority; + arch_spin_unlock(&sb->lock); + + return 0; +} + +int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + if (!xive) + return -ENODEV; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + pr_devel("int_on(irq=0x%x)\n", irq); + + /* + * Check if interrupt was not targetted + */ + if (state->act_priority == MASKED) { + pr_devel("int_on on untargetted interrupt\n"); + return -EINVAL; + } + + /* If saved_priority is 0xff, do nothing */ + if (state->saved_priority == MASKED) + return 0; + + /* + * Lock and unmask it. + */ + xive_lock_for_unmask(sb, state); + xive_finish_unmask(xive, sb, state, state->saved_priority); + arch_spin_unlock(&sb->lock); + + return 0; +} + +int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + if (!xive) + return -ENODEV; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + pr_devel("int_off(irq=0x%x)\n", irq); + + /* + * Lock and mask + */ + state->saved_priority = xive_lock_and_mask(xive, sb, state); + arch_spin_unlock(&sb->lock); + + return 0; +} + +static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return false; + state = &sb->irq_state[idx]; + if (!state->valid) + return false; + + /* + * Trigger the IPI. 
This assumes we never restore a pass-through + * interrupt which should be safe enough + */ + xive_irq_trigger(&state->ipi_data); + + return true; +} + +u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + if (!xc) + return 0; + + /* Return the per-cpu state for state saving/migration */ + return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | + (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT; +} + +int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; + u8 cppr, mfrr; + u32 xisr; + + if (!xc || !xive) + return -ENOENT; + + /* Grab individual state fields. We don't use pending_pri */ + cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT; + xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) & + KVM_REG_PPC_ICP_XISR_MASK; + mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT; + + pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", + xc->server_num, cppr, mfrr, xisr); + + /* + * We can't update the state of a "pushed" VCPU, but that + * shouldn't happen. + */ + if (WARN_ON(vcpu->arch.xive_pushed)) + return -EIO; + + /* Update VCPU HW saved state */ + vcpu->arch.xive_saved_state.cppr = cppr; + xc->hw_cppr = xc->cppr = cppr; + + /* + * Update MFRR state. If it's not 0xff, we mark the VCPU as + * having a pending MFRR change, which will re-evaluate the + * target. The VCPU will thus potentially get a spurious + * interrupt but that's not a big deal. + */ + xc->mfrr = mfrr; + if (mfrr < cppr) + xive_irq_trigger(&xc->vp_ipi_data); + + /* + * Now saved XIRR is "interesting". It means there's something in + * the legacy "1 element" queue... for an IPI we simply ignore it, + * as the MFRR restore will handle that. For anything else we need + * to force a resend of the source. + * However the source may not have been setup yet. If that's the + * case, we keep that info and increment a counter in the xive to + * tell subsequent xive_set_source() to go look. + */ + if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) { + xc->delayed_irq = xisr; + xive->delayed_irqs++; + pr_devel(" xisr restore delayed\n"); + } + + return 0; +} + +int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + struct irq_data *host_data = irq_desc_get_irq_data(host_desc); + unsigned int host_irq = irq_desc_get_irq(host_desc); + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data); + u16 idx; + u8 prio; + int rc; + + if (!xive) + return -ENODEV; + + pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq); + + sb = kvmppc_xive_find_source(xive, guest_irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + /* + * Mark the passed-through interrupt as going to a VCPU, + * this will prevent further EOIs and similar operations + * from the XIVE code. It will also mask the interrupt + * to either PQ=10 or 11 state, the latter if the interrupt + * is pending. This will allow us to unmask or retrigger it + * after routing it to the guest with a simple EOI. + * + * The "state" argument is a "token", all it needs is to be + * non-NULL to switch to passed-through or NULL for the + * other way around. We may not yet have an actual VCPU + * target here and we don't really care. 
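+	 *
+	 * (The consumer of that token is xive_irq_set_vcpu_affinity() in the
+	 * XIVE host driver, added later in this patch: it only tests the
+	 * pointer for NULL, flags the irq as forwarded to a vcpu and shifts
+	 * the source to PQ=10/11 as appropriate.)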
+ */ + rc = irq_set_vcpu_affinity(host_irq, state); + if (rc) { + pr_err("Failed to set VCPU affinity for irq %d\n", host_irq); + return rc; + } + + /* + * Mask and read state of IPI. We need to know if its P bit + * is set as that means it's potentially already using a + * queue entry in the target + */ + prio = xive_lock_and_mask(xive, sb, state); + pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio, + state->old_p, state->old_q); + + /* Turn the IPI hard off */ + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); + + /* Grab info about irq */ + state->pt_number = hw_irq; + state->pt_data = irq_data_get_irq_handler_data(host_data); + + /* + * Configure the IRQ to match the existing configuration of + * the IPI if it was already targetted. Otherwise this will + * mask the interrupt in a lossy way (act_priority is 0xff) + * which is fine for a never started interrupt. + */ + xive_native_configure_irq(hw_irq, + xive->vp_base + state->act_server, + state->act_priority, state->number); + + /* + * We do an EOI to enable the interrupt (and retrigger if needed) + * if the guest has the interrupt unmasked and the P bit was *not* + * set in the IPI. If it was set, we know a slot may still be in + * use in the target queue thus we have to wait for a guest + * originated EOI + */ + if (prio != MASKED && !state->old_p) + xive_vm_source_eoi(hw_irq, state->pt_data); + + /* Clear old_p/old_q as they are no longer relevant */ + state->old_p = state->old_q = false; + + /* Restore guest prio (unlocks EOI) */ + mb(); + state->guest_priority = prio; + arch_spin_unlock(&sb->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped); + +int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + unsigned int host_irq = irq_desc_get_irq(host_desc); + u16 idx; + u8 prio; + int rc; + + if (!xive) + return -ENODEV; + + pr_devel("clr_mapped girq 0x%lx...\n", guest_irq); + + sb = kvmppc_xive_find_source(xive, guest_irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + /* + * Mask and read state of IRQ. We need to know if its P bit + * is set as that means it's potentially already using a + * queue entry in the target + */ + prio = xive_lock_and_mask(xive, sb, state); + pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio, + state->old_p, state->old_q); + + /* + * If old_p is set, the interrupt is pending, we switch it to + * PQ=11. This will force a resend in the host so the interrupt + * isn't lost to whatver host driver may pick it up + */ + if (state->old_p) + xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11); + + /* Release the passed-through interrupt to the host */ + rc = irq_set_vcpu_affinity(host_irq, NULL); + if (rc) { + pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq); + return rc; + } + + /* Forget about the IRQ */ + state->pt_number = 0; + state->pt_data = NULL; + + /* Reconfigure the IPI */ + xive_native_configure_irq(state->ipi_number, + xive->vp_base + state->act_server, + state->act_priority, state->number); + + /* + * If old_p is set (we have a queue entry potentially + * occupied) or the interrupt is masked, we set the IPI + * to PQ=10 state. Otherwise we just re-enable it (PQ=00). 
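+	 *
+	 * As used throughout this file: PQ=00 re-enables a source, 01 turns
+	 * it hard off, 10 masks it, and 11 masks it with a pending occurrence
+	 * remembered in Q. The old_p/old_q bits tested here were captured by
+	 * xive_lock_and_mask() from the value returned by the ESB load:
+	 *
+	 *	val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
+	 *	state->old_p = !!(val & 2);
+	 *	state->old_q = !!(val & 1);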
+ */ + if (prio == MASKED || state->old_p) + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10); + else + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00); + + /* Restore guest prio (unlocks EOI) */ + mb(); + state->guest_priority = prio; + arch_spin_unlock(&sb->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped); + +static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvm *kvm = vcpu->kvm; + struct kvmppc_xive *xive = kvm->arch.xive; + int i, j; + + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + + if (!sb) + continue; + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { + struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; + + if (!state->valid) + continue; + if (state->act_priority == MASKED) + continue; + if (state->act_server != xc->server_num) + continue; + + /* Clean it up */ + arch_spin_lock(&sb->lock); + state->act_priority = MASKED; + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); + if (state->pt_number) { + xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(state->pt_number, 0, MASKED, 0); + } + arch_spin_unlock(&sb->lock); + } + } +} + +void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvmppc_xive *xive = xc->xive; + int i; + + pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); + + /* Ensure no interrupt is still routed to that VP */ + xc->valid = false; + kvmppc_xive_disable_vcpu_interrupts(vcpu); + + /* Mask the VP IPI */ + xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); + + /* Disable the VP */ + xive_native_disable_vp(xc->vp_id); + + /* Free the queues & associated interrupts */ + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + struct xive_q *q = &xc->queues[i]; + + /* Free the escalation irq */ + if (xc->esc_virq[i]) { + free_irq(xc->esc_virq[i], vcpu); + irq_dispose_mapping(xc->esc_virq[i]); + kfree(xc->esc_virq_names[i]); + } + /* Free the queue */ + xive_native_disable_queue(xc->vp_id, q, i); + if (q->qpage) { + free_pages((unsigned long)q->qpage, + xive->q_page_order); + q->qpage = NULL; + } + } + + /* Free the IPI */ + if (xc->vp_ipi) { + xive_cleanup_irq_data(&xc->vp_ipi_data); + xive_native_free_irq(xc->vp_ipi); + } + /* Free the VP */ + kfree(xc); +} + +int kvmppc_xive_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 cpu) +{ + struct kvmppc_xive *xive = dev->private; + struct kvmppc_xive_vcpu *xc; + int i, r = -EBUSY; + + pr_devel("connect_vcpu(cpu=%d)\n", cpu); + + if (dev->ops != &kvm_xive_ops) { + pr_devel("Wrong ops !\n"); + return -EPERM; + } + if (xive->kvm != vcpu->kvm) + return -EPERM; + if (vcpu->arch.irq_type) + return -EBUSY; + if (kvmppc_xive_find_server(vcpu->kvm, cpu)) { + pr_devel("Duplicate !\n"); + return -EEXIST; + } + if (cpu >= KVM_MAX_VCPUS) { + pr_devel("Out of bounds !\n"); + return -EINVAL; + } + xc = kzalloc(sizeof(*xc), GFP_KERNEL); + if (!xc) + return -ENOMEM; + + /* We need to synchronize with queue provisioning */ + mutex_lock(&vcpu->kvm->lock); + vcpu->arch.xive_vcpu = xc; + xc->xive = xive; + xc->vcpu = vcpu; + xc->server_num = cpu; + xc->vp_id = xive->vp_base + cpu; + xc->mfrr = 0xff; + xc->valid = true; + + r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); + if (r) + goto bail; + + /* Configure VCPU fields for use by assembly push/pull */ + 
vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); + vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); + + /* Allocate IPI */ + xc->vp_ipi = xive_native_alloc_irq(); + if (!xc->vp_ipi) { + r = -EIO; + goto bail; + } + pr_devel(" IPI=0x%x\n", xc->vp_ipi); + + r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); + if (r) + goto bail; + + /* + * Initialize queues. Initially we set them all for no queueing + * and we enable escalation for queue 0 only which we'll use for + * our mfrr change notifications. If the VCPU is hot-plugged, we + * do handle provisioning however. + */ + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + struct xive_q *q = &xc->queues[i]; + + /* Is queue already enabled ? Provision it */ + if (xive->qmap & (1 << i)) { + r = xive_provision_queue(vcpu, i); + if (r == 0) + xive_attach_escalation(vcpu, i); + if (r) + goto bail; + } else { + r = xive_native_configure_queue(xc->vp_id, + q, i, NULL, 0, true); + if (r) { + pr_err("Failed to configure queue %d for VCPU %d\n", + i, cpu); + goto bail; + } + } + } + + /* If not done above, attach priority 0 escalation */ + r = xive_attach_escalation(vcpu, 0); + if (r) + goto bail; + + /* Enable the VP */ + r = xive_native_enable_vp(xc->vp_id); + if (r) + goto bail; + + /* Route the IPI */ + r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); + if (!r) + xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); + +bail: + mutex_unlock(&vcpu->kvm->lock); + if (r) { + kvmppc_xive_cleanup_vcpu(vcpu); + return r; + } + + vcpu->arch.irq_type = KVMPPC_IRQ_XICS; + return 0; +} + +/* + * Scanning of queues before/after migration save + */ +static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return; + + state = &sb->irq_state[idx]; + + /* Some sanity checking */ + if (!state->valid) { + pr_err("invalid irq 0x%x in cpu queue!\n", irq); + return; + } + + /* + * If the interrupt is in a queue it should have P set. + * We warn so that gets reported. A backtrace isn't useful + * so no need to use a WARN_ON. + */ + if (!state->saved_p) + pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); + + /* Set flag */ + state->in_queue = true; +} + +static void xive_pre_save_mask_irq(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + u32 irq) +{ + struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; + + if (!state->valid) + return; + + /* Mask and save state, this will also sync HW queues */ + state->saved_scan_prio = xive_lock_and_mask(xive, sb, state); + + /* Transfer P and Q */ + state->saved_p = state->old_p; + state->saved_q = state->old_q; + + /* Unlock */ + arch_spin_unlock(&sb->lock); +} + +static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + u32 irq) +{ + struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; + + if (!state->valid) + return; + + /* + * Lock / exclude EOI (not technically necessary if the + * guest isn't running concurrently. If this becomes a + * performance issue we can probably remove the lock. 
+ */ + xive_lock_for_unmask(sb, state); + + /* Restore mask/prio if it wasn't masked */ + if (state->saved_scan_prio != MASKED) + xive_finish_unmask(xive, sb, state, state->saved_scan_prio); + + /* Unlock */ + arch_spin_unlock(&sb->lock); +} + +static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q) +{ + u32 idx = q->idx; + u32 toggle = q->toggle; + u32 irq; + + do { + irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle); + if (irq > XICS_IPI) + xive_pre_save_set_queued(xive, irq); + } while(irq); +} + +static void xive_pre_save_scan(struct kvmppc_xive *xive) +{ + struct kvm_vcpu *vcpu = NULL; + int i, j; + + /* + * See comment in xive_get_source() about how this + * work. Collect a stable state for all interrupts + */ + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + if (!sb) + continue; + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) + xive_pre_save_mask_irq(xive, sb, j); + } + + /* Then scan the queues and update the "in_queue" flag */ + kvm_for_each_vcpu(i, vcpu, xive->kvm) { + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + if (!xc) + continue; + for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { + if (xc->queues[i].qpage) + xive_pre_save_queue(xive, &xc->queues[i]); + } + } + + /* Finally restore interrupt states */ + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + if (!sb) + continue; + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) + xive_pre_save_unmask_irq(xive, sb, j); + } +} + +static void xive_post_save_scan(struct kvmppc_xive *xive) +{ + u32 i, j; + + /* Clear all the in_queue flags */ + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + if (!sb) + continue; + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) + sb->irq_state[j].in_queue = false; + } + + /* Next get_source() will do a new scan */ + xive->saved_src_count = 0; +} + +/* + * This returns the source configuration and state to user space. + */ +static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u64 __user *ubufp = (u64 __user *) addr; + u64 val, prio; + u16 idx; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -ENOENT; + + state = &sb->irq_state[idx]; + + if (!state->valid) + return -ENOENT; + + pr_devel("get_source(%ld)...\n", irq); + + /* + * So to properly save the state into something that looks like a + * XICS migration stream we cannot treat interrupts individually. + * + * We need, instead, mask them all (& save their previous PQ state) + * to get a stable state in the HW, then sync them to ensure that + * any interrupt that had already fired hits its queue, and finally + * scan all the queues to collect which interrupts are still present + * in the queues, so we can set the "pending" flag on them and + * they can be resent on restore. + * + * So we do it all when the "first" interrupt gets saved, all the + * state is collected at that point, the rest of xive_get_source() + * will merely collect and convert that state to the expected + * userspace bit mask. 
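+	 *
+	 * (The heavy lifting is the three-phase xive_pre_save_scan() above:
+	 * mask every source and record its P/Q bits, walk every provisioned
+	 * queue to set in_queue, then unmask everything again.
+	 * xive_post_save_scan() clears the flags once saved_src_count reaches
+	 * src_count.)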
+ */ + if (xive->saved_src_count == 0) + xive_pre_save_scan(xive); + xive->saved_src_count++; + + /* Convert saved state into something compatible with xics */ + val = state->guest_server; + prio = state->saved_scan_prio; + + if (prio == MASKED) { + val |= KVM_XICS_MASKED; + prio = state->saved_priority; + } + val |= prio << KVM_XICS_PRIORITY_SHIFT; + if (state->lsi) { + val |= KVM_XICS_LEVEL_SENSITIVE; + if (state->saved_p) + val |= KVM_XICS_PENDING; + } else { + if (state->saved_p) + val |= KVM_XICS_PRESENTED; + + if (state->saved_q) + val |= KVM_XICS_QUEUED; + + /* + * We mark it pending (which will attempt a re-delivery) + * if we are in a queue *or* we were masked and had + * Q set which is equivalent to the XICS "masked pending" + * state + */ + if (state->in_queue || (prio == MASKED && state->saved_q)) + val |= KVM_XICS_PENDING; + } + + /* + * If that was the last interrupt saved, reset the + * in_queue flags + */ + if (xive->saved_src_count == xive->src_count) + xive_post_save_scan(xive); + + /* Copy the result to userspace */ + if (put_user(val, ubufp)) + return -EFAULT; + + return 0; +} + +static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive, + int irq) +{ + struct kvm *kvm = xive->kvm; + struct kvmppc_xive_src_block *sb; + int i, bid; + + bid = irq >> KVMPPC_XICS_ICS_SHIFT; + + mutex_lock(&kvm->lock); + + /* block already exists - somebody else got here first */ + if (xive->src_blocks[bid]) + goto out; + + /* Create the ICS */ + sb = kzalloc(sizeof(*sb), GFP_KERNEL); + if (!sb) + goto out; + + sb->id = bid; + + for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { + sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; + sb->irq_state[i].guest_priority = MASKED; + sb->irq_state[i].saved_priority = MASKED; + sb->irq_state[i].act_priority = MASKED; + } + smp_wmb(); + xive->src_blocks[bid] = sb; + + if (bid > xive->max_sbid) + xive->max_sbid = bid; + +out: + mutex_unlock(&kvm->lock); + return xive->src_blocks[bid]; +} + +static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq) +{ + struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu = NULL; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + if (!xc) + continue; + + if (xc->delayed_irq == irq) { + xc->delayed_irq = 0; + xive->delayed_irqs--; + return true; + } + } + return false; +} + +static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u64 __user *ubufp = (u64 __user *) addr; + u16 idx; + u64 val; + u8 act_prio, guest_prio; + u32 server; + int rc = 0; + + if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS) + return -ENOENT; + + pr_devel("set_source(irq=0x%lx)\n", irq); + + /* Find the source */ + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) { + pr_devel("No source, creating source block...\n"); + sb = xive_create_src_block(xive, irq); + if (!sb) { + pr_devel("Failed to create block...\n"); + return -ENOMEM; + } + } + state = &sb->irq_state[idx]; + + /* Read user passed data */ + if (get_user(val, ubufp)) { + pr_devel("fault getting user info !\n"); + return -EFAULT; + } + + server = val & KVM_XICS_DESTINATION_MASK; + guest_prio = val >> KVM_XICS_PRIORITY_SHIFT; + + pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n", + val, server, guest_prio); + /* + * If the source doesn't already have an IPI, allocate + * one and get the corresponding data + */ + if (!state->ipi_number) { + state->ipi_number = 
xive_native_alloc_irq(); + if (state->ipi_number == 0) { + pr_devel("Failed to allocate IPI !\n"); + return -ENOMEM; + } + xive_native_populate_irq_data(state->ipi_number, &state->ipi_data); + pr_devel(" src_ipi=0x%x\n", state->ipi_number); + } + + /* + * We use lock_and_mask() to set us in the right masked + * state. We will override that state from the saved state + * further down, but this will handle the cases of interrupts + * that need FW masking. We set the initial guest_priority to + * 0 before calling it to ensure it actually performs the masking. + */ + state->guest_priority = 0; + xive_lock_and_mask(xive, sb, state); + + /* + * Now, we select a target if we have one. If we don't we + * leave the interrupt untargetted. It means that an interrupt + * can become "untargetted" accross migration if it was masked + * by set_xive() but there is little we can do about it. + */ + + /* First convert prio and mark interrupt as untargetted */ + act_prio = xive_prio_from_guest(guest_prio); + state->act_priority = MASKED; + state->guest_server = server; + + /* + * We need to drop the lock due to the mutex below. Hopefully + * nothing is touching that interrupt yet since it hasn't been + * advertized to a running guest yet + */ + arch_spin_unlock(&sb->lock); + + /* If we have a priority target the interrupt */ + if (act_prio != MASKED) { + /* First, check provisioning of queues */ + mutex_lock(&xive->kvm->lock); + rc = xive_check_provisioning(xive->kvm, act_prio); + mutex_unlock(&xive->kvm->lock); + + /* Target interrupt */ + if (rc == 0) + rc = xive_target_interrupt(xive->kvm, state, + server, act_prio); + /* + * If provisioning or targetting failed, leave it + * alone and masked. It will remain disabled until + * the guest re-targets it. + */ + } + + /* + * Find out if this was a delayed irq stashed in an ICP, + * in which case, treat it as pending + */ + if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) { + val |= KVM_XICS_PENDING; + pr_devel(" Found delayed ! forcing PENDING !\n"); + } + + /* Cleanup the SW state */ + state->old_p = false; + state->old_q = false; + state->lsi = false; + state->asserted = false; + + /* Restore LSI state */ + if (val & KVM_XICS_LEVEL_SENSITIVE) { + state->lsi = true; + if (val & KVM_XICS_PENDING) + state->asserted = true; + pr_devel(" LSI ! Asserted=%d\n", state->asserted); + } + + /* + * Restore P and Q. If the interrupt was pending, we + * force both P and Q, which will trigger a resend. + * + * That means that a guest that had both an interrupt + * pending (queued) and Q set will restore with only + * one instance of that interrupt instead of 2, but that + * is perfectly fine as coalescing interrupts that haven't + * been presented yet is always allowed. + */ + if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) + state->old_p = true; + if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) + state->old_q = true; + + pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q); + + /* + * If the interrupt was unmasked, update guest priority and + * perform the appropriate state transition and do a + * re-trigger if necessary. 
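+	 *
+	 * (The masked branch below leaves the source masked and only records
+	 * saved_priority for a later int_on; the unmasked branch goes through
+	 * xive_finish_unmask(), which performs the effective EOI / re-trigger
+	 * dictated by the old_p/old_q bits restored above.)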
+ */ + if (val & KVM_XICS_MASKED) { + pr_devel(" masked, saving prio\n"); + state->guest_priority = MASKED; + state->saved_priority = guest_prio; + } else { + pr_devel(" unmasked, restoring to prio %d\n", guest_prio); + xive_finish_unmask(xive, sb, state, guest_prio); + state->saved_priority = guest_prio; + } + + /* Increment the number of valid sources and mark this one valid */ + if (!state->valid) + xive->src_count++; + state->valid = true; + + return 0; +} + +int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, + bool line_status) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + if (!xive) + return -ENODEV; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + + /* Perform locklessly .... (we need to do some RCUisms here...) */ + state = &sb->irq_state[idx]; + if (!state->valid) + return -EINVAL; + + /* We don't allow a trigger on a passed-through interrupt */ + if (state->pt_number) + return -EINVAL; + + if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) + state->asserted = 1; + else if (level == 0 || level == KVM_INTERRUPT_UNSET) { + state->asserted = 0; + return 0; + } + + /* Trigger the IPI */ + xive_irq_trigger(&state->ipi_data); + + return 0; +} + +static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + struct kvmppc_xive *xive = dev->private; + + /* We honor the existing XICS ioctl */ + switch (attr->group) { + case KVM_DEV_XICS_GRP_SOURCES: + return xive_set_source(xive, attr->attr, attr->addr); + } + return -ENXIO; +} + +static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + struct kvmppc_xive *xive = dev->private; + + /* We honor the existing XICS ioctl */ + switch (attr->group) { + case KVM_DEV_XICS_GRP_SOURCES: + return xive_get_source(xive, attr->attr, attr->addr); + } + return -ENXIO; +} + +static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + /* We honor the same limits as XICS, at least for now */ + switch (attr->group) { + case KVM_DEV_XICS_GRP_SOURCES: + if (attr->attr >= KVMPPC_XICS_FIRST_IRQ && + attr->attr < KVMPPC_XICS_NR_IRQS) + return 0; + break; + } + return -ENXIO; +} + +static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) +{ + xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(hw_num, 0, MASKED, 0); + xive_cleanup_irq_data(xd); +} + +static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) +{ + int i; + + for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { + struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; + + if (!state->valid) + continue; + + kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); + xive_native_free_irq(state->ipi_number); + + /* Pass-through, cleanup too */ + if (state->pt_number) + kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); + + state->valid = false; + } +} + +static void kvmppc_xive_free(struct kvm_device *dev) +{ + struct kvmppc_xive *xive = dev->private; + struct kvm *kvm = xive->kvm; + int i; + + debugfs_remove(xive->dentry); + + if (kvm) + kvm->arch.xive = NULL; + + /* Mask and free interrupts */ + for (i = 0; i <= xive->max_sbid; i++) { + if (xive->src_blocks[i]) + kvmppc_xive_free_sources(xive->src_blocks[i]); + kfree(xive->src_blocks[i]); + xive->src_blocks[i] = NULL; + } + + if (xive->vp_base != XIVE_INVALID_VP) + xive_native_free_vp_block(xive->vp_base); + + + kfree(xive); + kfree(dev); +} + +static int 
kvmppc_xive_create(struct kvm_device *dev, u32 type) +{ + struct kvmppc_xive *xive; + struct kvm *kvm = dev->kvm; + int ret = 0; + + pr_devel("Creating xive for partition\n"); + + xive = kzalloc(sizeof(*xive), GFP_KERNEL); + if (!xive) + return -ENOMEM; + + dev->private = xive; + xive->dev = dev; + xive->kvm = kvm; + + /* Already there ? */ + if (kvm->arch.xive) + ret = -EEXIST; + else + kvm->arch.xive = xive; + + /* We use the default queue size set by the host */ + xive->q_order = xive_native_default_eq_shift(); + if (xive->q_order < PAGE_SHIFT) + xive->q_page_order = 0; + else + xive->q_page_order = xive->q_order - PAGE_SHIFT; + + /* Allocate a bunch of VPs */ + xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS); + pr_devel("VP_Base=%x\n", xive->vp_base); + + if (xive->vp_base == XIVE_INVALID_VP) + ret = -ENOMEM; + + if (ret) { + kfree(xive); + return ret; + } + + return 0; +} + + +static int xive_debug_show(struct seq_file *m, void *private) +{ + struct kvmppc_xive *xive = m->private; + struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; + u64 t_rm_h_xirr = 0; + u64 t_rm_h_ipoll = 0; + u64 t_rm_h_cppr = 0; + u64 t_rm_h_eoi = 0; + u64 t_rm_h_ipi = 0; + u64 t_vm_h_xirr = 0; + u64 t_vm_h_ipoll = 0; + u64 t_vm_h_cppr = 0; + u64 t_vm_h_eoi = 0; + u64 t_vm_h_ipi = 0; + unsigned int i; + + if (!kvm) + return 0; + + seq_printf(m, "=========\nVCPU state\n=========\n"); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + if (!xc) + continue; + + seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x" + " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n", + xc->server_num, xc->cppr, xc->hw_cppr, + xc->mfrr, xc->pending, + xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); + + t_rm_h_xirr += xc->stat_rm_h_xirr; + t_rm_h_ipoll += xc->stat_rm_h_ipoll; + t_rm_h_cppr += xc->stat_rm_h_cppr; + t_rm_h_eoi += xc->stat_rm_h_eoi; + t_rm_h_ipi += xc->stat_rm_h_ipi; + t_vm_h_xirr += xc->stat_vm_h_xirr; + t_vm_h_ipoll += xc->stat_vm_h_ipoll; + t_vm_h_cppr += xc->stat_vm_h_cppr; + t_vm_h_eoi += xc->stat_vm_h_eoi; + t_vm_h_ipi += xc->stat_vm_h_ipi; + } + + seq_printf(m, "Hcalls totals\n"); + seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr); + seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll); + seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr); + seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi); + seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi); + + return 0; +} + +static int xive_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, xive_debug_show, inode->i_private); +} + +static const struct file_operations xive_debug_fops = { + .open = xive_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void xive_debugfs_init(struct kvmppc_xive *xive) +{ + char *name; + + name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive); + if (!name) { + pr_err("%s: no memory for name\n", __func__); + return; + } + + xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root, + xive, &xive_debug_fops); + + pr_debug("%s: created %s\n", __func__, name); + kfree(name); +} + +static void kvmppc_xive_init(struct kvm_device *dev) +{ + struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private; + + /* Register some debug interfaces */ + xive_debugfs_init(xive); +} + +struct kvm_device_ops kvm_xive_ops = { + .name = "kvm-xive", + .create = kvmppc_xive_create, + .init = kvmppc_xive_init, + .destroy = kvmppc_xive_free, + 
.set_attr = xive_set_attr, + .get_attr = xive_get_attr, + .has_attr = xive_has_attr, +}; + +void kvmppc_xive_init_module(void) +{ + __xive_vm_h_xirr = xive_vm_h_xirr; + __xive_vm_h_ipoll = xive_vm_h_ipoll; + __xive_vm_h_ipi = xive_vm_h_ipi; + __xive_vm_h_cppr = xive_vm_h_cppr; + __xive_vm_h_eoi = xive_vm_h_eoi; +} + +void kvmppc_xive_exit_module(void) +{ + __xive_vm_h_xirr = NULL; + __xive_vm_h_ipoll = NULL; + __xive_vm_h_ipi = NULL; + __xive_vm_h_cppr = NULL; + __xive_vm_h_eoi = NULL; +} diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h new file mode 100644 index 000000000000..5938f7644dc1 --- /dev/null +++ b/arch/powerpc/kvm/book3s_xive.h @@ -0,0 +1,256 @@ +/* + * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#ifndef _KVM_PPC_BOOK3S_XIVE_H +#define _KVM_PPC_BOOK3S_XIVE_H + +#ifdef CONFIG_KVM_XICS +#include "book3s_xics.h" + +/* + * State for one guest irq source. + * + * For each guest source we allocate a HW interrupt in the XIVE + * which we use for all SW triggers. It will be unused for + * pass-through but it's easier to keep around as the same + * guest interrupt can alternatively be emulated or pass-through + * if a physical device is hot unplugged and replaced with an + * emulated one. + * + * This state structure is very similar to the XICS one with + * additional XIVE specific tracking. + */ +struct kvmppc_xive_irq_state { + bool valid; /* Interrupt entry is valid */ + + u32 number; /* Guest IRQ number */ + u32 ipi_number; /* XIVE IPI HW number */ + struct xive_irq_data ipi_data; /* XIVE IPI associated data */ + u32 pt_number; /* XIVE Pass-through number if any */ + struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */ + + /* Targetting as set by guest */ + u32 guest_server; /* Current guest selected target */ + u8 guest_priority; /* Guest set priority */ + u8 saved_priority; /* Saved priority when masking */ + + /* Actual targetting */ + u32 act_server; /* Actual server */ + u8 act_priority; /* Actual priority */ + + /* Various state bits */ + bool in_eoi; /* Synchronize with H_EOI */ + bool old_p; /* P bit state when masking */ + bool old_q; /* Q bit state when masking */ + bool lsi; /* level-sensitive interrupt */ + bool asserted; /* Only for emulated LSI: current state */ + + /* Saved for migration state */ + bool in_queue; + bool saved_p; + bool saved_q; + u8 saved_scan_prio; +}; + +/* Select the "right" interrupt (IPI vs. passthrough) */ +static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state, + u32 *out_hw_irq, + struct xive_irq_data **out_xd) +{ + if (state->pt_number) { + if (out_hw_irq) + *out_hw_irq = state->pt_number; + if (out_xd) + *out_xd = state->pt_data; + } else { + if (out_hw_irq) + *out_hw_irq = state->ipi_number; + if (out_xd) + *out_xd = &state->ipi_data; + } +} + +/* + * This corresponds to an "ICS" in XICS terminology, we use it + * as a mean to break up source information into multiple structures. 
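+ *
+ * Each block holds KVMPPC_XICS_IRQ_PER_ICS sources and is looked up by
+ * kvmppc_xive_find_source(): irq >> KVMPPC_XICS_ICS_SHIFT selects the
+ * block and the low KVMPPC_XICS_SRC_MASK bits index irq_state[] within it.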
+ */ +struct kvmppc_xive_src_block { + arch_spinlock_t lock; + u16 id; + struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS]; +}; + + +struct kvmppc_xive { + struct kvm *kvm; + struct kvm_device *dev; + struct dentry *dentry; + + /* VP block associated with the VM */ + u32 vp_base; + + /* Blocks of sources */ + struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1]; + u32 max_sbid; + + /* + * For state save, we lazily scan the queues on the first interrupt + * being migrated. We don't have a clean way to reset that flags + * so we keep track of the number of valid sources and how many of + * them were migrated so we can reset when all of them have been + * processed. + */ + u32 src_count; + u32 saved_src_count; + + /* + * Some irqs are delayed on restore until the source is created, + * keep track here of how many of them + */ + u32 delayed_irqs; + + /* Which queues (priorities) are in use by the guest */ + u8 qmap; + + /* Queue orders */ + u32 q_order; + u32 q_page_order; + +}; + +#define KVMPPC_XIVE_Q_COUNT 8 + +struct kvmppc_xive_vcpu { + struct kvmppc_xive *xive; + struct kvm_vcpu *vcpu; + bool valid; + + /* Server number. This is the HW CPU ID from a guest perspective */ + u32 server_num; + + /* + * HW VP corresponding to this VCPU. This is the base of the VP + * block plus the server number. + */ + u32 vp_id; + u32 vp_chip_id; + u32 vp_cam; + + /* IPI used for sending ... IPIs */ + u32 vp_ipi; + struct xive_irq_data vp_ipi_data; + + /* Local emulation state */ + uint8_t cppr; /* guest CPPR */ + uint8_t hw_cppr;/* Hardware CPPR */ + uint8_t mfrr; + uint8_t pending; + + /* Each VP has 8 queues though we only provision some */ + struct xive_q queues[KVMPPC_XIVE_Q_COUNT]; + u32 esc_virq[KVMPPC_XIVE_Q_COUNT]; + char *esc_virq_names[KVMPPC_XIVE_Q_COUNT]; + + /* Stash a delayed irq on restore from migration (see set_icp) */ + u32 delayed_irq; + + /* Stats */ + u64 stat_rm_h_xirr; + u64 stat_rm_h_ipoll; + u64 stat_rm_h_cppr; + u64 stat_rm_h_eoi; + u64 stat_rm_h_ipi; + u64 stat_vm_h_xirr; + u64 stat_vm_h_ipoll; + u64 stat_vm_h_cppr; + u64 stat_vm_h_eoi; + u64 stat_vm_h_ipi; +}; + +static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr) +{ + struct kvm_vcpu *vcpu = NULL; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num) + return vcpu; + } + return NULL; +} + +static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive, + u32 irq, u16 *source) +{ + u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT; + u16 src = irq & KVMPPC_XICS_SRC_MASK; + + if (source) + *source = src; + if (bid > KVMPPC_XICS_MAX_ICS_ID) + return NULL; + return xive->src_blocks[bid]; +} + +/* + * Mapping between guest priorities and host priorities + * is as follow. + * + * Guest request for 0...6 are honored. Guest request for anything + * higher results in a priority of 7 being applied. 
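+ * (For example, a guest request of 5 stays 5, anything from 8 to 0xfe is
+ * clamped to 7, and 0xff -- i.e. masked -- passes through unchanged, see
+ * xive_prio_from_guest() below.)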
+ * + * However, when XIRR is returned via H_XIRR, 7 is translated to 0xb + * in order to match AIX expectations + * + * Similar mapping is done for CPPR values + */ +static inline u8 xive_prio_from_guest(u8 prio) +{ + if (prio == 0xff || prio < 8) + return prio; + return 7; +} + +static inline u8 xive_prio_to_guest(u8 prio) +{ + if (prio == 0xff || prio < 7) + return prio; + return 0xb; +} + +static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle) +{ + u32 cur; + + if (!qpage) + return 0; + cur = be32_to_cpup(qpage + *idx); + if ((cur >> 31) == *toggle) + return 0; + *idx = (*idx + 1) & msk; + if (*idx == 0) + (*toggle) ^= 1; + return cur & 0x7fffffff; +} + +extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu); +extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server); +extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr); +extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); +extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr); + +extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu); +extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server); +extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr); +extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr); +extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr); + +#endif /* CONFIG_KVM_XICS */ +#endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c new file mode 100644 index 000000000000..023a31133c37 --- /dev/null +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -0,0 +1,503 @@ +/* + * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +/* File to be included by other .c files */ + +#define XGLUE(a,b) a##b +#define GLUE(a,b) XGLUE(a,b) + +static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) +{ + u8 cppr; + u16 ack; + + /* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */ + + /* Perform the acknowledge OS to register cycle. */ + ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG)); + + /* Synchronize subsequent queue accesses */ + mb(); + + /* XXX Check grouping level */ + + /* Anything ? */ + if (!((ack >> 8) & TM_QW1_NSR_EO)) + return; + + /* Grab CPPR of the most favored pending interrupt */ + cppr = ack & 0xff; + if (cppr < 8) + xc->pending |= 1 << cppr; + +#ifdef XIVE_RUNTIME_CHECKS + /* Check consistency */ + if (cppr >= xc->hw_cppr) + pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n", + smp_processor_id(), cppr, xc->hw_cppr); +#endif + + /* + * Update our image of the HW CPPR. We don't yet modify + * xc->cppr, this will be done as we scan for interrupts + * in the queues. 
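+	 *
+	 * (xc->pending, updated just above, is a bitmask with one bit per
+	 * priority that has something queued; scan_interrupts() consumes it
+	 * to decide which queues to walk.)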
+ */ + xc->hw_cppr = cppr; +} + +static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset) +{ + u64 val; + + if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) + offset |= offset << 4; + + val =__x_readq(__x_eoi_page(xd) + offset); +#ifdef __LITTLE_ENDIAN__ + val >>= 64-8; +#endif + return (u8)val; +} + + +static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd) +{ + /* If the XIVE supports the new "store EOI facility, use it */ + if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) + __x_writeq(0, __x_eoi_page(xd)); + else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { + opal_int_eoi(hw_irq); + } else { + uint64_t eoi_val; + + /* + * Otherwise for EOI, we use the special MMIO that does + * a clear of both P and Q and returns the old Q, + * except for LSIs where we use the "EOI cycle" special + * load. + * + * This allows us to then do a re-trigger if Q was set + * rather than synthetizing an interrupt in software + * + * For LSIs, using the HW EOI cycle works around a problem + * on P9 DD1 PHBs where the other ESB accesses don't work + * properly. + */ + if (xd->flags & XIVE_IRQ_FLAG_LSI) + __x_readq(__x_eoi_page(xd)); + else { + eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00); + + /* Re-trigger if needed */ + if ((eoi_val & 1) && __x_trig_page(xd)) + __x_writeq(0, __x_trig_page(xd)); + } + } +} + +enum { + scan_fetch, + scan_poll, + scan_eoi, +}; + +static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc, + u8 pending, int scan_type) +{ + u32 hirq = 0; + u8 prio = 0xff; + + /* Find highest pending priority */ + while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { + struct xive_q *q; + u32 idx, toggle; + __be32 *qpage; + + /* + * If pending is 0 this will return 0xff which is what + * we want + */ + prio = ffs(pending) - 1; + + /* + * If the most favoured prio we found pending is less + * favored (or equal) than a pending IPI, we return + * the IPI instead. + * + * Note: If pending was 0 and mfrr is 0xff, we will + * not spurriously take an IPI because mfrr cannot + * then be smaller than cppr. + */ + if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { + prio = xc->mfrr; + hirq = XICS_IPI; + break; + } + + /* Don't scan past the guest cppr */ + if (prio >= xc->cppr || prio > 7) + break; + + /* Grab queue and pointers */ + q = &xc->queues[prio]; + idx = q->idx; + toggle = q->toggle; + + /* + * Snapshot the queue page. The test further down for EOI + * must use the same "copy" that was used by __xive_read_eq + * since qpage can be set concurrently and we don't want + * to miss an EOI. + */ + qpage = READ_ONCE(q->qpage); + +skip_ipi: + /* + * Try to fetch from the queue. Will return 0 for a + * non-queueing priority (ie, qpage = 0). + */ + hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle); + + /* + * If this was a signal for an MFFR change done by + * H_IPI we skip it. Additionally, if we were fetching + * we EOI it now, thus re-enabling reception of a new + * such signal. + * + * We also need to do that if prio is 0 and we had no + * page for the queue. In this case, we have non-queued + * IPI that needs to be EOId. + * + * This is safe because if we have another pending MFRR + * change that wasn't observed above, the Q bit will have + * been set and another occurrence of the IPI will trigger. 
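+	 *
+	 * (This is why the code below loops back to skip_ipi with the
+	 * already-advanced idx/toggle rather than restarting the priority
+	 * search: a real interrupt sitting behind the IPI entry in the same
+	 * queue must still be found in this pass.)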
+ */ + if (hirq == XICS_IPI || (prio == 0 && !qpage)) { + if (scan_type == scan_fetch) + GLUE(X_PFX,source_eoi)(xc->vp_ipi, + &xc->vp_ipi_data); + /* Loop back on same queue with updated idx/toggle */ +#ifdef XIVE_RUNTIME_CHECKS + WARN_ON(hirq && hirq != XICS_IPI); +#endif + if (hirq) + goto skip_ipi; + } + + /* If fetching, update queue pointers */ + if (scan_type == scan_fetch) { + q->idx = idx; + q->toggle = toggle; + } + + /* Something found, stop searching */ + if (hirq) + break; + + /* Clear the pending bit on the now empty queue */ + pending &= ~(1 << prio); + + /* + * Check if the queue count needs adjusting due to + * interrupts being moved away. + */ + if (atomic_read(&q->pending_count)) { + int p = atomic_xchg(&q->pending_count, 0); + if (p) { +#ifdef XIVE_RUNTIME_CHECKS + WARN_ON(p > atomic_read(&q->count)); +#endif + atomic_sub(p, &q->count); + } + } + } + + /* If we are just taking a "peek", do nothing else */ + if (scan_type == scan_poll) + return hirq; + + /* Update the pending bits */ + xc->pending = pending; + + /* + * If this is an EOI that's it, no CPPR adjustment done here, + * all we needed was cleanup the stale pending bits and check + * if there's anything left. + */ + if (scan_type == scan_eoi) + return hirq; + + /* + * If we found an interrupt, adjust what the guest CPPR should + * be as if we had just fetched that interrupt from HW. + */ + if (hirq) + xc->cppr = prio; + /* + * If it was an IPI the HW CPPR might have been lowered too much + * as the HW interrupt we use for IPIs is routed to priority 0. + * + * We re-sync it here. + */ + if (xc->cppr != xc->hw_cppr) { + xc->hw_cppr = xc->cppr; + __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR); + } + + return hirq; +} + +X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u8 old_cppr; + u32 hirq; + + pr_devel("H_XIRR\n"); + + xc->GLUE(X_STAT_PFX,h_xirr)++; + + /* First collect pending bits from HW */ + GLUE(X_PFX,ack_pending)(xc); + + /* + * Cleanup the old-style bits if needed (they may have been + * set by pull or an escalation interrupts). + */ + if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions)) + clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, + &vcpu->arch.pending_exceptions); + + pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", + xc->pending, xc->hw_cppr, xc->cppr); + + /* Grab previous CPPR and reverse map it */ + old_cppr = xive_prio_to_guest(xc->cppr); + + /* Scan for actual interrupts */ + hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch); + + pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n", + hirq, xc->hw_cppr, xc->cppr); + +#ifdef XIVE_RUNTIME_CHECKS + /* That should never hit */ + if (hirq & 0xff000000) + pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq); +#endif + + /* + * XXX We could check if the interrupt is masked here and + * filter it. 
If we chose to do so, we would need to do: + * + * if (masked) { + * lock(); + * if (masked) { + * old_Q = true; + * hirq = 0; + * } + * unlock(); + * } + */ + + /* Return interrupt and old CPPR in GPR4 */ + vcpu->arch.gpr[4] = hirq | (old_cppr << 24); + + return H_SUCCESS; +} + +X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u8 pending = xc->pending; + u32 hirq; + u8 pipr; + + pr_devel("H_IPOLL(server=%ld)\n", server); + + xc->GLUE(X_STAT_PFX,h_ipoll)++; + + /* Grab the target VCPU if not the current one */ + if (xc->server_num != server) { + vcpu = kvmppc_xive_find_server(vcpu->kvm, server); + if (!vcpu) + return H_PARAMETER; + xc = vcpu->arch.xive_vcpu; + + /* Scan all priorities */ + pending = 0xff; + } else { + /* Grab pending interrupt if any */ + pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR); + if (pipr < 8) + pending |= 1 << pipr; + } + + hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); + + /* Return interrupt and old CPPR in GPR4 */ + vcpu->arch.gpr[4] = hirq | (xc->cppr << 24); + + return H_SUCCESS; +} + +static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc) +{ + u8 pending, prio; + + pending = xc->pending; + if (xc->mfrr != 0xff) { + if (xc->mfrr < 8) + pending |= 1 << xc->mfrr; + else + pending |= 0x80; + } + if (!pending) + return; + prio = ffs(pending) - 1; + + __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); +} + +X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u8 old_cppr; + + pr_devel("H_CPPR(cppr=%ld)\n", cppr); + + xc->GLUE(X_STAT_PFX,h_cppr)++; + + /* Map CPPR */ + cppr = xive_prio_from_guest(cppr); + + /* Remember old and update SW state */ + old_cppr = xc->cppr; + xc->cppr = cppr; + + /* + * We are masking less, we need to look for pending things + * to deliver and set VP pending bits accordingly to trigger + * a new interrupt otherwise we might miss MFRR changes for + * which we have optimized out sending an IPI signal. + */ + if (cppr > old_cppr) + GLUE(X_PFX,push_pending_to_hw)(xc); + + /* Apply new CPPR */ + xc->hw_cppr = cppr; + __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR); + + return H_SUCCESS; +} + +X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr) +{ + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_irq_data *xd; + u8 new_cppr = xirr >> 24; + u32 irq = xirr & 0x00ffffff, hw_num; + u16 src; + int rc = 0; + + pr_devel("H_EOI(xirr=%08lx)\n", xirr); + + xc->GLUE(X_STAT_PFX,h_eoi)++; + + xc->cppr = xive_prio_from_guest(new_cppr); + + /* + * IPIs are synthetized from MFRR and thus don't need + * any special EOI handling. The underlying interrupt + * used to signal MFRR changes is EOId when fetched from + * the queue. 
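+	 *
+	 * (Hence the early bail below: it skips only the source-level EOI,
+	 * while the code at the bail label still rescans the queues, pushes
+	 * any pending bits to the HW and applies the new CPPR, which is all
+	 * an IPI EOI requires.)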
+ */ + if (irq == XICS_IPI || irq == 0) + goto bail; + + /* Find interrupt source */ + sb = kvmppc_xive_find_source(xive, irq, &src); + if (!sb) { + pr_devel(" source not found !\n"); + rc = H_PARAMETER; + goto bail; + } + state = &sb->irq_state[src]; + kvmppc_xive_select_irq(state, &hw_num, &xd); + + state->in_eoi = true; + mb(); + +again: + if (state->guest_priority == MASKED) { + arch_spin_lock(&sb->lock); + if (state->guest_priority != MASKED) { + arch_spin_unlock(&sb->lock); + goto again; + } + pr_devel(" EOI on saved P...\n"); + + /* Clear old_p, that will cause unmask to perform an EOI */ + state->old_p = false; + + arch_spin_unlock(&sb->lock); + } else { + pr_devel(" EOI on source...\n"); + + /* Perform EOI on the source */ + GLUE(X_PFX,source_eoi)(hw_num, xd); + + /* If it's an emulated LSI, check level and resend */ + if (state->lsi && state->asserted) + __x_writeq(0, __x_trig_page(xd)); + + } + + mb(); + state->in_eoi = false; +bail: + + /* Re-evaluate pending IRQs and update HW */ + GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi); + GLUE(X_PFX,push_pending_to_hw)(xc); + pr_devel(" after scan pending=%02x\n", xc->pending); + + /* Apply new CPPR */ + xc->hw_cppr = xc->cppr; + __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR); + + return rc; +} + +X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr); + + xc->GLUE(X_STAT_PFX,h_ipi)++; + + /* Find target */ + vcpu = kvmppc_xive_find_server(vcpu->kvm, server); + if (!vcpu) + return H_PARAMETER; + xc = vcpu->arch.xive_vcpu; + + /* Locklessly write over MFRR */ + xc->mfrr = mfrr; + + /* Shoot the IPI if most favored than target cppr */ + if (mfrr < xc->cppr) + __x_writeq(0, __x_trig_page(&xc->vp_ipi_data)); + + return H_SUCCESS; +} diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h index 5a9a10b90762..3f1be85a83bc 100644 --- a/arch/powerpc/kvm/irq.h +++ b/arch/powerpc/kvm/irq.h @@ -12,6 +12,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm) #endif #ifdef CONFIG_KVM_XICS ret = ret || (kvm->arch.xics != NULL); + ret = ret || (kvm->arch.xive != NULL); #endif smp_rmb(); return ret; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 95c91a9de351..de79bd721ec7 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -37,6 +37,8 @@ #include #include #include +#include + #include "timing.h" #include "irq.h" #include "../mm/mmu_decl.h" @@ -699,7 +701,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); break; case KVMPPC_IRQ_XICS: - kvmppc_xics_free_icp(vcpu); + if (xive_enabled()) + kvmppc_xive_cleanup_vcpu(vcpu); + else + kvmppc_xics_free_icp(vcpu); break; } @@ -1219,8 +1224,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, r = -EPERM; dev = kvm_device_from_filp(f.file); - if (dev) - r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); + if (dev) { + if (xive_enabled()) + r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); + else + r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); + } fdput(f); break; @@ -1244,7 +1253,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm) return true; #endif #ifdef CONFIG_KVM_XICS - if (kvm->arch.xics) + if (kvm->arch.xics || kvm->arch.xive) return true; #endif return false; diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index e0f856bfbfe8..d71cd773d870 100644 
--- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -890,3 +890,4 @@ EXPORT_SYMBOL_GPL(opal_leds_set_ind); EXPORT_SYMBOL_GPL(opal_write_oppanel_async); /* Export this for KVM */ EXPORT_SYMBOL_GPL(opal_int_set_mfrr); +EXPORT_SYMBOL_GPL(opal_int_eoi); diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index d9cd7f705f21..496036c93531 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -46,13 +46,15 @@ #endif bool __xive_enabled; +EXPORT_SYMBOL_GPL(__xive_enabled); bool xive_cmdline_disabled; /* We use only one priority for now */ static u8 xive_irq_priority; -/* TIMA */ +/* TIMA exported to KVM */ void __iomem *xive_tima; +EXPORT_SYMBOL_GPL(xive_tima); u32 xive_tima_offset; /* Backend ops */ @@ -345,8 +347,11 @@ static void xive_irq_eoi(struct irq_data *d) DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", d->irq, irqd_to_hwirq(d), xc->pending_prio); - /* EOI the source if it hasn't been disabled */ - if (!irqd_irq_disabled(d)) + /* + * EOI the source if it hasn't been disabled and hasn't + * been passed-through to a KVM guest + */ + if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d)) xive_do_source_eoi(irqd_to_hwirq(d), xd); /* @@ -689,9 +694,14 @@ static int xive_irq_set_affinity(struct irq_data *d, old_target = xd->target; - rc = xive_ops->configure_irq(hw_irq, - get_hard_smp_processor_id(target), - xive_irq_priority, d->irq); + /* + * Only configure the irq if it's not currently passed-through to + * a KVM guest + */ + if (!irqd_is_forwarded_to_vcpu(d)) + rc = xive_ops->configure_irq(hw_irq, + get_hard_smp_processor_id(target), + xive_irq_priority, d->irq); if (rc < 0) { pr_err("Error %d reconfiguring irq %d\n", rc, d->irq); return rc; @@ -771,6 +781,123 @@ static int xive_irq_retrigger(struct irq_data *d) return 1; } +static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) +{ + struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + int rc; + u8 pq; + + /* + * We only support this on interrupts that do not require + * firmware calls for masking and unmasking + */ + if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) + return -EIO; + + /* + * This is called by KVM with state non-NULL for enabling + * pass-through or NULL for disabling it + */ + if (state) { + irqd_set_forwarded_to_vcpu(d); + + /* Set it to PQ=10 state to prevent further sends */ + pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10); + + /* No target ? nothing to do */ + if (xd->target == XIVE_INVALID_TARGET) { + /* + * An untargetted interrupt should have been + * also masked at the source + */ + WARN_ON(pq & 2); + + return 0; + } + + /* + * If P was set, adjust state to PQ=11 to indicate + * that a resend is needed for the interrupt to reach + * the guest. Also remember the value of P. + * + * This also tells us that it's in flight to a host queue + * or has already been fetched but hasn't been EOIed yet + * by the host. This it's potentially using up a host + * queue slot. This is important to know because as long + * as this is the case, we must not hard-unmask it when + * "returning" that interrupt to the host. + * + * This saved_p is cleared by the host EOI, when we know + * for sure the queue slot is no longer in use. + */ + if (pq & 2) { + pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11); + xd->saved_p = true; + + /* + * Sync the XIVE source HW to ensure the interrupt + * has gone through the EAS before we change its + * target to the guest. 
That should guarantee us + * that we *will* eventually get an EOI for it on + * the host. Otherwise there would be a small window + * for P to be seen here but the interrupt going + * to the guest queue. + */ + if (xive_ops->sync_source) + xive_ops->sync_source(hw_irq); + } else + xd->saved_p = false; + } else { + irqd_clr_forwarded_to_vcpu(d); + + /* No host target ? hard mask and return */ + if (xd->target == XIVE_INVALID_TARGET) { + xive_do_source_set_mask(xd, true); + return 0; + } + + /* + * Sync the XIVE source HW to ensure the interrupt + * has gone through the EAS before we change its + * target to the host. + */ + if (xive_ops->sync_source) + xive_ops->sync_source(hw_irq); + + /* + * By convention we are called with the interrupt in + * a PQ=10 or PQ=11 state, ie, it won't fire and will + * have latched in Q whether there's a pending HW + * interrupt or not. + * + * First reconfigure the target. + */ + rc = xive_ops->configure_irq(hw_irq, + get_hard_smp_processor_id(xd->target), + xive_irq_priority, d->irq); + if (rc) + return rc; + + /* + * Then if saved_p is not set, effectively re-enable the + * interrupt with an EOI. If it is set, we know there is + * still a message in a host queue somewhere that will be + * EOId eventually. + * + * Note: We don't check irqd_irq_disabled(). Effectively, + * we *will* let the irq get through even if masked if the + * HW is still firing it in order to deal with the whole + * saved_p business properly. If the interrupt triggers + * while masked, the generic code will re-mask it anyway. + */ + if (!xd->saved_p) + xive_do_source_eoi(hw_irq, xd); + + } + return 0; +} + static struct irq_chip xive_irq_chip = { .name = "XIVE-IRQ", .irq_startup = xive_irq_startup, @@ -781,12 +908,14 @@ static struct irq_chip xive_irq_chip = { .irq_set_affinity = xive_irq_set_affinity, .irq_set_type = xive_irq_set_type, .irq_retrigger = xive_irq_retrigger, + .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity, }; bool is_xive_irq(struct irq_chip *chip) { return chip == &xive_irq_chip; } +EXPORT_SYMBOL_GPL(is_xive_irq); void xive_cleanup_irq_data(struct xive_irq_data *xd) { @@ -801,6 +930,7 @@ void xive_cleanup_irq_data(struct xive_irq_data *xd) xd->trig_mmio = NULL; } } +EXPORT_SYMBOL_GPL(xive_cleanup_irq_data); static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) { diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 5fae59186cb2..6feac0a758e1 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "xive-internal.h" @@ -95,6 +96,7 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) } return 0; } +EXPORT_SYMBOL_GPL(xive_native_populate_irq_data); int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq) { @@ -108,6 +110,8 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq) } return rc == 0 ? 
0 : -ENXIO; } +EXPORT_SYMBOL_GPL(xive_native_configure_irq); + /* This can be called multiple time to change a queue configuration */ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio, @@ -172,6 +176,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio, fail: return rc; } +EXPORT_SYMBOL_GPL(xive_native_configure_queue); static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio) { @@ -191,6 +196,7 @@ void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio) { __xive_native_disable_queue(vp_id, q, prio); } +EXPORT_SYMBOL_GPL(xive_native_disable_queue); static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio) { @@ -261,6 +267,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc) } return 0; } +#endif /* CONFIG_SMP */ u32 xive_native_alloc_irq(void) { @@ -276,6 +283,7 @@ u32 xive_native_alloc_irq(void) return 0; return rc; } +EXPORT_SYMBOL_GPL(xive_native_alloc_irq); void xive_native_free_irq(u32 irq) { @@ -286,7 +294,9 @@ void xive_native_free_irq(u32 irq) msleep(1); } } +EXPORT_SYMBOL_GPL(xive_native_free_irq); +#ifdef CONFIG_SMP static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc) { s64 rc; @@ -382,7 +392,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) return; /* Enable the pool VP */ - vp = xive_pool_vps + get_hard_smp_processor_id(cpu); + vp = xive_pool_vps + cpu; pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); for (;;) { rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0); @@ -427,7 +437,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc) in_be64(xive_tima + TM_SPC_PULL_POOL_CTX); /* Disable it */ - vp = xive_pool_vps + get_hard_smp_processor_id(cpu); + vp = xive_pool_vps + cpu; for (;;) { rc = opal_xive_set_vp_info(vp, 0, 0); if (rc != OPAL_BUSY) @@ -436,10 +446,11 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc) } } -static void xive_native_sync_source(u32 hw_irq) +void xive_native_sync_source(u32 hw_irq) { opal_xive_sync(XIVE_SYNC_EAS, hw_irq); } +EXPORT_SYMBOL_GPL(xive_native_sync_source); static const struct xive_ops xive_native_ops = { .populate_irq_data = xive_native_populate_irq_data, @@ -500,10 +511,24 @@ static bool xive_parse_provisioning(struct device_node *np) return true; } +static void xive_native_setup_pools(void) +{ + /* Allocate a pool big enough */ + pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids); + + xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids); + if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP)) + pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n"); + + pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n", + xive_pool_vps, nr_cpu_ids); +} + u32 xive_native_default_eq_shift(void) { return xive_queue_shift; } +EXPORT_SYMBOL_GPL(xive_native_default_eq_shift); bool xive_native_init(void) { @@ -513,7 +538,7 @@ bool xive_native_init(void) struct property *prop; u8 max_prio = 7; const __be32 *p; - u32 val; + u32 val, cpu; s64 rc; if (xive_cmdline_disabled) @@ -549,7 +574,11 @@ bool xive_native_init(void) break; } - /* Grab size of provisioning pages */ + /* Configure Thread Management areas for KVM */ + for_each_possible_cpu(cpu) + kvmppc_set_xive_tima(cpu, r.start, tima); + + /* Grab size of provisionning pages */ xive_parse_provisioning(np); /* Switch the XIVE to exploitation mode */ @@ -559,6 +588,9 @@ bool xive_native_init(void) return false; } + /* Setup some dummy HV pool VPs */ + 
xive_native_setup_pools(); + /* Initialize XIVE core with our backend */ if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS, max_prio)) { @@ -637,3 +669,47 @@ void xive_native_free_vp_block(u32 vp_base) pr_warn("OPAL error %lld freeing VP block\n", rc); } EXPORT_SYMBOL_GPL(xive_native_free_vp_block); + +int xive_native_enable_vp(u32 vp_id) +{ + s64 rc; + + for (;;) { + rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0); + if (rc != OPAL_BUSY) + break; + msleep(1); + } + return rc ? -EIO : 0; +} +EXPORT_SYMBOL_GPL(xive_native_enable_vp); + +int xive_native_disable_vp(u32 vp_id) +{ + s64 rc; + + for (;;) { + rc = opal_xive_set_vp_info(vp_id, 0, 0); + if (rc != OPAL_BUSY) + break; + msleep(1); + } + return rc ? -EIO : 0; +} +EXPORT_SYMBOL_GPL(xive_native_disable_vp); + +int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id) +{ + __be64 vp_cam_be; + __be32 vp_chip_id_be; + s64 rc; + + rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be); + if (rc) + return -EIO; + *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu; + *out_chip_id = be32_to_cpu(vp_chip_id_be); + + return 0; +} +EXPORT_SYMBOL_GPL(xive_native_get_vp_info); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2c14ad9809da..d1a6e554ee68 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1165,7 +1165,6 @@ int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); void kvm_unregister_device_ops(u32 type); extern struct kvm_device_ops kvm_mpic_ops; -extern struct kvm_device_ops kvm_xics_ops; extern struct kvm_device_ops kvm_arm_vgic_v2_ops; extern struct kvm_device_ops kvm_arm_vgic_v3_ops; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a17d78759727..1b0da5771f71 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2839,10 +2839,6 @@ static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, #endif - -#ifdef CONFIG_KVM_XICS - [KVM_DEV_TYPE_XICS] = &kvm_xics_ops, -#endif }; int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) -- cgit v1.2.3 From ed6473ddc704a2005b9900ca08e236ebb2d8540a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 26 Apr 2017 11:55:27 -0400 Subject: NFSv4: Fix callback server shutdown We want to use kthread_stop() in order to ensure the threads are shut down before we tear down the nfs_callback_info in nfs_callback_down. Tested-and-reviewed-by: Kinglong Mee Reported-by: Kinglong Mee Fixes: bb6aeba736ba9 ("NFSv4.x: Switch to using svc_set_num_threads()...") Signed-off-by: Trond Myklebust Signed-off-by: J. 
Bruce Fields --- fs/nfs/callback.c | 24 ++++++++++++++++-------- include/linux/sunrpc/svc.h | 1 + net/sunrpc/svc.c | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index c5e27ebd8da8..73a1f928226c 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -76,7 +76,10 @@ nfs4_callback_svc(void *vrqstp) set_freezable(); - while (!kthread_should_stop()) { + while (!kthread_freezable_should_stop(NULL)) { + + if (signal_pending(current)) + flush_signals(current); /* * Listen for a request on the socket */ @@ -85,6 +88,8 @@ nfs4_callback_svc(void *vrqstp) continue; svc_process(rqstp); } + svc_exit_thread(rqstp); + module_put_and_exit(0); return 0; } @@ -103,9 +108,10 @@ nfs41_callback_svc(void *vrqstp) set_freezable(); - while (!kthread_should_stop()) { - if (try_to_freeze()) - continue; + while (!kthread_freezable_should_stop(NULL)) { + + if (signal_pending(current)) + flush_signals(current); prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); spin_lock_bh(&serv->sv_cb_lock); @@ -121,11 +127,13 @@ nfs41_callback_svc(void *vrqstp) error); } else { spin_unlock_bh(&serv->sv_cb_lock); - schedule(); + if (!kthread_should_stop()) + schedule(); finish_wait(&serv->sv_cb_waitq, &wq); } - flush_signals(current); } + svc_exit_thread(rqstp); + module_put_and_exit(0); return 0; } @@ -221,14 +229,14 @@ err_bind: static struct svc_serv_ops nfs40_cb_sv_ops = { .svo_function = nfs4_callback_svc, .svo_enqueue_xprt = svc_xprt_do_enqueue, - .svo_setup = svc_set_num_threads, + .svo_setup = svc_set_num_threads_sync, .svo_module = THIS_MODULE, }; #if defined(CONFIG_NFS_V4_1) static struct svc_serv_ops nfs41_cb_sv_ops = { .svo_function = nfs41_callback_svc, .svo_enqueue_xprt = svc_xprt_do_enqueue, - .svo_setup = svc_set_num_threads, + .svo_setup = svc_set_num_threads_sync, .svo_module = THIS_MODULE, }; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 6ef19cf658b4..94631026f79c 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -473,6 +473,7 @@ void svc_pool_map_put(void); struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, struct svc_serv_ops *); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); +int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int); int svc_pool_stats_open(struct svc_serv *serv, struct file *file); void svc_destroy(struct svc_serv *); void svc_shutdown_net(struct svc_serv *, struct net *); diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 98dc33ae738b..bc0f5a0ecbdc 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -795,6 +795,44 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) } EXPORT_SYMBOL_GPL(svc_set_num_threads); +/* destroy old threads */ +static int +svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) +{ + struct task_struct *task; + unsigned int state = serv->sv_nrthreads-1; + + /* destroy old threads */ + do { + task = choose_victim(serv, pool, &state); + if (task == NULL) + break; + kthread_stop(task); + nrservs++; + } while (nrservs < 0); + return 0; +} + +int +svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs) +{ + if (pool == NULL) { + /* The -1 assumes caller has done a svc_get() */ + nrservs -= (serv->sv_nrthreads-1); + } else { + spin_lock_bh(&pool->sp_lock); + nrservs -= pool->sp_nrthreads; + spin_unlock_bh(&pool->sp_lock); + } + + if (nrservs > 0) + 
return svc_start_kthreads(serv, pool, nrservs); + if (nrservs < 0) + return svc_stop_kthreads(serv, pool, nrservs); + return 0; +} +EXPORT_SYMBOL_GPL(svc_set_num_threads_sync); + /* * Called from a server thread as it's exiting. Caller must hold the "service * mutex" for the service. -- cgit v1.2.3 From 9cf7adeca1f307b578021f227119495eecb1a510 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Fri, 28 Apr 2017 08:53:22 +0800 Subject: ACPICA: iasl: add ASL conversion tool ACPICA commit c04d310039d3e0ed1cb62876fe7e596fbc75ab01 ACPICA commit a65c1df7e6b4bad8e37df822018c40c6c446add9 The key feature of this utility is that the original comments within the input ASL files are preserved during the conversion process, and included within the converted ASL+ file -- thus creating a transparent conversion of existing ASL files to ASL+ (ASL 2.0) This patch is an automatic generation of the ASL converter commit, Linux kernel isn't affected by the functionality provided in this commit, but requires the linuxized changes to support future ACPICA release automation. Link: https://github.com/acpica/acpica/commit/c04d3100 Link: https://github.com/acpica/acpica/commit/a65c1df7 Signed-off-by: Bob Moore Signed-off-by: Jung-uk Kim Signed-off-by: Lv Zheng Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpica/acconvert.h | 144 ++++++++++++++++++++++++++++++++++++++++ drivers/acpi/acpica/acglobal.h | 53 +++++++++++++++ drivers/acpi/acpica/aclocal.h | 99 ++++++++++++++++++++++----- drivers/acpi/acpica/acmacros.h | 35 ++++++++++ drivers/acpi/acpica/acopcode.h | 2 + drivers/acpi/acpica/amlcode.h | 4 +- drivers/acpi/acpica/psargs.c | 23 +++++++ drivers/acpi/acpica/psloop.c | 32 +++++++++ drivers/acpi/acpica/psobject.c | 38 +++++++++++ drivers/acpi/acpica/psopcode.c | 5 +- drivers/acpi/acpica/psopinfo.c | 2 +- drivers/acpi/acpica/pstree.c | 7 ++ drivers/acpi/acpica/psutils.c | 11 +++ drivers/acpi/acpica/utalloc.c | 50 ++++++++++++++ drivers/acpi/acpica/utdebug.c | 1 + include/acpi/acconfig.h | 1 + include/acpi/actbl2.h | 1 + 17 files changed, 490 insertions(+), 18 deletions(-) create mode 100644 drivers/acpi/acpica/acconvert.h (limited to 'include') diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h new file mode 100644 index 000000000000..c84223b60b35 --- /dev/null +++ b/drivers/acpi/acpica/acconvert.h @@ -0,0 +1,144 @@ +/****************************************************************************** + * + * Module Name: acapps - common include for ACPI applications/tools + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2017, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef _ACCONVERT +#define _ACCONVERT + +/* Definitions for comment state */ + +#define ASL_COMMENT_STANDARD 1 +#define ASLCOMMENT_INLINE 2 +#define ASL_COMMENT_OPEN_PAREN 3 +#define ASL_COMMENT_CLOSE_PAREN 4 +#define ASL_COMMENT_CLOSE_BRACE 5 + +/* Definitions for comment print function*/ + +#define AML_COMMENT_STANDARD 1 +#define AMLCOMMENT_INLINE 2 +#define AML_COMMENT_END_NODE 3 +#define AML_NAMECOMMENT 4 +#define AML_COMMENT_CLOSE_BRACE 5 +#define AML_COMMENT_ENDBLK 6 +#define AML_COMMENT_INCLUDE 7 + +#ifdef ACPI_ASL_COMPILER +/* + * cvcompiler + */ +void +cv_process_comment(struct asl_comment_state current_state, + char *string_buffer, int c1); + +void +cv_process_comment_type2(struct asl_comment_state current_state, + char *string_buffer); + +u32 cv_calculate_comment_lengths(union acpi_parse_object *op); + +void cv_process_comment_state(char input); + +char *cv_append_inline_comment(char *inline_comment, char *to_add); + +void cv_add_to_comment_list(char *to_add); + +void cv_place_comment(u8 type, char *comment_string); + +u32 cv_parse_op_block_type(union acpi_parse_object *op); + +struct acpi_comment_node *cv_comment_node_calloc(void); + +void cg_write_aml_def_block_comment(union acpi_parse_object *op); + +void +cg_write_one_aml_comment(union acpi_parse_object *op, + char *comment_to_print, u8 input_option); + +void cg_write_aml_comment(union acpi_parse_object *op); + +/* + * cvparser + */ +void +cv_init_file_tree(struct acpi_table_header *table, + u8 *aml_start, u32 aml_length); + +void cv_clear_op_comments(union acpi_parse_object *op); + +struct acpi_file_node *cv_filename_exists(char *filename, + struct acpi_file_node *head); + +void cv_label_file_node(union acpi_parse_object *op); + +void +cv_capture_list_comments(struct acpi_parse_state *parser_state, + struct acpi_comment_node *list_head, + struct acpi_comment_node *list_tail); + +void cv_capture_comments_only(struct acpi_parse_state *parser_state); + +void cv_capture_comments(struct acpi_walk_state *walk_state); + +void cv_transfer_comments(union acpi_parse_object *op); + +/* + * cvdisasm + */ +void cv_switch_files(u32 level, union acpi_parse_object *op); + +u8 cv_file_has_switched(union acpi_parse_object *op); + +void cv_close_paren_write_comment(union acpi_parse_object *op, u32 level); + +void cv_close_brace_write_comment(union acpi_parse_object *op, u32 level); + +void +cv_print_one_comment_list(struct acpi_comment_node *comment_list, u32 level); + +void +cv_print_one_comment_type(union acpi_parse_object *op, + u8 comment_type, char *end_str, u32 level); + +#endif + 
+#endif /* _ACCONVERT */ diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 1d955fe216c4..abe8c316908c 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -370,6 +370,59 @@ ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]); #endif +/* + * Meant for the -ca option. + */ +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_close_brace_comment, NULL); + +ACPI_INIT_GLOBAL(char *, acpi_gbl_root_filename, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL); +ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head, + NULL); +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail, + NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head, + NULL); +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail, + NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head, + NULL); +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail, + NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head, + NULL); +ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail, + NULL); + +ACPI_INIT_GLOBAL(struct acpi_comment_addr_node, + *acpi_gbl_comment_addr_list_head, NULL); + +ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL); + +ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL); + +ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache); +ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache); +ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache); + +ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE); + +ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE); +ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL); + +ACPI_GLOBAL(char, acpi_gbl_table_sig[4]); + /***************************************************************************** * * Application globals diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index ca984261acbb..f9b3f7fef462 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle; /* Total number of aml opcodes defined */ -#define AML_NUM_OPCODES 0x82 +#define AML_NUM_OPCODES 0x83 /* Forward declarations */ @@ -754,21 +754,52 @@ union acpi_parse_value { #define ACPI_DISASM_ONLY_MEMBERS(a) #endif +#if defined(ACPI_ASL_COMPILER) +#define ACPI_CONVERTER_ONLY_MEMBERS(a) a; +#else +#define ACPI_CONVERTER_ONLY_MEMBERS(a) +#endif + #define ACPI_PARSE_COMMON \ - union acpi_parse_object *parent; /* Parent op */\ - u8 descriptor_type; /* To differentiate various internal objs */\ - u8 flags; /* Type of Op */\ - u16 aml_opcode; /* AML opcode */\ - u8 *aml; /* Address of declaration in AML */\ - union acpi_parse_object *next; /* Next op */\ - struct acpi_namespace_node *node; /* For use by interpreter */\ - union acpi_parse_value value; /* Value or args associated with the opcode */\ - u8 arg_list_length; /* Number of elements in the arg list */\ - ACPI_DISASM_ONLY_MEMBERS (\ - u16 
disasm_flags; /* Used during AML disassembly */\ - u8 disasm_opcode; /* Subtype used for disassembly */\ - char *operator_symbol;/* Used for C-style operator name strings */\ - char aml_op_name[16]) /* Op name (debug only) */ + union acpi_parse_object *parent; /* Parent op */\ + u8 descriptor_type; /* To differentiate various internal objs */\ + u8 flags; /* Type of Op */\ + u16 aml_opcode; /* AML opcode */\ + u8 *aml; /* Address of declaration in AML */\ + union acpi_parse_object *next; /* Next op */\ + struct acpi_namespace_node *node; /* For use by interpreter */\ + union acpi_parse_value value; /* Value or args associated with the opcode */\ + u8 arg_list_length; /* Number of elements in the arg list */\ + ACPI_DISASM_ONLY_MEMBERS (\ + u16 disasm_flags; /* Used during AML disassembly */\ + u8 disasm_opcode; /* Subtype used for disassembly */\ + char *operator_symbol; /* Used for C-style operator name strings */\ + char aml_op_name[16]) /* Op name (debug only) */\ + ACPI_CONVERTER_ONLY_MEMBERS (\ + char *inline_comment; /* Inline comment */\ + char *end_node_comment; /* End of node comment */\ + char *name_comment; /* Comment associated with the first parameter of the name node */\ + char *close_brace_comment; /* Comments that come after } on the same as } */\ + struct acpi_comment_node *comment_list; /* comments that appears before this node */\ + struct acpi_comment_node *end_blk_comment; /* comments that at the end of a block but before ) or } */\ + char *cv_filename; /* Filename associated with this node. Used for ASL/ASL+ converter */\ + char *cv_parent_filename) /* Parent filename associated with this node. Used for ASL/ASL+ converter */ + +/* categories of comments */ + +typedef enum { + STANDARD_COMMENT = 1, + INLINE_COMMENT, + ENDNODE_COMMENT, + OPENBRACE_COMMENT, + CLOSE_BRACE_COMMENT, + STD_DEFBLK_COMMENT, + END_DEFBLK_COMMENT, + FILENAME_COMMENT, + PARENTFILENAME_COMMENT, + ENDBLK_COMMENT, + INCLUDE_COMMENT +} asl_comment_types; /* Internal opcodes for disasm_opcode field above */ @@ -789,6 +820,34 @@ union acpi_parse_value { #define ACPI_DASM_CASE 0x0E /* If/Else is a Case in a Switch/Case block */ #define ACPI_DASM_DEFAULT 0x0F /* Else is a Default in a Switch/Case block */ +/* + * List struct used in the -ca option + */ +struct acpi_comment_node { + char *comment; + struct acpi_comment_node *next; +}; + +struct acpi_comment_addr_node { + u8 *addr; + struct acpi_comment_addr_node *next; +}; + +/* + * File node - used for "Include" operator file stack and + * depdendency tree for the -ca option + */ +struct acpi_file_node { + void *file; + char *filename; + char *file_start; /* Points to AML and indicates when the AML for this particular file starts. */ + char *file_end; /* Points to AML and indicates when the AML for this particular file ends. 
*/ + struct acpi_file_node *next; + struct acpi_file_node *parent; + u8 include_written; + struct acpi_comment_node *include_comment; +}; + /* * Generic operation (for example: If, While, Store) */ @@ -814,6 +873,8 @@ struct acpi_parse_obj_asl { ACPI_PARSE_COMMON union acpi_parse_object *child; union acpi_parse_object *parent_method; char *filename; + u8 file_changed; + char *parent_filename; char *external_name; char *namepath; char name_seg[4]; @@ -843,6 +904,14 @@ union acpi_parse_object { struct acpi_parse_obj_asl asl; }; +struct asl_comment_state { + u8 comment_type; + u32 spaces_before; + union acpi_parse_object *latest_parse_node; + union acpi_parse_object *parsing_paren_brace_node; + u8 capture_comments; +}; + /* * Parse state - one state per parser invocation and each control * method. diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index c3337514e0ed..c7f0c96cc00f 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -493,4 +493,39 @@ #define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7')) +/* + * Macors used for the ASL-/ASL+ converter utility + */ +#ifdef ACPI_ASL_COMPILER + +#define ASL_CV_LABEL_FILENODE(a) cv_label_file_node(a); +#define ASL_CV_CAPTURE_COMMENTS_ONLY(a) cv_capture_comments_only (a); +#define ASL_CV_CAPTURE_COMMENTS(a) cv_capture_comments (a); +#define ASL_CV_TRANSFER_COMMENTS(a) cv_transfer_comments (a); +#define ASL_CV_CLOSE_PAREN(a,b) cv_close_paren_write_comment(a,b); +#define ASL_CV_CLOSE_BRACE(a,b) cv_close_brace_write_comment(a,b); +#define ASL_CV_SWITCH_FILES(a,b) cv_switch_files(a,b); +#define ASL_CV_CLEAR_OP_COMMENTS(a) cv_clear_op_comments(a); +#define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) cv_print_one_comment_type (a,b,c,d); +#define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) cv_print_one_comment_list (a,b); +#define ASL_CV_FILE_HAS_SWITCHED(a) cv_file_has_switched(a) +#define ASL_CV_INIT_FILETREE(a,b,c) cv_init_file_tree(a,b,c); + +#else + +#define ASL_CV_LABEL_FILENODE(a) +#define ASL_CV_CAPTURE_COMMENTS_ONLY(a) +#define ASL_CV_CAPTURE_COMMENTS(a) +#define ASL_CV_TRANSFER_COMMENTS(a) +#define ASL_CV_CLOSE_PAREN(a,b) acpi_os_printf (")"); +#define ASL_CV_CLOSE_BRACE(a,b) acpi_os_printf ("}"); +#define ASL_CV_SWITCH_FILES(a,b) +#define ASL_CV_CLEAR_OP_COMMENTS(a) +#define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) +#define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) +#define ASL_CV_FILE_HAS_SWITCHED(a) 0 +#define ASL_CV_INIT_FILETREE(a,b,c) + +#endif + #endif /* ACMACROS_H */ diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index e758f098ff4b..a5d9af758c52 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -90,6 +90,7 @@ #define ARGP_BUFFER_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_BYTELIST) #define ARGP_BYTE_OP ARGP_LIST1 (ARGP_BYTEDATA) #define ARGP_BYTELIST_OP ARGP_LIST1 (ARGP_NAMESTRING) +#define ARGP_COMMENT_OP ARGP_LIST2 (ARGP_BYTEDATA, ARGP_COMMENT) #define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SIMPLENAME, ARGP_TARGET) @@ -223,6 +224,7 @@ #define ARGI_BUFFER_OP ARGI_LIST1 (ARGI_INTEGER) #define ARGI_BYTE_OP ARGI_INVALID_OPCODE #define ARGI_BYTELIST_OP ARGI_INVALID_OPCODE +#define ARGI_COMMENT_OP ARGI_INVALID_OPCODE #define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_ANYTYPE, ARGI_ANYTYPE, ARGI_TARGETREF) #define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF) 
#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF) diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 7e2b369487ae..176f7e9b4d0e 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -137,7 +137,8 @@ #define AML_NOOP_OP (u16) 0xa3 #define AML_RETURN_OP (u16) 0xa4 #define AML_BREAK_OP (u16) 0xa5 -#define AML_BREAKPOINT_OP (u16) 0xcc +#define AML_COMMENT_OP (u16) 0xa9 +#define AML_BREAKPOINT_OP (u16) 0xcc #define AML_ONES_OP (u16) 0xff /* @@ -236,6 +237,7 @@ #define ARGP_SIMPLENAME 0x12 /* name_string | local_term | arg_term */ #define ARGP_NAME_OR_REF 0x13 /* For object_type only */ #define ARGP_MAX 0x13 +#define ARGP_COMMENT 0x14 /* * Resolved argument types for the AML Interpreter diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index cda1e151dbc7..eb9dfaca555f 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -47,6 +47,7 @@ #include "amlcode.h" #include "acnamesp.h" #include "acdispat.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psargs") @@ -502,6 +503,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state ACPI_FUNCTION_TRACE(ps_get_next_field); + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); aml = parser_state->aml; /* Determine field type */ @@ -546,6 +548,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state /* Decode the field type */ + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); switch (opcode) { case AML_INT_NAMEDFIELD_OP: @@ -555,6 +558,22 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state acpi_ps_set_name(field, name); parser_state->aml += ACPI_NAME_SIZE; + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); + +#ifdef ACPI_ASL_COMPILER + /* + * Because the package length isn't represented as a parse tree object, + * take comments surrounding this and add to the previously created + * parse node. 
+ */ + if (field->common.inline_comment) { + field->common.name_comment = + field->common.inline_comment; + } + field->common.inline_comment = acpi_gbl_current_inline_comment; + acpi_gbl_current_inline_comment = NULL; +#endif + /* Get the length which is encoded as a package length */ field->common.value.size = @@ -609,11 +628,13 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) { parser_state->aml++; + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); pkg_end = parser_state->aml; pkg_length = acpi_ps_get_next_package_length(parser_state); pkg_end += pkg_length; + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); if (parser_state->aml < pkg_end) { /* Non-empty list */ @@ -630,6 +651,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state opcode = ACPI_GET8(parser_state->aml); parser_state->aml++; + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); switch (opcode) { case AML_BYTE_OP: /* AML_BYTEDATA_ARG */ @@ -660,6 +682,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state /* Fill in bytelist data */ + ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state); arg->named.value.size = buffer_length; arg->named.data = parser_state->aml; } diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index b7da881b66da..b4224005783c 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -55,6 +55,7 @@ #include "acparser.h" #include "acdispat.h" #include "amlcode.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psloop") @@ -132,6 +133,21 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, !walk_state->arg_count) { walk_state->aml = walk_state->parser_state.aml; + switch (op->common.aml_opcode) { + case AML_METHOD_OP: + case AML_BUFFER_OP: + case AML_PACKAGE_OP: + case AML_VARIABLE_PACKAGE_OP: + case AML_WHILE_OP: + + break; + + default: + + ASL_CV_CAPTURE_COMMENTS(walk_state); + break; + } + status = acpi_ps_get_next_arg(walk_state, &(walk_state->parser_state), @@ -480,6 +496,8 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) /* Iterative parsing loop, while there is more AML to process: */ while ((parser_state->aml < parser_state->aml_end) || (op)) { + ASL_CV_CAPTURE_COMMENTS(walk_state); + aml_op_start = parser_state->aml; if (!op) { status = @@ -516,6 +534,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) */ walk_state->arg_count = 0; + switch (op->common.aml_opcode) { + case AML_BYTE_OP: + case AML_WORD_OP: + case AML_DWORD_OP: + case AML_QWORD_OP: + + break; + + default: + + ASL_CV_CAPTURE_COMMENTS(walk_state); + break; + } + /* Are there any arguments that must be processed? 
*/ if (walk_state->arg_types) { diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index 5c4aff0f4f26..5bcb61831706 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "acparser.h" #include "amlcode.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psobject") @@ -190,6 +191,7 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state, */ while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) && (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) { + ASL_CV_CAPTURE_COMMENTS(walk_state); status = acpi_ps_get_next_arg(walk_state, &(walk_state->parser_state), @@ -203,6 +205,18 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state, INCREMENT_ARG_LIST(walk_state->arg_types); } + /* are there any inline comments associated with the name_seg?? If so, save this. */ + + ASL_CV_CAPTURE_COMMENTS(walk_state); + +#ifdef ACPI_ASL_COMPILER + if (acpi_gbl_current_inline_comment != NULL) { + unnamed_op->common.name_comment = + acpi_gbl_current_inline_comment; + acpi_gbl_current_inline_comment = NULL; + } +#endif + /* * Make sure that we found a NAME and didn't run out of arguments */ @@ -243,6 +257,30 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state, acpi_ps_append_arg(*op, unnamed_op->common.value.arg); +#ifdef ACPI_ASL_COMPILER + + /* save any comments that might be associated with unnamed_op. */ + + (*op)->common.inline_comment = unnamed_op->common.inline_comment; + (*op)->common.end_node_comment = unnamed_op->common.end_node_comment; + (*op)->common.close_brace_comment = + unnamed_op->common.close_brace_comment; + (*op)->common.name_comment = unnamed_op->common.name_comment; + (*op)->common.comment_list = unnamed_op->common.comment_list; + (*op)->common.end_blk_comment = unnamed_op->common.end_blk_comment; + (*op)->common.cv_filename = unnamed_op->common.cv_filename; + (*op)->common.cv_parent_filename = + unnamed_op->common.cv_parent_filename; + (*op)->named.aml = unnamed_op->common.aml; + + unnamed_op->common.inline_comment = NULL; + unnamed_op->common.end_node_comment = NULL; + unnamed_op->common.close_brace_comment = NULL; + unnamed_op->common.name_comment = NULL; + unnamed_op->common.comment_list = NULL; + unnamed_op->common.end_blk_comment = NULL; +#endif + if ((*op)->common.aml_opcode == AML_REGION_OP || (*op)->common.aml_opcode == AML_DATA_REGION_OP) { /* diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index b8f0617fd421..c343a0d5a3d2 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -652,7 +652,10 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 81 */ ACPI_OP("External", ARGP_EXTERNAL_OP, ARGI_EXTERNAL_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, /* ? */ - AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R) + AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R), +/* 82 */ ACPI_OP("Comment", ARGP_COMMENT_OP, ARGI_COMMENT_OP, + ACPI_TYPE_STRING, AML_CLASS_ARGUMENT, + AML_TYPE_LITERAL, AML_CONSTANT) /*! 
[End] no source code translation !*/ }; diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index 89f95b7f26e9..eff22950232b 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -226,7 +226,7 @@ const u8 acpi_gbl_short_op_index[256] = { /* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74, /* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A, /* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61, -/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, +/* 0xA8 */ 0x62, 0x82, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index a11d475b79cd..c06d6e2fc7a5 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "acparser.h" #include "amlcode.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("pstree") @@ -216,6 +217,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, next = acpi_ps_get_arg(op, 0); if (next) { + ASL_CV_LABEL_FILENODE(next); return (next); } @@ -223,6 +225,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, next = op->common.next; if (next) { + ASL_CV_LABEL_FILENODE(next); return (next); } @@ -233,6 +236,8 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, while (parent) { arg = acpi_ps_get_arg(parent, 0); while (arg && (arg != origin) && (arg != op)) { + + ASL_CV_LABEL_FILENODE(arg); arg = arg->common.next; } @@ -247,6 +252,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, /* Found sibling of parent */ + ASL_CV_LABEL_FILENODE(parent->common.next); return (parent->common.next); } @@ -254,6 +260,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, parent = parent->common.parent; } + ASL_CV_LABEL_FILENODE(next); return (next); } diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 2fa38bb76a55..02642760cb93 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "acparser.h" #include "amlcode.h" +#include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psutils") @@ -152,6 +153,15 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml) acpi_ps_init_op(op, opcode); op->common.aml = aml; op->common.flags = flags; + ASL_CV_CLEAR_OP_COMMENTS(op); + + if (opcode == AML_SCOPE_OP) { + acpi_gbl_current_scope = op; + } + } + + if (gbl_capture_comments) { + ASL_CV_TRANSFER_COMMENTS(op); } return (op); @@ -174,6 +184,7 @@ void acpi_ps_free_op(union acpi_parse_object *op) { ACPI_FUNCTION_NAME(ps_free_op); + ASL_CV_CLEAR_OP_COMMENTS(op); if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n", op)); diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index a3401bd29413..5594a359dbf1 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -142,6 +142,45 @@ acpi_status acpi_ut_create_caches(void) if (ACPI_FAILURE(status)) { return (status); } +#ifdef ACPI_ASL_COMPILER + /* + * For use with the ASL-/ASL+ option. This cache keeps track of regular + * 0xA9 0x01 comments. 
+ */ + status = + acpi_os_create_cache("Acpi-Comment", + sizeof(struct acpi_comment_node), + ACPI_MAX_COMMENT_CACHE_DEPTH, + &acpi_gbl_reg_comment_cache); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* + * This cache keeps track of the starting addresses of where the comments + * lie. This helps prevent duplication of comments. + */ + status = + acpi_os_create_cache("Acpi-Comment-Addr", + sizeof(struct acpi_comment_addr_node), + ACPI_MAX_COMMENT_CACHE_DEPTH, + &acpi_gbl_comment_addr_cache); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* + * This cache will be used for nodes that represent files. + */ + status = + acpi_os_create_cache("Acpi-File", sizeof(struct acpi_file_node), + ACPI_MAX_COMMENT_CACHE_DEPTH, + &acpi_gbl_file_cache); + if (ACPI_FAILURE(status)) { + return (status); + } +#endif + #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Memory allocation lists */ @@ -201,6 +240,17 @@ acpi_status acpi_ut_delete_caches(void) (void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache); acpi_gbl_ps_node_ext_cache = NULL; +#ifdef ACPI_ASL_COMPILER + (void)acpi_os_delete_cache(acpi_gbl_reg_comment_cache); + acpi_gbl_reg_comment_cache = NULL; + + (void)acpi_os_delete_cache(acpi_gbl_comment_addr_cache); + acpi_gbl_comment_addr_cache = NULL; + + (void)acpi_os_delete_cache(acpi_gbl_file_cache); + acpi_gbl_file_cache = NULL; +#endif + #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Debug only - display leftover memory allocation, if any */ diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index bd5ea3101eb7..615a885e2ca3 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -627,4 +627,5 @@ acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname) } ACPI_EXPORT_SYMBOL(acpi_trace_point) + #endif diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 07740072da55..6db3b4668b1a 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -78,6 +78,7 @@ #define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */ #define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */ #define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */ +#define ACPI_MAX_COMMENT_CACHE_DEPTH 96 /* Comments for the -ca option */ /* * Should the subsystem abort the loading of an ACPI table if the diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 83666cb3ec2c..eb0eadcf9918 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -87,6 +87,7 @@ #define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ #define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */ #define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ +#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */ #ifdef ACPI_UNDEFINED_TABLES /* -- cgit v1.2.3 From bc5150ef3331167f08adc9fd5c1f947bd5fdd1ff Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Wed, 26 Apr 2017 16:20:17 +0800 Subject: ACPICA: Update version to 20170303 ACPICA commit db13f9ffbdf16c0b4fb92d051c14c7b96f379f3f ACPICA commit c55bb526ac9233fe4abc5ea923e136354ce7779c Version 20170224. Version 20170303. Link: https://github.com/acpica/acpica/commit/db13f9ff Link: https://github.com/acpica/acpica/commit/c55bb526 Signed-off-by: Bob Moore Signed-off-by: Lv Zheng Signed-off-by: Rafael J. 
Wysocki --- include/acpi/acpixf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 3795386ea706..15c86ce4df53 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20170119 +#define ACPI_CA_VERSION 0x20170303 #include #include -- cgit v1.2.3 From d7b1eeb2ca039d04f1a1fcb241920cb112b4b52a Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 7 Apr 2017 18:39:07 +0800 Subject: drm/amdgpu:fix race condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sequence is protected by spinlock so don't access sequence in paramter seq when invoking this function. ~0 means to get the latest sequence number and 0 means none to get. Change-Id: Ib7a03f3cf5594deeb4ad333cc59b47a6bddfd1ad Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3 +++ include/uapi/drm/amdgpu_drm.h | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index cf0500671353..90d1ac8a80f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -273,6 +273,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, spin_lock(&ctx->ring_lock); + if (seq == ~0ull) + seq = ctx->rings[ring->idx].sequence - 1; + if (seq >= cring->sequence) { spin_unlock(&ctx->ring_lock); return ERR_PTR(-EINVAL); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 516a9f285730..92262d81d41e 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -295,7 +295,10 @@ union drm_amdgpu_gem_wait_idle { }; struct drm_amdgpu_wait_cs_in { - /** Command submission handle */ + /* Command submission handle + * handle equals 0 means none to wait for + * handle equal ~0ull meanas wait for the latest sequence number + */ __u64 handle; /** Absolute timeout to wait */ __u64 timeout; -- cgit v1.2.3 From 080b24ebdf230c80fe63e64633c0d52aca5d1a8e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 10 Apr 2017 15:32:43 -0400 Subject: drm/amdgpu: fix spelling in header comment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 92262d81d41e..95260e5043af 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -297,7 +297,7 @@ union drm_amdgpu_gem_wait_idle { struct drm_amdgpu_wait_cs_in { /* Command submission handle * handle equals 0 means none to wait for - * handle equal ~0ull meanas wait for the latest sequence number + * handle equals ~0ull means wait for the latest sequence number */ __u64 handle; /** Absolute timeout to wait */ -- cgit v1.2.3 From 408bfe7c3c5d036947b509356f494dc6b46025ff Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Thu, 27 Apr 2017 11:12:07 +0800 Subject: drm/amdgpu: export more gpu info for gfx9 v2: 64-bit aligned for gpu info v3: squash in wave_front_fix Signed-off-by: Ken Wang Signed-off-by: Junwei Zhang Reviewed-by: Alex Deucher Reviewed-by: Qiang Yu Signed-off-by: Alex Deucher --- 
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3 +++ include/uapi/drm/amdgpu_drm.h | 19 +++++++++++++++++++ 4 files changed, 37 insertions(+) (limited to 'include') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a4fc54c70f30..c9f935710d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -967,6 +967,9 @@ struct amdgpu_gfx_config { unsigned mc_arb_ramcfg; unsigned gb_addr_config; unsigned num_rbs; + unsigned gs_vgt_table_depth; + unsigned gs_prim_buffer_depth; + unsigned max_gs_waves_per_vgt; uint32_t tile_mode_array[32]; uint32_t macrotile_mode_array[16]; @@ -981,6 +984,7 @@ struct amdgpu_gfx_config { struct amdgpu_cu_info { uint32_t number; /* total active CU number */ uint32_t ao_cu_mask; + uint32_t wave_front_size; uint32_t bitmap[4][4]; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 832be632478f..21f616cdb279 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -546,10 +546,21 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (amdgpu_ngg) { dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[PRIM].gpu_addr; + dev_info.prim_buf_size = adev->gfx.ngg.buf[PRIM].size; dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[POS].gpu_addr; + dev_info.pos_buf_size = adev->gfx.ngg.buf[POS].size; dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[CNTL].gpu_addr; + dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[CNTL].size; dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[PARAM].gpu_addr; + dev_info.param_buf_size = adev->gfx.ngg.buf[PARAM].size; } + dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size; + dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs; + dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh; + dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches; + dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth; + dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth; + dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_waves_per_vgt; return copy_to_user(out, &dev_info, min((size_t)size, sizeof(dev_info))) ? 
-EFAULT : 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9a9cb90e2f5f..210d21c085f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -785,6 +785,9 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_prim_fifo_size_backend = 0x100; adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + adev->gfx.config.gs_vgt_table_depth = 32; + adev->gfx.config.gs_prim_buffer_depth = 1792; + adev->gfx.config.max_gs_waves_per_vgt = 32; gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN; break; default: diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 95260e5043af..6c249e5cfb09 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -767,6 +767,25 @@ struct drm_amdgpu_info_device { __u64 cntl_sb_buf_gpu_addr; /* NGG Parameter Cache */ __u64 param_buf_gpu_addr; + __u32 prim_buf_size; + __u32 pos_buf_size; + __u32 cntl_sb_buf_size; + __u32 param_buf_size; + /* wavefront size*/ + __u32 wave_front_size; + /* shader visible vgprs*/ + __u32 num_shader_visible_vgprs; + /* CU per shader array*/ + __u32 num_cu_per_sh; + /* number of tcc blocks*/ + __u32 num_tcc_blocks; + /* gs vgt table depth*/ + __u32 gs_vgt_table_depth; + /* gs primitive buffer depth*/ + __u32 gs_prim_buffer_depth; + /* max gs wavefront per vgt*/ + __u32 max_gs_waves_per_vgt; + __u32 _pad1; }; struct drm_amdgpu_info_hw_ip { -- cgit v1.2.3 From 461a6946b1f93f6720577fb06aa78e8cbd9291c9 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 26 Apr 2017 15:46:20 +0200 Subject: iommu: Remove pci.h include from trace/events/iommu.h The include file does not need any PCI specifics, so remove that include. Also fix the places that relied on it. 
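The fixups in the affected files follow the usual include-what-you-use pattern. Note that the angle-bracket header names in the diff hunks below did not survive this copy, so the header spelled out in this small sketch is reconstructed from the commit description and should be treated as an assumption:

	/* Before: compiled only because <trace/events/iommu.h> pulled in the
	 * PCI definitions transitively.
	 */
	#include <trace/events/iommu.h>

	/* After: the PCI user includes what it needs explicitly, so the
	 * tracepoint header no longer has to.
	 */
	#include <linux/pci.h>
	#include <trace/events/iommu.h>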
Signed-off-by: Joerg Roedel --- arch/arm64/mm/dma-mapping.c | 1 + drivers/infiniband/hw/qedr/main.c | 1 + drivers/iommu/fsl_pamu.h | 1 + drivers/iommu/rockchip-iommu.c | 1 + drivers/iommu/tegra-smmu.c | 1 + include/linux/dma-iommu.h | 1 + include/trace/events/iommu.h | 1 - 7 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 81cdb2e844ed..982f85b81624 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -28,6 +28,7 @@ #include #include #include +#include #include diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index b9b47e5cc8b3..33033624cd9b 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index aab723f91f12..c3434f29c967 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h @@ -20,6 +20,7 @@ #define __FSL_PAMU_H #include +#include #include diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 9afcbf79f0b0..0ba303a184dd 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 9305964250ac..eeb19f560a05 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 5725c94b1f12..abd946569515 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -20,6 +20,7 @@ #include #ifdef CONFIG_IOMMU_DMA +#include #include #include diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h index 2c7befb10f13..99254ed89212 100644 --- a/include/trace/events/iommu.h +++ b/include/trace/events/iommu.h @@ -11,7 +11,6 @@ #define _TRACE_IOMMU_H #include -#include struct device; -- cgit v1.2.3 From 208480bb273e15f42711bd47f70dc0fbfa2570b8 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 26 Apr 2017 15:49:57 +0200 Subject: iommu: Remove trace-events include from iommu.h It is not needed there anymore. All places needing it are fixed. 
Signed-off-by: Joerg Roedel --- drivers/media/platform/mtk-vpu/mtk_vpu.c | 1 + include/linux/iommu.h | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index 463b69c934be..e011c8dc0198 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "mtk_vpu.h" diff --git a/include/linux/iommu.h b/include/linux/iommu.h index abaa0ca848bc..dda8717545e9 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -26,8 +26,6 @@ #include #include -#include - #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ -- cgit v1.2.3 From 8b8642af15ed14b9a7a34d3401afbcc274533e13 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 7 Feb 2017 10:05:09 +0100 Subject: net: ethernet: ucc_geth: fix MEM_PART_MURAM mode Since commit 5093bb965a163 ("powerpc/QE: switch to the cpm_muram implementation"), muram area is not part of immrbar mapping anymore so immrbar_virt_to_phys() is not usable anymore. Fixes: 5093bb965a163 ("powerpc/QE: switch to the cpm_muram implementation") Signed-off-by: Christophe Leroy Acked-by: David S. Miller Acked-by: Li Yang Signed-off-by: Scott Wood --- drivers/net/ethernet/freescale/ucc_geth.c | 8 +++----- include/soc/fsl/qe/qe.h | 1 + 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 3f7ae9f64cd8..f77ba9fa257b 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -2594,11 +2594,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } else if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_MURAM) { out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, - (u32) immrbar_virt_to_phys(ugeth-> - p_tx_bd_ring[i])); + (u32)qe_muram_dma(ugeth->p_tx_bd_ring[i])); out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 
last_bd_completed_address, - (u32) immrbar_virt_to_phys(endOfRing)); + (u32)qe_muram_dma(endOfRing)); } } @@ -2844,8 +2843,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) } else if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_MURAM) { out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, - (u32) immrbar_virt_to_phys(ugeth-> - p_rx_bd_ring[i])); + (u32)qe_muram_dma(ugeth->p_rx_bd_ring[i])); } /* rest of fields handled by QE */ } diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index 70339d7958c0..0cd4c11479b1 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -243,6 +243,7 @@ static inline int qe_alive_during_sleep(void) #define qe_muram_free cpm_muram_free #define qe_muram_addr cpm_muram_addr #define qe_muram_offset cpm_muram_offset +#define qe_muram_dma cpm_muram_dma #define qe_setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr)) #define qe_clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr)) -- cgit v1.2.3 From b54ea82f01282253c85eb7e2fd2b6c96f7a027d8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 7 Feb 2017 10:05:11 +0100 Subject: soc/fsl/qe: get rid of immrbar_virt_to_phys() immrbar_virt_to_phys() is not used anymore Signed-off-by: Christophe Leroy Acked-by: Li Yang Signed-off-by: Scott Wood --- drivers/soc/fsl/qe/qe.c | 4 +--- include/soc/fsl/qe/immap_qe.h | 19 ------------------- 2 files changed, 1 insertion(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index ade168f5328e..d9c04f588f7f 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c @@ -66,7 +66,7 @@ static unsigned int qe_num_of_snum; static phys_addr_t qebase = -1; -phys_addr_t get_qe_base(void) +static phys_addr_t get_qe_base(void) { struct device_node *qe; int ret; @@ -90,8 +90,6 @@ phys_addr_t get_qe_base(void) return qebase; } -EXPORT_SYMBOL(get_qe_base); - void qe_reset(void) { if (qe_immr == NULL) diff --git a/include/soc/fsl/qe/immap_qe.h b/include/soc/fsl/qe/immap_qe.h index c76ef30b05ba..7baaabd5ec2c 100644 --- a/include/soc/fsl/qe/immap_qe.h +++ b/include/soc/fsl/qe/immap_qe.h @@ -464,25 +464,6 @@ struct qe_immap { } __attribute__ ((packed)); extern struct qe_immap __iomem *qe_immr; -extern phys_addr_t get_qe_base(void); - -/* - * Returns the offset within the QE address space of the given pointer. - * - * Note that the QE does not support 36-bit physical addresses, so if - * get_qe_base() returns a number above 4GB, the caller will probably fail. - */ -static inline phys_addr_t immrbar_virt_to_phys(void *address) -{ - void *q = (void *)qe_immr; - - /* Is it a MURAM address? */ - if ((address >= q) && (address < (q + QE_IMMAP_SIZE))) - return get_qe_base() + (address - q); - - /* It's an address returned by kmalloc */ - return virt_to_phys(address); -} #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_IMMAP_QE_H */ -- cgit v1.2.3 From 917362135b8a5c0680acf08807e9fc6179eb6c79 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 14 Apr 2017 20:32:49 +0200 Subject: power: supply: max17042_battery: Add default platform_data fallback data Some x86 machines use a max17047 fuel-gauge and x86 might be missing platform_data if not provided by SFI. This commit adds default platform_data as fallback option so that the driver can work on boards where no platform_data is provided. 
Since not all boards have a thermistor hooked up, set temp_min to 0 and change the health checks from temp <= temp_min to temp < temp_min to not trigger on such boards (where temp reads 0). Signed-off-by: Hans de Goede Reviewed-by: Krzysztof Kozlowski Signed-off-by: Sebastian Reichel --- drivers/power/supply/max17042_battery.c | 60 +++++++++++++++++++++++++++++---- include/linux/power/max17042_battery.h | 6 +++- 2 files changed, 58 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index a51b2965cd5c..f0ff6e880ff2 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -150,12 +150,12 @@ static int max17042_get_battery_health(struct max17042_chip *chip, int *health) if (ret < 0) goto health_error; - if (temp <= chip->pdata->temp_min) { + if (temp < chip->pdata->temp_min) { *health = POWER_SUPPLY_HEALTH_COLD; goto out; } - if (temp >= chip->pdata->temp_max) { + if (temp > chip->pdata->temp_max) { *health = POWER_SUPPLY_HEALTH_OVERHEAT; goto out; } @@ -772,8 +772,9 @@ static void max17042_init_worker(struct work_struct *work) #ifdef CONFIG_OF static struct max17042_platform_data * -max17042_get_pdata(struct device *dev) +max17042_get_pdata(struct max17042_chip *chip) { + struct device *dev = &chip->client->dev; struct device_node *np = dev->of_node; u32 prop; struct max17042_platform_data *pdata; @@ -806,10 +807,55 @@ max17042_get_pdata(struct device *dev) return pdata; } #else +static struct max17042_reg_data max17047_default_pdata_init_regs[] = { + /* + * Some firmwares do not set FullSOCThr, Enable End-of-Charge Detection + * when the voltage FG reports 95%, as recommended in the datasheet. + */ + { MAX17047_FullSOCThr, MAX17042_BATTERY_FULL << 8 }, +}; + static struct max17042_platform_data * -max17042_get_pdata(struct device *dev) +max17042_get_pdata(struct max17042_chip *chip) { - return dev->platform_data; + struct device *dev = &chip->client->dev; + struct max17042_platform_data *pdata; + int ret, misc_cfg; + + if (dev->platform_data) + return dev->platform_data; + + /* + * The MAX17047 gets used on x86 where we might not have pdata, assume + * the firmware will already have initialized the fuel-gauge and provide + * default values for the non init bits to make things work. 
+ */ + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return pdata; + + if (chip->chip_type != MAXIM_DEVICE_TYPE_MAX17042) { + pdata->init_data = max17047_default_pdata_init_regs; + pdata->num_init_data = + ARRAY_SIZE(max17047_default_pdata_init_regs); + } + + ret = regmap_read(chip->regmap, MAX17042_MiscCFG, &misc_cfg); + if (ret < 0) + return NULL; + + /* If bits 0-1 are set to 3 then only Voltage readings are used */ + if ((misc_cfg & 0x3) == 0x3) + pdata->enable_current_sense = false; + else + pdata->enable_current_sense = true; + + pdata->vmin = MAX17042_DEFAULT_VMIN; + pdata->vmax = MAX17042_DEFAULT_VMAX; + pdata->temp_min = MAX17042_DEFAULT_TEMP_MIN; + pdata->temp_max = MAX17042_DEFAULT_TEMP_MAX; + + return pdata; } #endif @@ -858,20 +904,20 @@ static int max17042_probe(struct i2c_client *client, return -ENOMEM; chip->client = client; + chip->chip_type = id->driver_data; chip->regmap = devm_regmap_init_i2c(client, &max17042_regmap_config); if (IS_ERR(chip->regmap)) { dev_err(&client->dev, "Failed to initialize regmap\n"); return -EINVAL; } - chip->pdata = max17042_get_pdata(&client->dev); + chip->pdata = max17042_get_pdata(chip); if (!chip->pdata) { dev_err(&client->dev, "no platform data provided\n"); return -EINVAL; } i2c_set_clientdata(client, chip); - chip->chip_type = id->driver_data; psy_cfg.drv_data = chip; /* When current is not measured, diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index 522757ac9cd4..3489fb0f9099 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -24,8 +24,12 @@ #define __MAX17042_BATTERY_H_ #define MAX17042_STATUS_BattAbsent (1 << 3) -#define MAX17042_BATTERY_FULL (100) +#define MAX17042_BATTERY_FULL (95) /* Recommend. FullSOCThr value */ #define MAX17042_DEFAULT_SNS_RESISTOR (10000) +#define MAX17042_DEFAULT_VMIN (3000) +#define MAX17042_DEFAULT_VMAX (4500) /* LiHV cell max */ +#define MAX17042_DEFAULT_TEMP_MIN (0) /* For sys without temp sensor */ +#define MAX17042_DEFAULT_TEMP_MAX (700) /* 70 degrees Celcius */ #define MAX17042_CHARACTERIZATION_DATA_SIZE 48 -- cgit v1.2.3 From a9df22c00d7c2c9c2944c62f1b819de6c214660f Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 14 Apr 2017 20:32:51 +0200 Subject: power: supply: max17042_battery: Add support for the STATUS property Userspace prefers the driver having a status property over having to guess itself. Specifically this will properly make the GNOME3 UI (and likely others) properly show discharging / charging / full status, instead of always showing discharging as status. Note that in the case there is no charger driver supplying the max17042, then a status of unknown will get returned. At least upower treats this the same as not having a status attribute, so in this case nothing changes from a userspace pov. 
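For userspace the new property is just one more sysfs attribute on the supply. A minimal sketch of reading it; the supply name under /sys/class/power_supply/ comes from the platform data, and the one used here is an assumption:

	#include <stdio.h>

	int main(void)
	{
		char status[32];
		FILE *f = fopen("/sys/class/power_supply/max170xx_battery/status", "r");

		if (!f)
			return 1;
		/* Prints Charging, Discharging, Full or Unknown */
		if (fgets(status, sizeof(status), f))
			printf("battery status: %s", status);
		fclose(f);
		return 0;
	}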
Signed-off-by: Hans de Goede Reviewed-by: Krzysztof Kozlowski Signed-off-by: Sebastian Reichel --- drivers/power/supply/max17042_battery.c | 46 +++++++++++++++++++++++++++++++++ include/linux/power/max17042_battery.h | 3 +++ 2 files changed, 49 insertions(+) (limited to 'include') diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index f0ff6e880ff2..62efe7eeb3f8 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -76,6 +76,7 @@ struct max17042_chip { }; static enum power_supply_property max17042_battery_props[] = { + POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_VOLTAGE_MAX, @@ -113,6 +114,46 @@ static int max17042_get_temperature(struct max17042_chip *chip, int *temp) return 0; } +static int max17042_get_status(struct max17042_chip *chip, int *status) +{ + int ret, charge_full, charge_now; + + ret = power_supply_am_i_supplied(chip->battery); + if (ret < 0) { + *status = POWER_SUPPLY_STATUS_UNKNOWN; + return 0; + } + if (ret == 0) { + *status = POWER_SUPPLY_STATUS_DISCHARGING; + return 0; + } + + /* + * The MAX170xx has builtin end-of-charge detection and will update + * FullCAP to match RepCap when it detects end of charging. + * + * When this cycle the battery gets charged to a higher (calculated) + * capacity then the previous cycle then FullCAP will get updated + * contineously once end-of-charge detection kicks in, so allow the + * 2 to differ a bit. + */ + + ret = regmap_read(chip->regmap, MAX17042_FullCAP, &charge_full); + if (ret < 0) + return ret; + + ret = regmap_read(chip->regmap, MAX17042_RepCap, &charge_now); + if (ret < 0) + return ret; + + if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD) + *status = POWER_SUPPLY_STATUS_FULL; + else + *status = POWER_SUPPLY_STATUS_CHARGING; + + return 0; +} + static int max17042_get_battery_health(struct max17042_chip *chip, int *health) { int temp, vavg, vbatt, ret; @@ -182,6 +223,11 @@ static int max17042_get_property(struct power_supply *psy, return -EAGAIN; switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + ret = max17042_get_status(chip, &val->intval); + if (ret < 0) + return ret; + break; case POWER_SUPPLY_PROP_PRESENT: ret = regmap_read(map, MAX17042_STATUS, &data); if (ret < 0) diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index 3489fb0f9099..a7ed29baf44a 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -31,6 +31,9 @@ #define MAX17042_DEFAULT_TEMP_MIN (0) /* For sys without temp sensor */ #define MAX17042_DEFAULT_TEMP_MAX (700) /* 70 degrees Celcius */ +/* Consider RepCap which is less then 10 units below FullCAP full */ +#define MAX17042_FULL_THRESHOLD 10 + #define MAX17042_CHARACTERIZATION_DATA_SIZE 48 enum max17042_register { -- cgit v1.2.3 From 056e8924a072d22007275dfb8b247bb814765b67 Mon Sep 17 00:00:00 2001 From: Dmitry Monakhov Date: Fri, 31 Mar 2017 19:53:36 +0400 Subject: tcm: make pi data verification configurable Currently ramdisk and fileio always perform PI verification before and after backend IO. This approach is not very flexible. Because some one may want to postpone this work to other layers in IO stack. 
For example if we want to test blk_integrity_profile testcase: https://github.com/dmonakhov/xfstests/commit/dee408c868861d6b6871dbb3381facee7effdbe4 Signed-off-by: Dmitry Monakhov Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 34 ++++++++++++++++++++++++++++++++++ drivers/target/target_core_file.c | 6 ++++-- drivers/target/target_core_rd.c | 17 +++++++++-------- include/target/target_core_base.h | 1 + 4 files changed, 48 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 70657fd56440..a34a86652b96 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -533,6 +533,7 @@ DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc); DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type); DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type); DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_format); +DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify); DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids); DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot); DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord); @@ -823,6 +824,7 @@ static ssize_t pi_prot_type_store(struct config_item *item, ret = dev->transport->init_prot(dev); if (ret) { da->pi_prot_type = old_prot; + da->pi_prot_verify = (bool) da->pi_prot_type; return ret; } @@ -830,6 +832,7 @@ static ssize_t pi_prot_type_store(struct config_item *item, dev->transport->free_prot(dev); } + da->pi_prot_verify = (bool) da->pi_prot_type; pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); return count; } @@ -872,6 +875,35 @@ static ssize_t pi_prot_format_store(struct config_item *item, return count; } +static ssize_t pi_prot_verify_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = to_attrib(item); + bool flag; + int ret; + + ret = strtobool(page, &flag); + if (ret < 0) + return ret; + + if (!flag) { + da->pi_prot_verify = flag; + return count; + } + if (da->hw_pi_prot_type) { + pr_warn("DIF protection enabled on underlying hardware," + " ignoring\n"); + return count; + } + if (!da->pi_prot_type) { + pr_warn("DIF protection not supported by backend, ignoring\n"); + return count; + } + da->pi_prot_verify = flag; + + return count; +} + static ssize_t force_pr_aptpl_store(struct config_item *item, const char *page, size_t count) { @@ -1067,6 +1099,7 @@ CONFIGFS_ATTR(, emulate_3pc); CONFIGFS_ATTR(, pi_prot_type); CONFIGFS_ATTR_RO(, hw_pi_prot_type); CONFIGFS_ATTR(, pi_prot_format); +CONFIGFS_ATTR(, pi_prot_verify); CONFIGFS_ATTR(, enforce_pr_isids); CONFIGFS_ATTR(, is_nonrot); CONFIGFS_ATTR(, emulate_rest_reord); @@ -1104,6 +1137,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = { &attr_pi_prot_type, &attr_hw_pi_prot_type, &attr_pi_prot_format, + &attr_pi_prot_verify, &attr_enforce_pr_isids, &attr_is_nonrot, &attr_emulate_rest_reord, diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index dd8f32055266..1bf6c31e4c21 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -554,7 +554,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size, sgl, sgl_nents, cmd->data_length, 0); - if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) { + if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type && + dev->dev_attrib.pi_prot_verify) { u32 sectors = cmd->data_length >> ilog2(dev->dev_attrib.block_size); @@ -564,7 +565,8 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, 
u32 sgl_nents, return rc; } } else { - if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { + if (cmd->prot_type && dev->dev_attrib.pi_prot_type && + dev->dev_attrib.pi_prot_verify) { u32 sectors = cmd->data_length >> ilog2(dev->dev_attrib.block_size); diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index ddc216c9f1f6..5f23f341f8d3 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -410,7 +410,7 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read) u32 prot_offset, prot_page; u32 prot_npages __maybe_unused; u64 tmp; - sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + sense_reason_t rc = 0; tmp = cmd->t_task_lba * se_dev->prot_length; prot_offset = do_div(tmp, PAGE_SIZE); @@ -423,13 +423,14 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read) prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset]; - if (is_read) - rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, - prot_sg, prot_offset); - else - rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, - cmd->t_prot_sg, 0); - + if (se_dev->dev_attrib.pi_prot_verify) { + if (is_read) + rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, + prot_sg, prot_offset); + else + rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, + cmd->t_prot_sg, 0); + } if (!rc) sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ccfad0e9c2cd..0c1dce2ac6f0 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -664,6 +664,7 @@ struct se_dev_attrib { int pi_prot_format; enum target_prot_type pi_prot_type; enum target_prot_type hw_pi_prot_type; + int pi_prot_verify; int enforce_pr_isids; int force_pr_aptpl; int is_nonrot; -- cgit v1.2.3 From c2d26f18dcbc84799457852292c66967ff6626f1 Mon Sep 17 00:00:00 2001 From: "Bryant G. Ly" Date: Tue, 18 Apr 2017 14:10:05 -0500 Subject: target: Add WRITE_VERIFY_16 This patch addresses clients who needs write_verify_16 for large volume groups such as AIX. Signed-off-by: Bryant G. Ly Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_sbc.c | 2 ++ include/scsi/scsi_proto.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include') diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index a7fa4a7339db..f9250b3c3fd4 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -850,6 +850,7 @@ static sense_reason_t sbc_parse_verify(struct se_cmd *cmd, int *sectors, cmd->t_task_lba = transport_lba_32(cdb); break; case VERIFY_16: + case WRITE_VERIFY_16: *sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); break; @@ -962,6 +963,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) cmd->execute_cmd = sbc_execute_rw; break; case WRITE_VERIFY: + case WRITE_VERIFY_16: ret = sbc_parse_verify(cmd, §ors, &size); if (ret) return ret; diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index 6ba66e01f6df..ce78ec8e367d 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -112,6 +112,7 @@ #define WRITE_16 0x8a #define READ_ATTRIBUTE 0x8c #define WRITE_ATTRIBUTE 0x8d +#define WRITE_VERIFY_16 0x8e #define VERIFY_16 0x8f #define SYNCHRONIZE_CACHE_16 0x91 #define WRITE_SAME_16 0x93 -- cgit v1.2.3 From 4ec5bf0ea83930b96addf6b78225bf0355459d7f Mon Sep 17 00:00:00 2001 From: "Bryant G. 
Ly" Date: Fri, 21 Apr 2017 20:40:50 -0500 Subject: target/user: PGR Support This adds initial PGR support for just TCMU, since tcmu doesn't have the necessary IT_NEXUS info to process PGR in userspace, so have those commands be processed in kernel. HA support is not available yet, we will work on it if this patch is acceptable. Signed-off-by: Bryant G. Ly Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 10 ++++----- drivers/target/target_core_device.c | 38 +++++++++++++++++++++++++++++++++++ drivers/target/target_core_pr.c | 2 +- drivers/target/target_core_pscsi.c | 3 ++- include/target/target_core_backend.h | 1 + 5 files changed, 47 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index a34a86652b96..e7b62bc239a7 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1400,7 +1400,7 @@ static ssize_t target_pr_res_holder_show(struct config_item *item, char *page) struct se_device *dev = pr_to_dev(item); int ret; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) return sprintf(page, "Passthrough\n"); spin_lock(&dev->dev_reservation_lock); @@ -1540,7 +1540,7 @@ static ssize_t target_pr_res_type_show(struct config_item *item, char *page) { struct se_device *dev = pr_to_dev(item); - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) return sprintf(page, "SPC_PASSTHROUGH\n"); else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return sprintf(page, "SPC2_RESERVATIONS\n"); @@ -1553,7 +1553,7 @@ static ssize_t target_pr_res_aptpl_active_show(struct config_item *item, { struct se_device *dev = pr_to_dev(item); - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) return 0; return sprintf(page, "APTPL Bit Status: %s\n", @@ -1565,7 +1565,7 @@ static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item, { struct se_device *dev = pr_to_dev(item); - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) return 0; return sprintf(page, "Ready to process PR APTPL metadata..\n"); @@ -1611,7 +1611,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item, u16 tpgt = 0; u8 type = 0; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) return count; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return count; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index c754ae33bf7b..c4845678eb91 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1045,6 +1045,8 @@ passthrough_parse_cdb(struct se_cmd *cmd, sense_reason_t (*exec_cmd)(struct se_cmd *cmd)) { unsigned char *cdb = cmd->t_task_cdb; + struct se_device *dev = cmd->se_dev; + unsigned int size; /* * Clear a lun set in the cdb if the initiator talking to use spoke @@ -1076,6 +1078,42 @@ passthrough_parse_cdb(struct se_cmd *cmd, return TCM_NO_SENSE; } + /* + * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to + * emulate the response, since tcmu does not have the information + * required to process these commands. 
+ */ + if (!(dev->transport->transport_flags & + TRANSPORT_FLAG_PASSTHROUGH_PGR)) { + if (cdb[0] == PERSISTENT_RESERVE_IN) { + cmd->execute_cmd = target_scsi3_emulate_pr_in; + size = (cdb[7] << 8) + cdb[8]; + return target_cmd_size_check(cmd, size); + } + if (cdb[0] == PERSISTENT_RESERVE_OUT) { + cmd->execute_cmd = target_scsi3_emulate_pr_out; + size = (cdb[7] << 8) + cdb[8]; + return target_cmd_size_check(cmd, size); + } + + if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) { + cmd->execute_cmd = target_scsi2_reservation_release; + if (cdb[0] == RELEASE_10) + size = (cdb[7] << 8) | cdb[8]; + else + size = cmd->data_length; + return target_cmd_size_check(cmd, size); + } + if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) { + cmd->execute_cmd = target_scsi2_reservation_reserve; + if (cdb[0] == RESERVE_10) + size = (cdb[7] << 8) | cdb[8]; + else + size = cmd->data_length; + return target_cmd_size_check(cmd, size); + } + } + /* Set DATA_CDB flag for ops that should have it */ switch (cdb[0]) { case READ_6: diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index e18051185846..129ca572673c 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -4147,7 +4147,7 @@ target_check_reservation(struct se_cmd *cmd) return 0; if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; - if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) return 0; spin_lock(&dev->dev_reservation_lock); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 94cda7991e80..8943a62eb203 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1081,7 +1081,8 @@ static const struct target_backend_ops pscsi_ops = { .name = "pscsi", .owner = THIS_MODULE, .transport_flags = TRANSPORT_FLAG_PASSTHROUGH | - TRANSPORT_FLAG_PASSTHROUGH_ALUA, + TRANSPORT_FLAG_PASSTHROUGH_ALUA | + TRANSPORT_FLAG_PASSTHROUGH_PGR, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 1b0f447ce850..e475531565fd 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -10,6 +10,7 @@ * backend module. */ #define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2 +#define TRANSPORT_FLAG_PASSTHROUGH_PGR 0x4 struct request_queue; struct scatterlist; -- cgit v1.2.3 From 45753c5f315749711b935a2506ee5c10eef5c23d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 2 May 2017 10:31:18 +0200 Subject: srcu: Debloat the header Linus noticed that the has huge inline functions which should not be inline at all. As a first step in cleaning this up, move them all to kernel/rcu/ and only keep an absolute minimum of data type defines in the header: before: -rw-r--r-- 1 mingo mingo 22284 May 2 10:25 include/linux/rcu_segcblist.h after: -rw-r--r-- 1 mingo mingo 3180 May 2 10:22 include/linux/rcu_segcblist.h More can be done, such as uninlining the large functions, which inlining is unjustified even if it's an RCU internal matter. Reported-by: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar Signed-off-by: Paul E. 
McKenney --- include/linux/rcu_segcblist.h | 628 +--------------------------------------- kernel/rcu/rcu_segcblist.h | 645 ++++++++++++++++++++++++++++++++++++++++++ kernel/rcu/srcutiny.c | 1 + kernel/rcu/srcutree.c | 1 + kernel/rcu/tree.h | 3 +- 5 files changed, 652 insertions(+), 626 deletions(-) create mode 100644 kernel/rcu/rcu_segcblist.h (limited to 'include') diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index ced8f313fd05..ba4d2621d9ca 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -20,8 +20,8 @@ * Authors: Paul E. McKenney */ -#ifndef __KERNEL_RCU_SEGCBLIST_H -#define __KERNEL_RCU_SEGCBLIST_H +#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H +#define __INCLUDE_LINUX_RCU_SEGCBLIST_H /* Simple unsegmented callback lists. */ struct rcu_cblist { @@ -33,102 +33,6 @@ struct rcu_cblist { #define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head } -/* Initialize simple callback list. */ -static inline void rcu_cblist_init(struct rcu_cblist *rclp) -{ - rclp->head = NULL; - rclp->tail = &rclp->head; - rclp->len = 0; - rclp->len_lazy = 0; -} - -/* Is simple callback list empty? */ -static inline bool rcu_cblist_empty(struct rcu_cblist *rclp) -{ - return !rclp->head; -} - -/* Return number of callbacks in simple callback list. */ -static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp) -{ - return rclp->len; -} - -/* Return number of lazy callbacks in simple callback list. */ -static inline long rcu_cblist_n_lazy_cbs(struct rcu_cblist *rclp) -{ - return rclp->len_lazy; -} - -/* - * Debug function to actually count the number of callbacks. - * If the number exceeds the limit specified, return -1. - */ -static inline long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim) -{ - int cnt = 0; - struct rcu_head **rhpp = &rclp->head; - - for (;;) { - if (!*rhpp) - return cnt; - if (++cnt > lim) - return -1; - rhpp = &(*rhpp)->next; - } -} - -/* - * Dequeue the oldest rcu_head structure from the specified callback - * list. This function assumes that the callback is non-lazy, but - * the caller can later invoke rcu_cblist_dequeued_lazy() if it - * finds otherwise (and if it cares about laziness). This allows - * different users to have different ways of determining laziness. - */ -static inline struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp) -{ - struct rcu_head *rhp; - - rhp = rclp->head; - if (!rhp) - return NULL; - rclp->len--; - rclp->head = rhp->next; - if (!rclp->head) - rclp->tail = &rclp->head; - return rhp; -} - -/* - * Account for the fact that a previously dequeued callback turned out - * to be marked as lazy. - */ -static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp) -{ - rclp->len_lazy--; -} - -/* - * Interim function to return rcu_cblist head pointer. Longer term, the - * rcu_cblist will be used more pervasively, removing the need for this - * function. - */ -static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp) -{ - return rclp->head; -} - -/* - * Interim function to return rcu_cblist head pointer. Longer term, the - * rcu_cblist will be used more pervasively, removing the need for this - * function. - */ -static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp) -{ - WARN_ON_ONCE(rcu_cblist_empty(rclp)); - return rclp->tail; -} - /* Complicated segmented callback lists. ;-) */ /* @@ -183,530 +87,4 @@ struct rcu_segcblist { .tails[RCU_NEXT_TAIL] = &n.head, \ } -/* - * Initialize an rcu_segcblist structure. 
- */ -static inline void rcu_segcblist_init(struct rcu_segcblist *rsclp) -{ - int i; - - BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq)); - BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq)); - rsclp->head = NULL; - for (i = 0; i < RCU_CBLIST_NSEGS; i++) - rsclp->tails[i] = &rsclp->head; - rsclp->len = 0; - rsclp->len_lazy = 0; -} - -/* - * Is the specified rcu_segcblist structure empty? - * - * But careful! The fact that the ->head field is NULL does not - * necessarily imply that there are no callbacks associated with - * this structure. When callbacks are being invoked, they are - * removed as a group. If callback invocation must be preempted, - * the remaining callbacks will be added back to the list. Either - * way, the counts are updated later. - * - * So it is often the case that rcu_segcblist_n_cbs() should be used - * instead. - */ -static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp) -{ - return !rsclp->head; -} - -/* Return number of callbacks in segmented callback list. */ -static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp) -{ - return READ_ONCE(rsclp->len); -} - -/* Return number of lazy callbacks in segmented callback list. */ -static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp) -{ - return rsclp->len_lazy; -} - -/* Return number of lazy callbacks in segmented callback list. */ -static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp) -{ - return rsclp->len - rsclp->len_lazy; -} - -/* - * Is the specified rcu_segcblist enabled, for example, not corresponding - * to an offline or callback-offloaded CPU? - */ -static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) -{ - return !!rsclp->tails[RCU_NEXT_TAIL]; -} - -/* - * Disable the specified rcu_segcblist structure, so that callbacks can - * no longer be posted to it. This structure must be empty. - */ -static inline void rcu_segcblist_disable(struct rcu_segcblist *rsclp) -{ - WARN_ON_ONCE(!rcu_segcblist_empty(rsclp)); - WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp)); - WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp)); - rsclp->tails[RCU_NEXT_TAIL] = NULL; -} - -/* - * Is the specified segment of the specified rcu_segcblist structure - * empty of callbacks? - */ -static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg) -{ - if (seg == RCU_DONE_TAIL) - return &rsclp->head == rsclp->tails[RCU_DONE_TAIL]; - return rsclp->tails[seg - 1] == rsclp->tails[seg]; -} - -/* - * Are all segments following the specified segment of the specified - * rcu_segcblist structure empty of callbacks? (The specified - * segment might well contain callbacks.) - */ -static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg) -{ - return !*rsclp->tails[seg]; -} - -/* - * Does the specified rcu_segcblist structure contain callbacks that - * are ready to be invoked? - */ -static inline bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp) -{ - return rcu_segcblist_is_enabled(rsclp) && - &rsclp->head != rsclp->tails[RCU_DONE_TAIL]; -} - -/* - * Does the specified rcu_segcblist structure contain callbacks that - * are still pending, that is, not yet ready to be invoked? - */ -static inline bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp) -{ - return rcu_segcblist_is_enabled(rsclp) && - !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL); -} - -/* - * Dequeue and return the first ready-to-invoke callback. If there - * are no ready-to-invoke callbacks, return NULL. 
Disables interrupts - * to avoid interference. Does not protect from interference from other - * CPUs or tasks. - */ -static inline struct rcu_head * -rcu_segcblist_dequeue(struct rcu_segcblist *rsclp) -{ - unsigned long flags; - int i; - struct rcu_head *rhp; - - local_irq_save(flags); - if (!rcu_segcblist_ready_cbs(rsclp)) { - local_irq_restore(flags); - return NULL; - } - rhp = rsclp->head; - BUG_ON(!rhp); - rsclp->head = rhp->next; - for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) { - if (rsclp->tails[i] != &rhp->next) - break; - rsclp->tails[i] = &rsclp->head; - } - smp_mb(); /* Dequeue before decrement for rcu_barrier(). */ - WRITE_ONCE(rsclp->len, rsclp->len - 1); - local_irq_restore(flags); - return rhp; -} - -/* - * Account for the fact that a previously dequeued callback turned out - * to be marked as lazy. - */ -static inline void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp) -{ - unsigned long flags; - - local_irq_save(flags); - rsclp->len_lazy--; - local_irq_restore(flags); -} - -/* - * Return a pointer to the first callback in the specified rcu_segcblist - * structure. This is useful for diagnostics. - */ -static inline struct rcu_head * -rcu_segcblist_first_cb(struct rcu_segcblist *rsclp) -{ - if (rcu_segcblist_is_enabled(rsclp)) - return rsclp->head; - return NULL; -} - -/* - * Return a pointer to the first pending callback in the specified - * rcu_segcblist structure. This is useful just after posting a given - * callback -- if that callback is the first pending callback, then - * you cannot rely on someone else having already started up the required - * grace period. - */ -static inline struct rcu_head * -rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp) -{ - if (rcu_segcblist_is_enabled(rsclp)) - return *rsclp->tails[RCU_DONE_TAIL]; - return NULL; -} - -/* - * Does the specified rcu_segcblist structure contain callbacks that - * have not yet been processed beyond having been posted, that is, - * does it contain callbacks in its last segment? - */ -static inline bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp) -{ - return rcu_segcblist_is_enabled(rsclp) && - !rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL); -} - -/* - * Enqueue the specified callback onto the specified rcu_segcblist - * structure, updating accounting as needed. Note that the ->len - * field may be accessed locklessly, hence the WRITE_ONCE(). - * The ->len field is used by rcu_barrier() and friends to determine - * if it must post a callback on this structure, and it is OK - * for rcu_barrier() to sometimes post callbacks needlessly, but - * absolutely not OK for it to ever miss posting a callback. - */ -static inline void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, - struct rcu_head *rhp, bool lazy) -{ - WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */ - if (lazy) - rsclp->len_lazy++; - smp_mb(); /* Ensure counts are updated before callback is enqueued. */ - rhp->next = NULL; - *rsclp->tails[RCU_NEXT_TAIL] = rhp; - rsclp->tails[RCU_NEXT_TAIL] = &rhp->next; -} - -/* - * Entrain the specified callback onto the specified rcu_segcblist at - * the end of the last non-empty segment. If the entire rcu_segcblist - * is empty, make no change, but return false. - * - * This is intended for use by rcu_barrier()-like primitives, -not- - * for normal grace-period use. IMPORTANT: The callback you enqueue - * will wait for all prior callbacks, NOT necessarily for a grace - * period. You have been warned. 
- */ -static inline bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, - struct rcu_head *rhp, bool lazy) -{ - int i; - - if (rcu_segcblist_n_cbs(rsclp) == 0) - return false; - WRITE_ONCE(rsclp->len, rsclp->len + 1); - if (lazy) - rsclp->len_lazy++; - smp_mb(); /* Ensure counts are updated before callback is entrained. */ - rhp->next = NULL; - for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--) - if (rsclp->tails[i] != rsclp->tails[i - 1]) - break; - *rsclp->tails[i] = rhp; - for (; i <= RCU_NEXT_TAIL; i++) - rsclp->tails[i] = &rhp->next; - return true; -} - -/* - * Extract only the counts from the specified rcu_segcblist structure, - * and place them in the specified rcu_cblist structure. This function - * supports both callback orphaning and invocation, hence the separation - * of counts and callbacks. (Callbacks ready for invocation must be - * orphaned and adopted separately from pending callbacks, but counts - * apply to all callbacks. Locking must be used to make sure that - * both orphaned-callbacks lists are consistent.) - */ -static inline void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - rclp->len_lazy += rsclp->len_lazy; - rclp->len += rsclp->len; - rsclp->len_lazy = 0; - WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */ -} - -/* - * Extract only those callbacks ready to be invoked from the specified - * rcu_segcblist structure and place them in the specified rcu_cblist - * structure. - */ -static inline void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - int i; - - if (!rcu_segcblist_ready_cbs(rsclp)) - return; /* Nothing to do. */ - *rclp->tail = rsclp->head; - rsclp->head = *rsclp->tails[RCU_DONE_TAIL]; - *rsclp->tails[RCU_DONE_TAIL] = NULL; - rclp->tail = rsclp->tails[RCU_DONE_TAIL]; - for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--) - if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL]) - rsclp->tails[i] = &rsclp->head; -} - -/* - * Extract only those callbacks still pending (not yet ready to be - * invoked) from the specified rcu_segcblist structure and place them in - * the specified rcu_cblist structure. Note that this loses information - * about any callbacks that might have been partway done waiting for - * their grace period. Too bad! They will have to start over. - */ -static inline void -rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - int i; - - if (!rcu_segcblist_pend_cbs(rsclp)) - return; /* Nothing to do. */ - *rclp->tail = *rsclp->tails[RCU_DONE_TAIL]; - rclp->tail = rsclp->tails[RCU_NEXT_TAIL]; - *rsclp->tails[RCU_DONE_TAIL] = NULL; - for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) - rsclp->tails[i] = rsclp->tails[RCU_DONE_TAIL]; -} - -/* - * Move the entire contents of the specified rcu_segcblist structure, - * counts, callbacks, and all, to the specified rcu_cblist structure. - * @@@ Why do we need this??? Moving early-boot CBs to NOCB lists? - * @@@ Memory barrier needed? (Not if only used at boot time...) - */ -static inline void rcu_segcblist_extract_all(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - rcu_segcblist_extract_done_cbs(rsclp, rclp); - rcu_segcblist_extract_pend_cbs(rsclp, rclp); - rcu_segcblist_extract_count(rsclp, rclp); -} - -/* - * Insert counts from the specified rcu_cblist structure in the - * specified rcu_segcblist structure. 
- */ -static inline void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - rsclp->len_lazy += rclp->len_lazy; - /* ->len sampled locklessly. */ - WRITE_ONCE(rsclp->len, rsclp->len + rclp->len); - rclp->len_lazy = 0; - rclp->len = 0; -} - -/* - * Move callbacks from the specified rcu_cblist to the beginning of the - * done-callbacks segment of the specified rcu_segcblist. - */ -static inline void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - int i; - - if (!rclp->head) - return; /* No callbacks to move. */ - *rclp->tail = rsclp->head; - rsclp->head = rclp->head; - for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) - if (&rsclp->head == rsclp->tails[i]) - rsclp->tails[i] = rclp->tail; - else - break; - rclp->head = NULL; - rclp->tail = &rclp->head; -} - -/* - * Move callbacks from the specified rcu_cblist to the end of the - * new-callbacks segment of the specified rcu_segcblist. - */ -static inline void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - if (!rclp->head) - return; /* Nothing to do. */ - *rsclp->tails[RCU_NEXT_TAIL] = rclp->head; - rsclp->tails[RCU_NEXT_TAIL] = rclp->tail; - rclp->head = NULL; - rclp->tail = &rclp->head; -} - -/* - * Advance the callbacks in the specified rcu_segcblist structure based - * on the current value passed in for the grace-period counter. - */ -static inline void rcu_segcblist_advance(struct rcu_segcblist *rsclp, - unsigned long seq) -{ - int i, j; - - WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); - if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) - return; - - /* - * Find all callbacks whose ->gp_seq numbers indicate that they - * are ready to invoke, and put them into the RCU_DONE_TAIL segment. - */ - for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { - if (ULONG_CMP_LT(seq, rsclp->gp_seq[i])) - break; - rsclp->tails[RCU_DONE_TAIL] = rsclp->tails[i]; - } - - /* If no callbacks moved, nothing more need be done. */ - if (i == RCU_WAIT_TAIL) - return; - - /* Clean up tail pointers that might have been misordered above. */ - for (j = RCU_WAIT_TAIL; j < i; j++) - rsclp->tails[j] = rsclp->tails[RCU_DONE_TAIL]; - - /* - * Callbacks moved, so clean up the misordered ->tails[] pointers - * that now point into the middle of the list of ready-to-invoke - * callbacks. The overall effect is to copy down the later pointers - * into the gap that was created by the now-ready segments. - */ - for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { - if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL]) - break; /* No more callbacks. */ - rsclp->tails[j] = rsclp->tails[i]; - rsclp->gp_seq[j] = rsclp->gp_seq[i]; - } -} - -/* - * "Accelerate" callbacks based on more-accurate grace-period information. - * The reason for this is that RCU does not synchronize the beginnings and - * ends of grace periods, and that callbacks are posted locally. This in - * turn means that the callbacks must be labelled conservatively early - * on, as getting exact information would degrade both performance and - * scalability. When more accurate grace-period information becomes - * available, previously posted callbacks can be "accelerated", marking - * them to complete at the end of the earlier grace period. - * - * This function operates on an rcu_segcblist structure, and also the - * grace-period sequence number seq at which new callbacks would become - * ready to invoke. 
Returns true if there are callbacks that won't be - * ready to invoke until seq, false otherwise. - */ -static inline bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, - unsigned long seq) -{ - int i; - - WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); - if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) - return false; - - /* - * Find the segment preceding the oldest segment of callbacks - * whose ->gp_seq[] completion is at or after that passed in via - * "seq", skipping any empty segments. This oldest segment, along - * with any later segments, can be merged in with any newly arrived - * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq" - * as their ->gp_seq[] grace-period completion sequence number. - */ - for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--) - if (rsclp->tails[i] != rsclp->tails[i - 1] && - ULONG_CMP_LT(rsclp->gp_seq[i], seq)) - break; - - /* - * If all the segments contain callbacks that correspond to - * earlier grace-period sequence numbers than "seq", leave. - * Assuming that the rcu_segcblist structure has enough - * segments in its arrays, this can only happen if some of - * the non-done segments contain callbacks that really are - * ready to invoke. This situation will get straightened - * out by the next call to rcu_segcblist_advance(). - * - * Also advance to the oldest segment of callbacks whose - * ->gp_seq[] completion is at or after that passed in via "seq", - * skipping any empty segments. - */ - if (++i >= RCU_NEXT_TAIL) - return false; - - /* - * Merge all later callbacks, including newly arrived callbacks, - * into the segment located by the for-loop above. Assign "seq" - * as the ->gp_seq[] value in order to correctly handle the case - * where there were no pending callbacks in the rcu_segcblist - * structure other than in the RCU_NEXT_TAIL segment. - */ - for (; i < RCU_NEXT_TAIL; i++) { - rsclp->tails[i] = rsclp->tails[RCU_NEXT_TAIL]; - rsclp->gp_seq[i] = seq; - } - return true; -} - -/* - * Scan the specified rcu_segcblist structure for callbacks that need - * a grace period later than the one specified by "seq". We don't look - * at the RCU_DONE_TAIL or RCU_NEXT_TAIL segments because they don't - * have a grace-period sequence number. - */ -static inline bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp, - unsigned long seq) -{ - int i; - - for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) - if (rsclp->tails[i - 1] != rsclp->tails[i] && - ULONG_CMP_LT(seq, rsclp->gp_seq[i])) - return true; - return false; -} - -/* - * Interim function to return rcu_segcblist head pointer. Longer term, the - * rcu_segcblist will be used more pervasively, removing the need for this - * function. - */ -static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp) -{ - return rsclp->head; -} - -/* - * Interim function to return rcu_segcblist head pointer. Longer term, the - * rcu_segcblist will be used more pervasively, removing the need for this - * function. 
- */ -static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp) -{ - WARN_ON_ONCE(rcu_segcblist_empty(rsclp)); - return rsclp->tails[RCU_NEXT_TAIL]; -} - -#endif /* __KERNEL_RCU_SEGCBLIST_H */ +#endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */ diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h new file mode 100644 index 000000000000..d98d2f9b8d59 --- /dev/null +++ b/kernel/rcu/rcu_segcblist.h @@ -0,0 +1,645 @@ +/* + * RCU segmented callback lists + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright IBM Corporation, 2017 + * + * Authors: Paul E. McKenney + */ + +#include + +/* Initialize simple callback list. */ +static inline void rcu_cblist_init(struct rcu_cblist *rclp) +{ + rclp->head = NULL; + rclp->tail = &rclp->head; + rclp->len = 0; + rclp->len_lazy = 0; +} + +/* Is simple callback list empty? */ +static inline bool rcu_cblist_empty(struct rcu_cblist *rclp) +{ + return !rclp->head; +} + +/* Return number of callbacks in simple callback list. */ +static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp) +{ + return rclp->len; +} + +/* Return number of lazy callbacks in simple callback list. */ +static inline long rcu_cblist_n_lazy_cbs(struct rcu_cblist *rclp) +{ + return rclp->len_lazy; +} + +/* + * Debug function to actually count the number of callbacks. + * If the number exceeds the limit specified, return -1. + */ +static inline long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim) +{ + int cnt = 0; + struct rcu_head **rhpp = &rclp->head; + + for (;;) { + if (!*rhpp) + return cnt; + if (++cnt > lim) + return -1; + rhpp = &(*rhpp)->next; + } +} + +/* + * Dequeue the oldest rcu_head structure from the specified callback + * list. This function assumes that the callback is non-lazy, but + * the caller can later invoke rcu_cblist_dequeued_lazy() if it + * finds otherwise (and if it cares about laziness). This allows + * different users to have different ways of determining laziness. + */ +static inline struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp) +{ + struct rcu_head *rhp; + + rhp = rclp->head; + if (!rhp) + return NULL; + rclp->len--; + rclp->head = rhp->next; + if (!rclp->head) + rclp->tail = &rclp->head; + return rhp; +} + +/* + * Account for the fact that a previously dequeued callback turned out + * to be marked as lazy. + */ +static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp) +{ + rclp->len_lazy--; +} + +/* + * Interim function to return rcu_cblist head pointer. Longer term, the + * rcu_cblist will be used more pervasively, removing the need for this + * function. + */ +static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp) +{ + return rclp->head; +} + +/* + * Interim function to return rcu_cblist head pointer. Longer term, the + * rcu_cblist will be used more pervasively, removing the need for this + * function. 
+ */ +static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp) +{ + WARN_ON_ONCE(rcu_cblist_empty(rclp)); + return rclp->tail; +} + +/* + * Initialize an rcu_segcblist structure. + */ +static inline void rcu_segcblist_init(struct rcu_segcblist *rsclp) +{ + int i; + + BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq)); + BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq)); + rsclp->head = NULL; + for (i = 0; i < RCU_CBLIST_NSEGS; i++) + rsclp->tails[i] = &rsclp->head; + rsclp->len = 0; + rsclp->len_lazy = 0; +} + +/* + * Is the specified rcu_segcblist structure empty? + * + * But careful! The fact that the ->head field is NULL does not + * necessarily imply that there are no callbacks associated with + * this structure. When callbacks are being invoked, they are + * removed as a group. If callback invocation must be preempted, + * the remaining callbacks will be added back to the list. Either + * way, the counts are updated later. + * + * So it is often the case that rcu_segcblist_n_cbs() should be used + * instead. + */ +static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp) +{ + return !rsclp->head; +} + +/* Return number of callbacks in segmented callback list. */ +static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp) +{ + return READ_ONCE(rsclp->len); +} + +/* Return number of lazy callbacks in segmented callback list. */ +static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp) +{ + return rsclp->len_lazy; +} + +/* Return number of lazy callbacks in segmented callback list. */ +static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp) +{ + return rsclp->len - rsclp->len_lazy; +} + +/* + * Is the specified rcu_segcblist enabled, for example, not corresponding + * to an offline or callback-offloaded CPU? + */ +static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) +{ + return !!rsclp->tails[RCU_NEXT_TAIL]; +} + +/* + * Disable the specified rcu_segcblist structure, so that callbacks can + * no longer be posted to it. This structure must be empty. + */ +static inline void rcu_segcblist_disable(struct rcu_segcblist *rsclp) +{ + WARN_ON_ONCE(!rcu_segcblist_empty(rsclp)); + WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp)); + WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp)); + rsclp->tails[RCU_NEXT_TAIL] = NULL; +} + +/* + * Is the specified segment of the specified rcu_segcblist structure + * empty of callbacks? + */ +static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg) +{ + if (seg == RCU_DONE_TAIL) + return &rsclp->head == rsclp->tails[RCU_DONE_TAIL]; + return rsclp->tails[seg - 1] == rsclp->tails[seg]; +} + +/* + * Are all segments following the specified segment of the specified + * rcu_segcblist structure empty of callbacks? (The specified + * segment might well contain callbacks.) + */ +static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg) +{ + return !*rsclp->tails[seg]; +} + +/* + * Does the specified rcu_segcblist structure contain callbacks that + * are ready to be invoked? + */ +static inline bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp) +{ + return rcu_segcblist_is_enabled(rsclp) && + &rsclp->head != rsclp->tails[RCU_DONE_TAIL]; +} + +/* + * Does the specified rcu_segcblist structure contain callbacks that + * are still pending, that is, not yet ready to be invoked? 
+ */ +static inline bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp) +{ + return rcu_segcblist_is_enabled(rsclp) && + !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL); +} + +/* + * Dequeue and return the first ready-to-invoke callback. If there + * are no ready-to-invoke callbacks, return NULL. Disables interrupts + * to avoid interference. Does not protect from interference from other + * CPUs or tasks. + */ +static inline struct rcu_head * +rcu_segcblist_dequeue(struct rcu_segcblist *rsclp) +{ + unsigned long flags; + int i; + struct rcu_head *rhp; + + local_irq_save(flags); + if (!rcu_segcblist_ready_cbs(rsclp)) { + local_irq_restore(flags); + return NULL; + } + rhp = rsclp->head; + BUG_ON(!rhp); + rsclp->head = rhp->next; + for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) { + if (rsclp->tails[i] != &rhp->next) + break; + rsclp->tails[i] = &rsclp->head; + } + smp_mb(); /* Dequeue before decrement for rcu_barrier(). */ + WRITE_ONCE(rsclp->len, rsclp->len - 1); + local_irq_restore(flags); + return rhp; +} + +/* + * Account for the fact that a previously dequeued callback turned out + * to be marked as lazy. + */ +static inline void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp) +{ + unsigned long flags; + + local_irq_save(flags); + rsclp->len_lazy--; + local_irq_restore(flags); +} + +/* + * Return a pointer to the first callback in the specified rcu_segcblist + * structure. This is useful for diagnostics. + */ +static inline struct rcu_head * +rcu_segcblist_first_cb(struct rcu_segcblist *rsclp) +{ + if (rcu_segcblist_is_enabled(rsclp)) + return rsclp->head; + return NULL; +} + +/* + * Return a pointer to the first pending callback in the specified + * rcu_segcblist structure. This is useful just after posting a given + * callback -- if that callback is the first pending callback, then + * you cannot rely on someone else having already started up the required + * grace period. + */ +static inline struct rcu_head * +rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp) +{ + if (rcu_segcblist_is_enabled(rsclp)) + return *rsclp->tails[RCU_DONE_TAIL]; + return NULL; +} + +/* + * Does the specified rcu_segcblist structure contain callbacks that + * have not yet been processed beyond having been posted, that is, + * does it contain callbacks in its last segment? + */ +static inline bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp) +{ + return rcu_segcblist_is_enabled(rsclp) && + !rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL); +} + +/* + * Enqueue the specified callback onto the specified rcu_segcblist + * structure, updating accounting as needed. Note that the ->len + * field may be accessed locklessly, hence the WRITE_ONCE(). + * The ->len field is used by rcu_barrier() and friends to determine + * if it must post a callback on this structure, and it is OK + * for rcu_barrier() to sometimes post callbacks needlessly, but + * absolutely not OK for it to ever miss posting a callback. + */ +static inline void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, + struct rcu_head *rhp, bool lazy) +{ + WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */ + if (lazy) + rsclp->len_lazy++; + smp_mb(); /* Ensure counts are updated before callback is enqueued. */ + rhp->next = NULL; + *rsclp->tails[RCU_NEXT_TAIL] = rhp; + rsclp->tails[RCU_NEXT_TAIL] = &rhp->next; +} + +/* + * Entrain the specified callback onto the specified rcu_segcblist at + * the end of the last non-empty segment. 
If the entire rcu_segcblist + * is empty, make no change, but return false. + * + * This is intended for use by rcu_barrier()-like primitives, -not- + * for normal grace-period use. IMPORTANT: The callback you enqueue + * will wait for all prior callbacks, NOT necessarily for a grace + * period. You have been warned. + */ +static inline bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, + struct rcu_head *rhp, bool lazy) +{ + int i; + + if (rcu_segcblist_n_cbs(rsclp) == 0) + return false; + WRITE_ONCE(rsclp->len, rsclp->len + 1); + if (lazy) + rsclp->len_lazy++; + smp_mb(); /* Ensure counts are updated before callback is entrained. */ + rhp->next = NULL; + for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--) + if (rsclp->tails[i] != rsclp->tails[i - 1]) + break; + *rsclp->tails[i] = rhp; + for (; i <= RCU_NEXT_TAIL; i++) + rsclp->tails[i] = &rhp->next; + return true; +} + +/* + * Extract only the counts from the specified rcu_segcblist structure, + * and place them in the specified rcu_cblist structure. This function + * supports both callback orphaning and invocation, hence the separation + * of counts and callbacks. (Callbacks ready for invocation must be + * orphaned and adopted separately from pending callbacks, but counts + * apply to all callbacks. Locking must be used to make sure that + * both orphaned-callbacks lists are consistent.) + */ +static inline void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + rclp->len_lazy += rsclp->len_lazy; + rclp->len += rsclp->len; + rsclp->len_lazy = 0; + WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */ +} + +/* + * Extract only those callbacks ready to be invoked from the specified + * rcu_segcblist structure and place them in the specified rcu_cblist + * structure. + */ +static inline void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + int i; + + if (!rcu_segcblist_ready_cbs(rsclp)) + return; /* Nothing to do. */ + *rclp->tail = rsclp->head; + rsclp->head = *rsclp->tails[RCU_DONE_TAIL]; + *rsclp->tails[RCU_DONE_TAIL] = NULL; + rclp->tail = rsclp->tails[RCU_DONE_TAIL]; + for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--) + if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL]) + rsclp->tails[i] = &rsclp->head; +} + +/* + * Extract only those callbacks still pending (not yet ready to be + * invoked) from the specified rcu_segcblist structure and place them in + * the specified rcu_cblist structure. Note that this loses information + * about any callbacks that might have been partway done waiting for + * their grace period. Too bad! They will have to start over. + */ +static inline void +rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + int i; + + if (!rcu_segcblist_pend_cbs(rsclp)) + return; /* Nothing to do. */ + *rclp->tail = *rsclp->tails[RCU_DONE_TAIL]; + rclp->tail = rsclp->tails[RCU_NEXT_TAIL]; + *rsclp->tails[RCU_DONE_TAIL] = NULL; + for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) + rsclp->tails[i] = rsclp->tails[RCU_DONE_TAIL]; +} + +/* + * Move the entire contents of the specified rcu_segcblist structure, + * counts, callbacks, and all, to the specified rcu_cblist structure. + * @@@ Why do we need this??? Moving early-boot CBs to NOCB lists? + * @@@ Memory barrier needed? (Not if only used at boot time...) 
+ */ +static inline void rcu_segcblist_extract_all(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + rcu_segcblist_extract_done_cbs(rsclp, rclp); + rcu_segcblist_extract_pend_cbs(rsclp, rclp); + rcu_segcblist_extract_count(rsclp, rclp); +} + +/* + * Insert counts from the specified rcu_cblist structure in the + * specified rcu_segcblist structure. + */ +static inline void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + rsclp->len_lazy += rclp->len_lazy; + /* ->len sampled locklessly. */ + WRITE_ONCE(rsclp->len, rsclp->len + rclp->len); + rclp->len_lazy = 0; + rclp->len = 0; +} + +/* + * Move callbacks from the specified rcu_cblist to the beginning of the + * done-callbacks segment of the specified rcu_segcblist. + */ +static inline void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + int i; + + if (!rclp->head) + return; /* No callbacks to move. */ + *rclp->tail = rsclp->head; + rsclp->head = rclp->head; + for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) + if (&rsclp->head == rsclp->tails[i]) + rsclp->tails[i] = rclp->tail; + else + break; + rclp->head = NULL; + rclp->tail = &rclp->head; +} + +/* + * Move callbacks from the specified rcu_cblist to the end of the + * new-callbacks segment of the specified rcu_segcblist. + */ +static inline void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp, + struct rcu_cblist *rclp) +{ + if (!rclp->head) + return; /* Nothing to do. */ + *rsclp->tails[RCU_NEXT_TAIL] = rclp->head; + rsclp->tails[RCU_NEXT_TAIL] = rclp->tail; + rclp->head = NULL; + rclp->tail = &rclp->head; +} + +/* + * Advance the callbacks in the specified rcu_segcblist structure based + * on the current value passed in for the grace-period counter. + */ +static inline void rcu_segcblist_advance(struct rcu_segcblist *rsclp, + unsigned long seq) +{ + int i, j; + + WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); + if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) + return; + + /* + * Find all callbacks whose ->gp_seq numbers indicate that they + * are ready to invoke, and put them into the RCU_DONE_TAIL segment. + */ + for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { + if (ULONG_CMP_LT(seq, rsclp->gp_seq[i])) + break; + rsclp->tails[RCU_DONE_TAIL] = rsclp->tails[i]; + } + + /* If no callbacks moved, nothing more need be done. */ + if (i == RCU_WAIT_TAIL) + return; + + /* Clean up tail pointers that might have been misordered above. */ + for (j = RCU_WAIT_TAIL; j < i; j++) + rsclp->tails[j] = rsclp->tails[RCU_DONE_TAIL]; + + /* + * Callbacks moved, so clean up the misordered ->tails[] pointers + * that now point into the middle of the list of ready-to-invoke + * callbacks. The overall effect is to copy down the later pointers + * into the gap that was created by the now-ready segments. + */ + for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { + if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL]) + break; /* No more callbacks. */ + rsclp->tails[j] = rsclp->tails[i]; + rsclp->gp_seq[j] = rsclp->gp_seq[i]; + } +} + +/* + * "Accelerate" callbacks based on more-accurate grace-period information. + * The reason for this is that RCU does not synchronize the beginnings and + * ends of grace periods, and that callbacks are posted locally. This in + * turn means that the callbacks must be labelled conservatively early + * on, as getting exact information would degrade both performance and + * scalability. 
When more accurate grace-period information becomes + * available, previously posted callbacks can be "accelerated", marking + * them to complete at the end of the earlier grace period. + * + * This function operates on an rcu_segcblist structure, and also the + * grace-period sequence number seq at which new callbacks would become + * ready to invoke. Returns true if there are callbacks that won't be + * ready to invoke until seq, false otherwise. + */ +static inline bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, + unsigned long seq) +{ + int i; + + WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); + if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) + return false; + + /* + * Find the segment preceding the oldest segment of callbacks + * whose ->gp_seq[] completion is at or after that passed in via + * "seq", skipping any empty segments. This oldest segment, along + * with any later segments, can be merged in with any newly arrived + * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq" + * as their ->gp_seq[] grace-period completion sequence number. + */ + for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--) + if (rsclp->tails[i] != rsclp->tails[i - 1] && + ULONG_CMP_LT(rsclp->gp_seq[i], seq)) + break; + + /* + * If all the segments contain callbacks that correspond to + * earlier grace-period sequence numbers than "seq", leave. + * Assuming that the rcu_segcblist structure has enough + * segments in its arrays, this can only happen if some of + * the non-done segments contain callbacks that really are + * ready to invoke. This situation will get straightened + * out by the next call to rcu_segcblist_advance(). + * + * Also advance to the oldest segment of callbacks whose + * ->gp_seq[] completion is at or after that passed in via "seq", + * skipping any empty segments. + */ + if (++i >= RCU_NEXT_TAIL) + return false; + + /* + * Merge all later callbacks, including newly arrived callbacks, + * into the segment located by the for-loop above. Assign "seq" + * as the ->gp_seq[] value in order to correctly handle the case + * where there were no pending callbacks in the rcu_segcblist + * structure other than in the RCU_NEXT_TAIL segment. + */ + for (; i < RCU_NEXT_TAIL; i++) { + rsclp->tails[i] = rsclp->tails[RCU_NEXT_TAIL]; + rsclp->gp_seq[i] = seq; + } + return true; +} + +/* + * Scan the specified rcu_segcblist structure for callbacks that need + * a grace period later than the one specified by "seq". We don't look + * at the RCU_DONE_TAIL or RCU_NEXT_TAIL segments because they don't + * have a grace-period sequence number. + */ +static inline bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp, + unsigned long seq) +{ + int i; + + for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) + if (rsclp->tails[i - 1] != rsclp->tails[i] && + ULONG_CMP_LT(seq, rsclp->gp_seq[i])) + return true; + return false; +} + +/* + * Interim function to return rcu_segcblist head pointer. Longer term, the + * rcu_segcblist will be used more pervasively, removing the need for this + * function. + */ +static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp) +{ + return rsclp->head; +} + +/* + * Interim function to return rcu_segcblist head pointer. Longer term, the + * rcu_segcblist will be used more pervasively, removing the need for this + * function. 
+ */ +static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp) +{ + WARN_ON_ONCE(rcu_segcblist_empty(rsclp)); + return rsclp->tails[RCU_NEXT_TAIL]; +} diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index b8293527ee18..36e1f82faed1 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -30,6 +30,7 @@ #include #include +#include "rcu_segcblist.h" #include "rcu.h" static int init_srcu_struct_fields(struct srcu_struct *sp) diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 87b070de6371..3ae8474557df 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -38,6 +38,7 @@ #include #include "rcu.h" +#include "rcu_segcblist.h" ulong exp_holdoff = 25 * 1000; /* Holdoff (ns) for auto-expediting. */ module_param(exp_holdoff, ulong, 0444); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 0e598ab08fea..ba38262c3554 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -30,9 +30,10 @@ #include #include #include -#include #include +#include "rcu_segcblist.h" + /* * Dynticks per-CPU state. */ -- cgit v1.2.3 From 9b2bbdb227588455afcc3b03475fa9b0a35d83af Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 6 Mar 2017 18:19:39 +0200 Subject: virtio: wrap find_vqs We are going to add more parameters to find_vqs, let's wrap the call so we don't need to tweak all drivers every time. Signed-off-by: Michael S. Tsirkin --- drivers/block/virtio_blk.c | 3 +-- drivers/char/virtio_console.c | 6 +++--- drivers/crypto/virtio/virtio_crypto_core.c | 3 +-- drivers/gpu/drm/virtio/virtgpu_kms.c | 3 +-- drivers/net/caif/caif_virtio.c | 3 +-- drivers/net/virtio_net.c | 3 +-- drivers/rpmsg/virtio_rpmsg_bus.c | 2 +- drivers/scsi/virtio_scsi.c | 3 +-- drivers/virtio/virtio_balloon.c | 3 +-- drivers/virtio/virtio_input.c | 3 +-- include/linux/virtio_config.h | 9 +++++++++ net/vmw_vsock/virtio_transport.c | 6 +++--- 12 files changed, 24 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 1d4c9f8bc1e1..c08c30c35035 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -455,8 +455,7 @@ static int init_vq(struct virtio_blk *vblk) } /* Discover virtqueues and write information to configuration. */ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, - &desc); + err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); if (err) goto out; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 87fe111d0be6..d0699c5fec43 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1945,9 +1945,9 @@ static int init_vqs(struct ports_device *portdev) } } /* Find the queues. 
*/ - err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, - io_callbacks, - (const char **)io_names, NULL); + err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, + io_callbacks, + (const char **)io_names, NULL); if (err) goto free; diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 21472e427f6f..a111cd72797b 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -119,8 +119,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi) names[i] = vi->data_vq[i].name; } - ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names, NULL); + ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL); if (ret) goto err_find; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 491866865c33..1e1c90b30d4a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -175,8 +175,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) DRM_INFO("virgl 3d acceleration not supported by guest\n"); #endif - ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs, - callbacks, names, NULL); + ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); goto err_vqs; diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index bc0eb47eccee..6122768c8644 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -679,8 +679,7 @@ static int cfv_probe(struct virtio_device *vdev) goto err; /* Get the TX virtio ring. This is a "guest side vring". */ - err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, - NULL); + err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL); if (err) goto err; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f36584616e7d..71f447ab440e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2079,8 +2079,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) names[txq2vq(i)] = vi->sq[i].name; } - ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, - names, NULL); + ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL); if (ret) goto err_find; diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 5e66e081027e..f7cade09d38a 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -869,7 +869,7 @@ static int rpmsg_probe(struct virtio_device *vdev) init_waitqueue_head(&vrp->sendq); /* We expect two virtqueues, rx and tx (and in this order) */ - err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names, NULL); + err = virtio_find_vqs(vdev, 2, vqs, vq_cbs, names, NULL); if (err) goto free_vrp; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 939c47df73fa..e9222dcb9707 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -870,8 +870,7 @@ static int virtscsi_init(struct virtio_device *vdev, } /* Discover virtqueues and write information to configuration. 
*/ - err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, - &desc); + err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc); if (err) goto out; diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 34adf9b9c053..408c174ef0d5 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -418,8 +418,7 @@ static int init_vqs(struct virtio_balloon *vb) * optionally stat. */ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2; - err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names, - NULL); + err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL); if (err) return err; diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index 79f1293cda93..3a0468f2ceb0 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -173,8 +173,7 @@ static int virtinput_init_vqs(struct virtio_input *vi) static const char * const names[] = { "events", "status" }; int err; - err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names, - NULL); + err = virtio_find_vqs(vi->vdev, 2, vqs, cbs, names, NULL); if (err) return err; vi->evt = vqs[0]; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 8355bab175e1..47f3d805c290 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -179,6 +179,15 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, return vq; } +static inline +int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], + struct irq_affinity *desc) +{ + return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, desc); +} + /** * virtio_device_ready - enable vq use in probe function * @vdev: the device diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 68675a151f22..97e26e2955e1 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -573,9 +573,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev) vsock->vdev = vdev; - ret = vsock->vdev->config->find_vqs(vsock->vdev, VSOCK_VQ_MAX, - vsock->vqs, callbacks, names, - NULL); + ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX, + vsock->vqs, callbacks, names, + NULL); if (ret < 0) goto out; -- cgit v1.2.3 From f94682dde5ed23eed13533a37dfce942e60ade4e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 6 Mar 2017 18:32:29 +0200 Subject: virtio: add context flag to find vqs Allows maintaining extra context per vq. For ease of use, passing in NULL is legal and disables the feature for all vqs. Includes fixes by Christian for s390, acked by Cornelia. Signed-off-by: Christian Borntraeger Acked-by: Cornelia Huck Signed-off-by: Michael S. 
Tsirkin --- drivers/misc/mic/vop/vop_main.c | 9 ++++++--- drivers/remoteproc/remoteproc_virtio.c | 10 ++++++---- drivers/s390/virtio/kvm_virtio.c | 8 +++++--- drivers/s390/virtio/virtio_ccw.c | 7 ++++--- drivers/virtio/virtio_mmio.c | 8 +++++--- drivers/virtio/virtio_pci_common.c | 17 +++++++++++------ drivers/virtio/virtio_pci_common.h | 4 +++- drivers/virtio/virtio_pci_legacy.c | 4 +++- drivers/virtio/virtio_pci_modern.c | 12 ++++++++---- drivers/virtio/virtio_ring.c | 7 +++++-- include/linux/virtio_config.h | 18 +++++++++++++++--- include/linux/virtio_ring.h | 3 +++ 12 files changed, 74 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index c2e29d7f0de8..a341938c7e2c 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -278,7 +278,7 @@ static void vop_del_vqs(struct virtio_device *dev) static struct virtqueue *vop_find_vq(struct virtio_device *dev, unsigned index, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, bool ctx) { struct _vop_vdev *vdev = to_vopvdev(dev); struct vop_device *vpdev = vdev->vpdev; @@ -314,6 +314,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN, dev, false, + ctx, (void __force *)va, vop_notify, callback, name); if (!vq) { err = -ENOMEM; @@ -374,7 +375,8 @@ unmap: static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc) + const char * const names[], const bool *ctx, + struct irq_affinity *desc) { struct _vop_vdev *vdev = to_vopvdev(dev); struct vop_device *vpdev = vdev->vpdev; @@ -388,7 +390,8 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, for (i = 0; i < nvqs; ++i) { dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", __func__, i, names[i]); - vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]); + vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], + ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error; diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 0142cc3f0c91..294634836b32 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(rproc_vq_interrupt); static struct virtqueue *rp_find_vq(struct virtio_device *vdev, unsigned int id, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, bool ctx) { struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); struct rproc *rproc = vdev_to_rproc(vdev); @@ -103,8 +103,8 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, * Create the new vq, and tell virtio we're not interested in * the 'weak' smp barriers, since we're talking with a real device. 
*/ - vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, addr, - rproc_virtio_notify, callback, name); + vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx, + addr, rproc_virtio_notify, callback, name); if (!vq) { dev_err(dev, "vring_new_virtqueue %s failed\n", name); rproc_free_vring(rvring); @@ -138,12 +138,14 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], + const bool * ctx, struct irq_affinity *desc) { int i, ret; for (i = 0; i < nvqs; ++i) { - vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]); + vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], + ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); goto error; diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 2ce0b3eb2efe..a99d09a11f05 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -189,7 +189,7 @@ static bool kvm_notify(struct virtqueue *vq) static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, bool ctx) { struct kvm_device *kdev = to_kvmdev(vdev); struct kvm_vqconfig *config; @@ -211,7 +211,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, goto out; vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN, - vdev, true, (void *) config->address, + vdev, true, ctx, (void *) config->address, kvm_notify, callback, name); if (!vq) { err = -ENOMEM; @@ -256,6 +256,7 @@ static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], + const bool *ctx, struct irq_affinity *desc) { struct kvm_device *kdev = to_kvmdev(vdev); @@ -266,7 +267,8 @@ static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return -ENOENT; for (i = 0; i < nvqs; ++i) { - vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); + vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i], + ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) goto error; } diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 0ed209f3d8b0..2a76ea78a0bf 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -484,7 +484,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev) static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, int i, vq_callback_t *callback, - const char *name, + const char *name, bool ctx, struct ccw1 *ccw) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); @@ -522,7 +522,7 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, } vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev, - true, info->queue, virtio_ccw_kvm_notify, + true, ctx, info->queue, virtio_ccw_kvm_notify, callback, name); if (!vq) { /* For now, we fail if we can't get the requested size. */ @@ -629,6 +629,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], + const bool *ctx, struct irq_affinity *desc) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); @@ -642,7 +643,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, for (i = 0; i < nvqs; ++i) { vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], - ccw); + ctx ? 
ctx[i] : false, ccw); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); vqs[i] = NULL; diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 78343b8f9034..74dc7170fd35 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -351,7 +351,7 @@ static void vm_del_vqs(struct virtio_device *vdev) static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), - const char *name) + const char *name, bool ctx) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); struct virtio_mmio_vq_info *info; @@ -388,7 +388,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, /* Create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev, - true, true, vm_notify, callback, name); + true, true, ctx, vm_notify, callback, name); if (!vq) { err = -ENOMEM; goto error_new_virtqueue; @@ -447,6 +447,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], + const bool *ctx, struct irq_affinity *desc) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); @@ -459,7 +460,8 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, return err; for (i = 0; i < nvqs; ++i) { - vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); + vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], + ctx ? ctx[i] : false); if (IS_ERR(vqs[i])) { vm_del_vqs(vdev); return PTR_ERR(vqs[i]); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 698d5d06fa03..007a4f366086 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -172,6 +172,7 @@ error: static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, + bool ctx, u16 msix_vec) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); @@ -183,7 +184,7 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, if (!info) return ERR_PTR(-ENOMEM); - vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, + vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx, msix_vec); if (IS_ERR(vq)) goto out_info; @@ -274,6 +275,7 @@ void vp_del_vqs(struct virtio_device *vdev) static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], bool per_vq_vectors, + const bool *ctx, struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); @@ -315,6 +317,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, else msix_vec = VP_MSIX_VQ_VECTOR; vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + ctx ? ctx[i] : false, msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); @@ -345,7 +348,7 @@ error_find: static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[]) + const char * const names[], const bool *ctx) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i, err; @@ -367,6 +370,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, continue; } vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + ctx ? 
ctx[i] : false, VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); @@ -383,20 +387,21 @@ out_del_vqs: /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc) + const char * const names[], const bool *ctx, + struct irq_affinity *desc) { int err; /* Try MSI-X with one vector per queue. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc); + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc); if (!err) return 0; /* Fallback: MSI-X with one vector for config, one shared for queues. */ - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc); + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc); if (!err) return 0; /* Finally fall back to regular interrupts. */ - return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); + return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx); } const char *vp_bus_name(struct virtio_device *vdev) diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index e96334aec1e0..135ee3cf7175 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -102,6 +102,7 @@ struct virtio_pci_device { unsigned idx, void (*callback)(struct virtqueue *vq), const char *name, + bool ctx, u16 msix_vec); void (*del_vq)(struct virtio_pci_vq_info *info); @@ -131,7 +132,8 @@ void vp_del_vqs(struct virtio_device *vdev); /* the config->find_vqs() implementation */ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc); + const char * const names[], const bool *ctx, + struct irq_affinity *desc); const char *vp_bus_name(struct virtio_device *vdev); /* Setup the affinity for a virtqueue: diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 4bfa48fb1324..2780886e8ba3 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -116,6 +116,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, + bool ctx, u16 msix_vec) { struct virtqueue *vq; @@ -135,7 +136,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, - true, false, vp_notify, callback, name); + true, false, ctx, + vp_notify, callback, name); if (!vq) return ERR_PTR(-ENOMEM); diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 8978f109d2d7..2555d80f6eec 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -297,6 +297,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, + bool ctx, u16 msix_vec) { struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; @@ -328,7 +329,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* create the vring */ vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, &vp_dev->vdev, - true, true, vp_notify, callback, name); + true, true, ctx, + vp_notify, callback, name); if (!vq) return ERR_PTR(-ENOMEM); @@ -387,12 +389,14 @@ err_map_notify: } static int vp_modern_find_vqs(struct virtio_device 
*vdev, unsigned nvqs, - struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc) + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], const bool *ctx, + struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq; - int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, desc); + int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc); if (rc) return rc; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 409aeaa49246..b23b5fae468b 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -916,6 +916,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, struct vring vring, struct virtio_device *vdev, bool weak_barriers, + bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) @@ -1019,6 +1020,7 @@ struct virtqueue *vring_create_virtqueue( struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, + bool context, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) @@ -1058,7 +1060,7 @@ struct virtqueue *vring_create_virtqueue( queue_size_in_bytes = vring_size(num, vring_align); vring_init(&vring, num, queue, vring_align); - vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, + vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, notify, callback, name); if (!vq) { vring_free_queue(vdev, queue_size_in_bytes, queue, @@ -1079,6 +1081,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, + bool context, void *pages, bool (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), @@ -1086,7 +1089,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, { struct vring vring; vring_init(&vring, num, pages, vring_align); - return __vring_new_virtqueue(index, vring, vdev, weak_barriers, + return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, notify, callback, name); } EXPORT_SYMBOL_GPL(vring_new_virtqueue); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 47f3d805c290..0133d8a12ccd 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -72,7 +72,8 @@ struct virtio_config_ops { void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc); + const char * const names[], const bool *ctx, + struct irq_affinity *desc); void (*del_vqs)(struct virtio_device *); u64 (*get_features)(struct virtio_device *vdev); int (*finalize_features)(struct virtio_device *vdev); @@ -173,7 +174,8 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *callbacks[] = { c }; const char *names[] = { n }; struct virtqueue *vq; - int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL); + int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL, + NULL); if (err < 0) return ERR_PTR(err); return vq; @@ -185,7 +187,17 @@ int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, const char * const names[], struct irq_affinity *desc) { - return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, desc); + return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc); +} + +static inline +int 
virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char * const names[], const bool *ctx, + struct irq_affinity *desc) +{ + return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, + desc); } /** diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index e8d36938f09a..270cfa81830e 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -71,6 +71,7 @@ struct virtqueue *vring_create_virtqueue(unsigned int index, struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num, + bool ctx, bool (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), const char *name); @@ -80,6 +81,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, struct vring vring, struct virtio_device *vdev, bool weak_barriers, + bool ctx, bool (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name); @@ -93,6 +95,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, + bool ctx, void *pages, bool (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), -- cgit v1.2.3 From 5a08b04f637921e44ba767c07c74b0535504ab71 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 7 Feb 2017 06:15:13 +0200 Subject: virtio: allow extra context per descriptor Allow extra context per descriptor. To avoid slow down for data path, this disables use of indirect descriptors for this vq. Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_ring.c | 70 ++++++++++++++++++++++++++++++++++++-------- include/linux/virtio.h | 9 ++++++ 2 files changed, 66 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index b23b5fae468b..5e1b548828e6 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -263,6 +263,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, unsigned int out_sgs, unsigned int in_sgs, void *data, + void *ctx, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -275,6 +276,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, START_USE(vq); BUG_ON(data == NULL); + BUG_ON(ctx && vq->indirect); if (unlikely(vq->broken)) { END_USE(vq); @@ -389,6 +391,8 @@ static inline int virtqueue_add(struct virtqueue *_vq, vq->desc_state[head].data = data; if (indirect) vq->desc_state[head].indir_desc = desc; + if (ctx) + vq->desc_state[head].indir_desc = ctx; /* Put entry in available array (but don't update avail->idx until they * do sync). 
*/ @@ -461,7 +465,8 @@ int virtqueue_add_sgs(struct virtqueue *_vq, for (sg = sgs[i]; sg; sg = sg_next(sg)) total_sg++; } - return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); + return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, + data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); @@ -483,7 +488,7 @@ int virtqueue_add_outbuf(struct virtqueue *vq, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); + return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); @@ -505,10 +510,34 @@ int virtqueue_add_inbuf(struct virtqueue *vq, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); + return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); +/** + * virtqueue_add_inbuf_ctx - expose input buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg writable by other side + * @data: the token identifying the buffer. + * @ctx: extra context for the token + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). + */ +int virtqueue_add_inbuf_ctx(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + void *ctx, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx); + /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @vq: the struct virtqueue @@ -598,7 +627,8 @@ bool virtqueue_kick(struct virtqueue *vq) } EXPORT_SYMBOL_GPL(virtqueue_kick); -static void detach_buf(struct vring_virtqueue *vq, unsigned int head) +static void detach_buf(struct vring_virtqueue *vq, unsigned int head, + void **ctx) { unsigned int i, j; __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); @@ -622,10 +652,15 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) /* Plus final descriptor */ vq->vq.num_free++; - /* Free the indirect table, if any, now that it's unmapped. */ - if (vq->desc_state[head].indir_desc) { + if (vq->indirect) { struct vring_desc *indir_desc = vq->desc_state[head].indir_desc; - u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len); + u32 len; + + /* Free the indirect table, if any, now that it's unmapped. */ + if (!indir_desc) + return; + + len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len); BUG_ON(!(vq->vring.desc[head].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))); @@ -634,8 +669,10 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) for (j = 0; j < len / sizeof(struct vring_desc); j++) vring_unmap_one(vq, &indir_desc[j]); - kfree(vq->desc_state[head].indir_desc); + kfree(indir_desc); vq->desc_state[head].indir_desc = NULL; + } else if (ctx) { + *ctx = vq->desc_state[head].indir_desc; } } @@ -660,7 +697,8 @@ static inline bool more_used(const struct vring_virtqueue *vq) * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_*(). 
*/ -void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) +void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len, + void **ctx) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; @@ -698,7 +736,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) /* detach_buf clears data, so grab it now. */ ret = vq->desc_state[i].data; - detach_buf(vq, i); + detach_buf(vq, i, ctx); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before @@ -715,8 +753,13 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) END_USE(vq); return ret; } -EXPORT_SYMBOL_GPL(virtqueue_get_buf); +EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx); +void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) +{ + return virtqueue_get_buf_ctx(_vq, len, NULL); +} +EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @vq: the struct virtqueue we're talking about. @@ -878,7 +921,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) continue; /* detach_buf clears data, so grab it now. */ buf = vq->desc_state[i].data; - detach_buf(vq, i); + detach_buf(vq, i, NULL); vq->avail_idx_shadow--; vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow); END_USE(vq); @@ -951,7 +994,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, vq->last_add_time_valid = false; #endif - vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); + vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && + !context; vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); /* No callback? Tell other side not to bother us. */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 7edfbdb55a99..ed04753278d4 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -44,6 +44,12 @@ int virtqueue_add_inbuf(struct virtqueue *vq, void *data, gfp_t gfp); +int virtqueue_add_inbuf_ctx(struct virtqueue *vq, + struct scatterlist sg[], unsigned int num, + void *data, + void *ctx, + gfp_t gfp); + int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[], unsigned int out_sgs, @@ -59,6 +65,9 @@ bool virtqueue_notify(struct virtqueue *vq); void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); +void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len, + void **ctx); + void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); -- cgit v1.2.3 From 991c7ed6acca506983c42d51965da8b8f3677682 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Wed, 3 May 2017 11:20:15 -0700 Subject: elf: Add ARCv2 specific core note section Signed-off-by: Vineet Gupta --- include/uapi/linux/elf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index b59ee077a596..b5c00563ed9b 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -417,7 +417,7 @@ typedef struct elf64_shdr { #define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */ #define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */ #define NT_METAG_TLS 0x502 /* Metag TLS pointer */ - +#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ /* Note header in a PT_NOTE section */ typedef struct elf32_note { -- cgit v1.2.3 From 74da4a0f574d11ed60dbe50a1e5e942e20476590 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Fri, 3 Mar 2017 18:16:07 +0100 Subject: libceph, ceph: always advertise all 
supported features No reason to hide CephFS-specific features in the rbd case. Recent feature bits mix RADOS and CephFS-specific stuff together anyway. Signed-off-by: Ilya Dryomov --- drivers/block/rbd.c | 2 +- fs/ceph/super.c | 7 +------ include/linux/ceph/ceph_features.h | 4 ++++ include/linux/ceph/libceph.h | 5 +---- net/ceph/ceph_common.c | 16 ++++++---------- 5 files changed, 13 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 517838b65964..16010183b703 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -731,7 +731,7 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) kref_init(&rbdc->kref); INIT_LIST_HEAD(&rbdc->node); - rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0); + rbdc->client = ceph_create_client(ceph_opts, rbdc); if (IS_ERR(rbdc->client)) goto out_rbdc; ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 0ec8d0114e57..8b9f645006da 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -544,10 +544,6 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, struct ceph_options *opt) { struct ceph_fs_client *fsc; - const u64 supported_features = - CEPH_FEATURE_FLOCK | CEPH_FEATURE_DIRLAYOUTHASH | - CEPH_FEATURE_MDSENC | CEPH_FEATURE_MDS_INLINE_DATA; - const u64 required_features = 0; int page_count; size_t size; int err = -ENOMEM; @@ -556,8 +552,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, if (!fsc) return ERR_PTR(-ENOMEM); - fsc->client = ceph_create_client(opt, fsc, supported_features, - required_features); + fsc->client = ceph_create_client(opt, fsc); if (IS_ERR(fsc->client)) { err = PTR_ERR(fsc->client); goto fail; diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index ae2f66833762..fd8b2953c78f 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -105,8 +105,10 @@ static inline u64 ceph_sanitize_features(u64 features) */ #define CEPH_FEATURES_SUPPORTED_DEFAULT \ (CEPH_FEATURE_NOSRCADDR | \ + CEPH_FEATURE_FLOCK | \ CEPH_FEATURE_SUBSCRIBE2 | \ CEPH_FEATURE_RECONNECT_SEQ | \ + CEPH_FEATURE_DIRLAYOUTHASH | \ CEPH_FEATURE_PGID64 | \ CEPH_FEATURE_PGPOOL3 | \ CEPH_FEATURE_OSDENC | \ @@ -114,11 +116,13 @@ static inline u64 ceph_sanitize_features(u64 features) CEPH_FEATURE_MSG_AUTH | \ CEPH_FEATURE_CRUSH_TUNABLES2 | \ CEPH_FEATURE_REPLY_CREATE_INODE | \ + CEPH_FEATURE_MDSENC | \ CEPH_FEATURE_OSDHASHPSPOOL | \ CEPH_FEATURE_OSD_CACHEPOOL | \ CEPH_FEATURE_CRUSH_V2 | \ CEPH_FEATURE_EXPORT_PEER | \ CEPH_FEATURE_OSDMAP_ENC | \ + CEPH_FEATURE_MDS_INLINE_DATA | \ CEPH_FEATURE_CRUSH_TUNABLES3 | \ CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ CEPH_FEATURE_MSGR_KEEPALIVE2 | \ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 88cd5dc8e238..cecbf5a26e5a 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -262,10 +262,7 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client); extern void ceph_destroy_options(struct ceph_options *opt); extern int ceph_compare_options(struct ceph_options *new_opt, struct ceph_client *client); -extern struct ceph_client *ceph_create_client(struct ceph_options *opt, - void *private, - u64 supported_features, - u64 required_features); +struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private); struct ceph_entity_addr *ceph_client_addr(struct ceph_client 
*client); u64 ceph_client_gid(struct ceph_client *client); extern void ceph_destroy_client(struct ceph_client *client); diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 108533859a53..4701d067169f 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -596,9 +596,7 @@ EXPORT_SYMBOL(ceph_client_gid); /* * create a fresh client instance */ -struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, - u64 supported_features, - u64 required_features) +struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private) { struct ceph_client *client; struct ceph_entity_addr *myaddr = NULL; @@ -615,14 +613,12 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, init_waitqueue_head(&client->auth_wq); client->auth_err = 0; - if (!ceph_test_opt(client, NOMSGAUTH)) - required_features |= CEPH_FEATURE_MSG_AUTH; - client->extra_mon_dispatch = NULL; - client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT | - supported_features; - client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT | - required_features; + client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT; + client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT; + + if (!ceph_test_opt(client, NOMSGAUTH)) + client->required_features |= CEPH_FEATURE_MSG_AUTH; /* msgr */ if (ceph_test_opt(client, MYIP)) -- cgit v1.2.3 From 06dfa96399a9a3280dd81e47f8696aa89f1783e7 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 17 Mar 2017 14:10:27 +0200 Subject: libceph: convert ceph_snap_context.nref from atomic_t to refcount_t refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows to avoid accidental refcounter overflows that might lead to use-after-free situations. Signed-off-by: Elena Reshetova Signed-off-by: Hans Liljestrand Signed-off-by: Kees Cook Signed-off-by: David Windsor Signed-off-by: Ilya Dryomov --- include/linux/ceph/libceph.h | 3 ++- net/ceph/snapshot.c | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index cecbf5a26e5a..3229ae6c7846 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -161,7 +162,7 @@ struct ceph_client { * dirtied. 
*/ struct ceph_snap_context { - atomic_t nref; + refcount_t nref; u64 seq; u32 num_snaps; u64 snaps[]; diff --git a/net/ceph/snapshot.c b/net/ceph/snapshot.c index 705414e78ae0..e14a5d038656 100644 --- a/net/ceph/snapshot.c +++ b/net/ceph/snapshot.c @@ -49,7 +49,7 @@ struct ceph_snap_context *ceph_create_snap_context(u32 snap_count, if (!snapc) return NULL; - atomic_set(&snapc->nref, 1); + refcount_set(&snapc->nref, 1); snapc->num_snaps = snap_count; return snapc; @@ -59,7 +59,7 @@ EXPORT_SYMBOL(ceph_create_snap_context); struct ceph_snap_context *ceph_get_snap_context(struct ceph_snap_context *sc) { if (sc) - atomic_inc(&sc->nref); + refcount_inc(&sc->nref); return sc; } EXPORT_SYMBOL(ceph_get_snap_context); @@ -68,7 +68,7 @@ void ceph_put_snap_context(struct ceph_snap_context *sc) { if (!sc) return; - if (atomic_dec_and_test(&sc->nref)) { + if (refcount_dec_and_test(&sc->nref)) { /*printk(" deleting snap_context %p\n", sc);*/ kfree(sc); } -- cgit v1.2.3 From 02113a0f14e20bd8e675d7cec16db6eaaf2b2380 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 17 Mar 2017 14:10:28 +0200 Subject: libceph: convert ceph_osd.o_ref from atomic_t to refcount_t refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows to avoid accidental refcounter overflows that might lead to use-after-free situations. Signed-off-by: Elena Reshetova Signed-off-by: Hans Liljestrand Signed-off-by: Kees Cook Signed-off-by: David Windsor Signed-off-by: Ilya Dryomov --- include/linux/ceph/osd_client.h | 3 ++- net/ceph/osd_client.c | 16 ++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index c125b5d9e13c..d6a625e75040 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -27,7 +28,7 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *); /* a given osd we're communicating with */ struct ceph_osd { - atomic_t o_ref; + refcount_t o_ref; struct ceph_osd_client *o_osdc; int o_osd; int o_incarnation; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index e15ea9e4c495..b4500a8ab8b3 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1005,7 +1005,7 @@ static bool osd_registered(struct ceph_osd *osd) */ static void osd_init(struct ceph_osd *osd) { - atomic_set(&osd->o_ref, 1); + refcount_set(&osd->o_ref, 1); RB_CLEAR_NODE(&osd->o_node); osd->o_requests = RB_ROOT; osd->o_linger_requests = RB_ROOT; @@ -1050,9 +1050,9 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) static struct ceph_osd *get_osd(struct ceph_osd *osd) { - if (atomic_inc_not_zero(&osd->o_ref)) { - dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1, - atomic_read(&osd->o_ref)); + if (refcount_inc_not_zero(&osd->o_ref)) { + dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1, + refcount_read(&osd->o_ref)); return osd; } else { dout("get_osd %p FAIL\n", osd); @@ -1062,9 +1062,9 @@ static struct ceph_osd *get_osd(struct ceph_osd *osd) static void put_osd(struct ceph_osd *osd) { - dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), - atomic_read(&osd->o_ref) - 1); - if (atomic_dec_and_test(&osd->o_ref)) { + dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref), + refcount_read(&osd->o_ref) - 1); + if (refcount_dec_and_test(&osd->o_ref)) { osd_cleanup(osd); kfree(osd); } @@ 
-4126,7 +4126,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc) close_osd(osd); } up_write(&osdc->lock); - WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1); + WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1); osd_cleanup(&osdc->homeless_osd); WARN_ON(!list_empty(&osdc->osd_lru)); -- cgit v1.2.3 From 0e1a5ee6577e43e5be55369d398107080b360941 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 17 Mar 2017 14:10:29 +0200 Subject: libceph: convert ceph_pagelist.refcnt from atomic_t to refcount_t refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows to avoid accidental refcounter overflows that might lead to use-after-free situations. Signed-off-by: Elena Reshetova Signed-off-by: Hans Liljestrand Signed-off-by: Kees Cook Signed-off-by: David Windsor Signed-off-by: Ilya Dryomov --- fs/ceph/mds_client.c | 2 +- include/linux/ceph/pagelist.h | 6 +++--- net/ceph/pagelist.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 074490542b4c..b16f1cf552a8 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1991,7 +1991,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, if (req->r_pagelist) { struct ceph_pagelist *pagelist = req->r_pagelist; - atomic_inc(&pagelist->refcnt); + refcount_inc(&pagelist->refcnt); ceph_msg_data_add_pagelist(msg, pagelist); msg->hdr.data_len = cpu_to_le32(pagelist->length); } else { diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h index 13d71fe18b0c..75a7db21457d 100644 --- a/include/linux/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h @@ -2,7 +2,7 @@ #define __FS_CEPH_PAGELIST_H #include -#include +#include #include #include @@ -13,7 +13,7 @@ struct ceph_pagelist { size_t room; struct list_head free_list; size_t num_pages_free; - atomic_t refcnt; + refcount_t refcnt; }; struct ceph_pagelist_cursor { @@ -30,7 +30,7 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl) pl->room = 0; INIT_LIST_HEAD(&pl->free_list); pl->num_pages_free = 0; - atomic_set(&pl->refcnt, 1); + refcount_set(&pl->refcnt, 1); } extern void ceph_pagelist_release(struct ceph_pagelist *pl); diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c index 6864007e64fc..ce09f73be759 100644 --- a/net/ceph/pagelist.c +++ b/net/ceph/pagelist.c @@ -16,7 +16,7 @@ static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl) void ceph_pagelist_release(struct ceph_pagelist *pl) { - if (!atomic_dec_and_test(&pl->refcnt)) + if (!refcount_dec_and_test(&pl->refcnt)) return; ceph_pagelist_unmap_tail(pl); while (!list_empty(&pl->head)) { -- cgit v1.2.3 From 76201b6354bb3aa31c7ba2bd42b9cbb8dda71c44 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 28 Mar 2017 17:04:13 +0800 Subject: ceph: allow connecting to mds whose rank >= mdsmap::m_max_mds mdsmap::m_max_mds is the expected count of active mds. It's not the max rank of active mds. User can decrease mdsmap::m_max_mds, but does not stop mds whose rank >= mdsmap::m_max_mds. 
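To cope with such a map, the client has to size its m_info[] array by the
largest rank it actually sees rather than by m_max_mds.  A rough sketch of
the growth strategy (the helper name mdsmap_ensure_rank() is illustrative
only; the real change below open-codes the same krealloc() logic inside
ceph_mdsmap_decode()):

    /* sketch: make sure m_info[] can hold an entry for rank 'mds' */
    static int mdsmap_ensure_rank(struct ceph_mdsmap *m, int mds)
    {
            void *new_info;
            int new_num;

            if (mds < m->m_num_mds)
                    return 0;               /* rank already covered */

            /* grow to at least mds + 1, doubling to limit reallocations */
            new_num = max(mds + 1, m->m_num_mds * 2);
            new_info = krealloc(m->m_info, new_num * sizeof(*m->m_info),
                                GFP_NOFS | __GFP_ZERO);
            if (!new_info)
                    return -ENOMEM;

            m->m_info = new_info;
            m->m_num_mds = new_num;
            return 0;
    }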
Signed-off-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/debugfs.c | 21 ++++++++++----------- fs/ceph/mds_client.c | 10 +++++----- fs/ceph/mdsmap.c | 44 +++++++++++++++++++++++++++++++++++++------- include/linux/ceph/mdsmap.h | 7 ++++--- 4 files changed, 56 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index f2ae393e2c31..4107a887ca34 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -22,20 +22,19 @@ static int mdsmap_show(struct seq_file *s, void *p) { int i; struct ceph_fs_client *fsc = s->private; + struct ceph_mdsmap *mdsmap; if (fsc->mdsc == NULL || fsc->mdsc->mdsmap == NULL) return 0; - seq_printf(s, "epoch %d\n", fsc->mdsc->mdsmap->m_epoch); - seq_printf(s, "root %d\n", fsc->mdsc->mdsmap->m_root); - seq_printf(s, "session_timeout %d\n", - fsc->mdsc->mdsmap->m_session_timeout); - seq_printf(s, "session_autoclose %d\n", - fsc->mdsc->mdsmap->m_session_autoclose); - for (i = 0; i < fsc->mdsc->mdsmap->m_max_mds; i++) { - struct ceph_entity_addr *addr = - &fsc->mdsc->mdsmap->m_info[i].addr; - int state = fsc->mdsc->mdsmap->m_info[i].state; - + mdsmap = fsc->mdsc->mdsmap; + seq_printf(s, "epoch %d\n", mdsmap->m_epoch); + seq_printf(s, "root %d\n", mdsmap->m_root); + seq_printf(s, "max_mds %d\n", mdsmap->m_max_mds); + seq_printf(s, "session_timeout %d\n", mdsmap->m_session_timeout); + seq_printf(s, "session_autoclose %d\n", mdsmap->m_session_autoclose); + for (i = 0; i < mdsmap->m_num_mds; i++) { + struct ceph_entity_addr *addr = &mdsmap->m_info[i].addr; + int state = mdsmap->m_info[i].state; seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, ceph_pr_addr(&addr->in_addr), ceph_mds_state_name(state)); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index b16f1cf552a8..2733932bf192 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -441,7 +441,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, { struct ceph_mds_session *s; - if (mds >= mdsc->mdsmap->m_max_mds) + if (mds >= mdsc->mdsmap->m_num_mds) return ERR_PTR(-EINVAL); s = kzalloc(sizeof(*s), GFP_NOFS); @@ -1004,7 +1004,7 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc, struct ceph_mds_session *ts; int i, mds = session->s_mds; - if (mds >= mdsc->mdsmap->m_max_mds) + if (mds >= mdsc->mdsmap->m_num_mds) return; mi = &mdsc->mdsmap->m_info[mds]; @@ -3107,7 +3107,7 @@ static void check_new_map(struct ceph_mds_client *mdsc, dout("check_new_map new %u old %u\n", newmap->m_epoch, oldmap->m_epoch); - for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { + for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) { if (mdsc->sessions[i] == NULL) continue; s = mdsc->sessions[i]; @@ -3121,7 +3121,7 @@ static void check_new_map(struct ceph_mds_client *mdsc, ceph_mdsmap_is_laggy(newmap, i) ? 
" (laggy)" : "", ceph_session_state_name(s->s_state)); - if (i >= newmap->m_max_mds || + if (i >= newmap->m_num_mds || memcmp(ceph_mdsmap_get_addr(oldmap, i), ceph_mdsmap_get_addr(newmap, i), sizeof(struct ceph_entity_addr))) { @@ -3167,7 +3167,7 @@ static void check_new_map(struct ceph_mds_client *mdsc, } } - for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) { + for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) { s = mdsc->sessions[i]; if (!s) continue; diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 5454e2327a5f..1a748cf88535 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -22,11 +22,11 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m) int i; /* special case for one mds */ - if (1 == m->m_max_mds && m->m_info[0].state > 0) + if (1 == m->m_num_mds && m->m_info[0].state > 0) return 0; /* count */ - for (i = 0; i < m->m_max_mds; i++) + for (i = 0; i < m->m_num_mds; i++) if (m->m_info[i].state > 0) n++; if (n == 0) @@ -135,8 +135,9 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) m->m_session_autoclose = ceph_decode_32(p); m->m_max_file_size = ceph_decode_64(p); m->m_max_mds = ceph_decode_32(p); + m->m_num_mds = m->m_max_mds; - m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS); + m->m_info = kcalloc(m->m_num_mds, sizeof(*m->m_info), GFP_NOFS); if (m->m_info == NULL) goto nomem; @@ -207,9 +208,20 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) ceph_pr_addr(&addr.in_addr), ceph_mds_state_name(state)); - if (mds < 0 || mds >= m->m_max_mds || state <= 0) + if (mds < 0 || state <= 0) continue; + if (mds >= m->m_num_mds) { + int new_num = max(mds + 1, m->m_num_mds * 2); + void *new_m_info = krealloc(m->m_info, + new_num * sizeof(*m->m_info), + GFP_NOFS | __GFP_ZERO); + if (!new_m_info) + goto nomem; + m->m_info = new_m_info; + m->m_num_mds = new_num; + } + info = &m->m_info[mds]; info->global_id = global_id; info->state = state; @@ -229,6 +241,14 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) info->export_targets = NULL; } } + if (m->m_num_mds > m->m_max_mds) { + /* find max up mds */ + for (i = m->m_num_mds; i >= m->m_max_mds; i--) { + if (i == 0 || m->m_info[i-1].state > 0) + break; + } + m->m_num_mds = i; + } /* pg_pools */ ceph_decode_32_safe(p, end, n, bad); @@ -270,12 +290,22 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) for (i = 0; i < n; i++) { s32 mds = ceph_decode_32(p); - if (mds >= 0 && mds < m->m_max_mds) { + if (mds >= 0 && mds < m->m_num_mds) { if (m->m_info[mds].laggy) num_laggy++; } } m->m_num_laggy = num_laggy; + + if (n > m->m_num_mds) { + void *new_m_info = krealloc(m->m_info, + n * sizeof(*m->m_info), + GFP_NOFS | __GFP_ZERO); + if (!new_m_info) + goto nomem; + m->m_info = new_m_info; + } + m->m_num_mds = n; } /* inc */ @@ -341,7 +371,7 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m) { int i; - for (i = 0; i < m->m_max_mds; i++) + for (i = 0; i < m->m_num_mds; i++) kfree(m->m_info[i].export_targets); kfree(m->m_info); kfree(m->m_data_pg_pools); @@ -357,7 +387,7 @@ bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m) return false; if (m->m_num_laggy > 0) return false; - for (i = 0; i < m->m_max_mds; i++) { + for (i = 0; i < m->m_num_mds; i++) { if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE) nr_active++; } diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h index 8ed5dc505fbb..d5f783f3226a 100644 --- a/include/linux/ceph/mdsmap.h +++ b/include/linux/ceph/mdsmap.h @@ -25,6 +25,7 @@ struct ceph_mdsmap { u32 
m_session_autoclose; /* seconds */ u64 m_max_file_size; u32 m_max_mds; /* size of m_addr, m_state arrays */ + int m_num_mds; struct ceph_mds_info *m_info; /* which object pools file data can be stored in */ @@ -40,7 +41,7 @@ struct ceph_mdsmap { static inline struct ceph_entity_addr * ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) { - if (w >= m->m_max_mds) + if (w >= m->m_num_mds) return NULL; return &m->m_info[w].addr; } @@ -48,14 +49,14 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) { BUG_ON(w < 0); - if (w >= m->m_max_mds) + if (w >= m->m_num_mds) return CEPH_MDS_STATE_DNE; return m->m_info[w].state; } static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) { - if (w >= 0 && w < m->m_max_mds) + if (w >= 0 && w < m->m_num_mds) return m->m_info[w].laggy; return false; } -- cgit v1.2.3 From 79162547b76e4979b21ef80c9629ada94a51a59b Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 5 Apr 2017 12:54:05 -0400 Subject: ceph: make seeky readdir more efficient Current cephfs client uses string to indicate start position of readdir. The string is last entry of previous readdir reply. This approach does not work for seeky readdir because we can not easily convert the new postion to a string. For seeky readdir, mds needs to return dentries from the beginning. Client keeps retrying if the reply does not contain the dentry it wants. In current version of ceph, mds sorts CDentry in its cache in hash order. Client also uses dentry hash to compose dir postion. For seeky readdir, if client passes the hash part of dir postion to mds. mds can avoid replying useless dentries. Signed-off-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/dir.c | 4 ++++ fs/ceph/inode.c | 17 ++++++++++++----- fs/ceph/mds_client.c | 1 + fs/ceph/mds_client.h | 3 ++- include/linux/ceph/ceph_fs.h | 2 ++ 5 files changed, 21 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 3e9ad501addf..ae61cdf7d489 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -378,7 +378,11 @@ more: ceph_mdsc_put_request(req); return -ENOMEM; } + } else if (is_hash_order(ctx->pos)) { + req->r_args.readdir.offset_hash = + cpu_to_le32(fpos_hash(ctx->pos)); } + req->r_dir_release_cnt = fi->dir_release_count; req->r_dir_ordered_cnt = fi->dir_ordered_count; req->r_readdir_cache_idx = fi->readdir_cache_idx; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index d3119fe3ab45..dcce79b84406 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1482,10 +1482,17 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) return readdir_prepopulate_inodes_only(req, session); - if (rinfo->hash_order && req->r_path2) { - last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, - req->r_path2, strlen(req->r_path2)); - last_hash = ceph_frag_value(last_hash); + if (rinfo->hash_order) { + if (req->r_path2) { + last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, + req->r_path2, + strlen(req->r_path2)); + last_hash = ceph_frag_value(last_hash); + } else if (rinfo->offset_hash) { + /* mds understands offset_hash */ + WARN_ON_ONCE(req->r_readdir_offset != 2); + last_hash = le32_to_cpu(rhead->args.readdir.offset_hash); + } } if (rinfo->dir_dir && @@ -1510,7 +1517,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, } if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 && - !(rinfo->hash_order && req->r_path2)) { + !(rinfo->hash_order && last_hash)) { 
/* note dir version at start of readdir so we can tell * if any dentries get dropped */ req->r_dir_release_cnt = atomic64_read(&ci->i_release_count); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a22688873ec3..8cc4d4e8b077 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -189,6 +189,7 @@ static int parse_reply_info_dir(void **p, void *end, info->dir_end = !!(flags & CEPH_READDIR_FRAG_END); info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE); info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER); + info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH); } if (num == 0) goto done; diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index bbebcd55d79e..3e67dd2169fa 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -83,9 +83,10 @@ struct ceph_mds_reply_info_parsed { struct ceph_mds_reply_dirfrag *dir_dir; size_t dir_buf_size; int dir_nr; - bool dir_complete; bool dir_end; + bool dir_complete; bool hash_order; + bool offset_hash; struct ceph_mds_reply_dir_entry *dir_entries; }; diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index f4b2ee18f38c..1787e4a8e251 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -365,6 +365,7 @@ extern const char *ceph_mds_op_name(int op); #define CEPH_READDIR_FRAG_END (1<<0) #define CEPH_READDIR_FRAG_COMPLETE (1<<8) #define CEPH_READDIR_HASH_ORDER (1<<9) +#define CEPH_READDIR_OFFSET_HASH (1<<10) union ceph_mds_request_args { struct { @@ -384,6 +385,7 @@ union ceph_mds_request_args { __le32 max_entries; /* how many dentries to grab */ __le32 max_bytes; __le16 flags; + __le32 offset_hash; } __attribute__ ((packed)) readdir; struct { __le32 mode; -- cgit v1.2.3 From aa26d662b9d44e7f5b0d109e892e537a23471863 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 4 Apr 2017 08:39:36 -0400 Subject: libceph: remove req->r_replay_version Nothing uses this anymore with the removal of the ack vs. commit code. Remove the field and just encode zeroes into place in the request encoding. 
Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- include/linux/ceph/osd_client.h | 1 - net/ceph/debugfs.c | 4 +--- net/ceph/osd_client.c | 7 ++++--- 3 files changed, 5 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index d6a625e75040..3fc9e7754a9b 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -192,7 +192,6 @@ struct ceph_osd_request { unsigned long r_stamp; /* jiffies, send or check time */ unsigned long r_start_stamp; /* jiffies */ int r_attempts; - struct ceph_eversion r_replay_version; /* aka reassert_version */ u32 r_last_force_resend; u32 r_map_dne_bound; diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index c62b2b029a6e..d7e63a4f5578 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -177,9 +177,7 @@ static void dump_request(struct seq_file *s, struct ceph_osd_request *req) seq_printf(s, "%llu\t", req->r_tid); dump_target(s, &req->r_t); - seq_printf(s, "\t%d\t%u'%llu", req->r_attempts, - le32_to_cpu(req->r_replay_version.epoch), - le64_to_cpu(req->r_replay_version.version)); + seq_printf(s, "\t%d", req->r_attempts); for (i = 0; i < req->r_num_ops; i++) { struct ceph_osd_req_op *op = &req->r_ops[i]; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index b4500a8ab8b3..feb666c22381 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1503,9 +1503,10 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg) ceph_encode_32(&p, req->r_flags); ceph_encode_timespec(p, &req->r_mtime); p += sizeof(struct ceph_timespec); - /* aka reassert_version */ - memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version)); - p += sizeof(req->r_replay_version); + + /* reassert_version */ + memset(p, 0, sizeof(struct ceph_eversion)); + p += sizeof(struct ceph_eversion); /* oloc */ ceph_start_encoding(&p, 5, 4, -- cgit v1.2.3 From a1f4020aab10a6bddb2d061c960ebe52cdfa30b5 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 4 Apr 2017 08:39:37 -0400 Subject: libceph: allow requests to return immediately on full conditions if caller wishes Usually, when the osd map is flagged as full or the pool is at quota, write requests just hang. This is not what we want for cephfs, where it would be better to simply report -ENOSPC back to userland instead of stalling. If the caller knows that it will want an immediate error return instead of blocking on a full or at-quota error condition then allow it to set a flag to request that behavior. Set that flag in ceph_osdc_new_request (since ceph.ko is the only caller), and on any other write request from ceph.ko. A later patch will deal with requests that were submitted before the new map showing the full condition came in. 
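A minimal sketch of how a submitter opts in (the request setup around it is
abbreviated and purely illustrative; r_abort_on_full is the field added by
this patch):

    /* sketch: a write request that must not stall on a full cluster */
    req->r_flags = CEPH_OSD_FLAG_WRITE;
    req->r_abort_on_full = true;    /* return -ENOSPC instead of blocking */

    ret = ceph_osdc_start_request(osdc, req, false);
    if (!ret)
            ret = ceph_osdc_wait_request(osdc, req);
    /* ret may now be -ENOSPC if the pool is full or at quota */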
Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- fs/ceph/addr.c | 1 + fs/ceph/file.c | 1 + include/linux/ceph/osd_client.h | 1 + net/ceph/osd_client.c | 7 +++++++ 4 files changed, 10 insertions(+) (limited to 'include') diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 1a3e1b40799a..7e3fae334620 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1892,6 +1892,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); wr_req->r_mtime = ci->vfs_inode.i_mtime; + wr_req->r_abort_on_full = true; err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); if (!err) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ab4823e3e0d5..134c978141d0 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -781,6 +781,7 @@ static void ceph_aio_retry_work(struct work_struct *work) req->r_callback = ceph_aio_complete_req; req->r_inode = inode; req->r_priv = aio_req; + req->r_abort_on_full = true; ret = ceph_osdc_start_request(req->r_osdc, req, false); out: diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 3fc9e7754a9b..8cf644197b1a 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -187,6 +187,7 @@ struct ceph_osd_request { struct timespec r_mtime; /* ditto */ u64 r_data_offset; /* ditto */ bool r_linger; /* don't resend on failure */ + bool r_abort_on_full; /* return ENOSPC when full */ /* internal */ unsigned long r_stamp; /* jiffies, send or check time */ diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index feb666c22381..52a2019a2b64 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -961,6 +961,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, truncate_size, truncate_seq); } + req->r_abort_on_full = true; req->r_flags = flags; req->r_base_oloc.pool = layout->pool_id; req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns); @@ -1627,6 +1628,7 @@ static void maybe_request_map(struct ceph_osd_client *osdc) ceph_monc_renew_subs(&osdc->client->monc); } +static void complete_request(struct ceph_osd_request *req, int err); static void send_map_check(struct ceph_osd_request *req); static void __submit_request(struct ceph_osd_request *req, bool wrlocked) @@ -1636,6 +1638,7 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked) enum calc_target_result ct_res; bool need_send = false; bool promoted = false; + bool need_abort = false; WARN_ON(req->r_tid); dout("%s req %p wrlocked %d\n", __func__, req, wrlocked); @@ -1670,6 +1673,8 @@ again: pr_warn_ratelimited("FULL or reached pool quota\n"); req->r_t.paused = true; maybe_request_map(osdc); + if (req->r_abort_on_full) + need_abort = true; } else if (!osd_homeless(osd)) { need_send = true; } else { @@ -1686,6 +1691,8 @@ again: link_request(osd, req); if (need_send) send_request(req); + else if (need_abort) + complete_request(req, -ENOSPC); mutex_unlock(&osd->lock); if (ct_res == CALC_TARGET_POOL_DNE) -- cgit v1.2.3 From 58eb7932ae4d671d2a2377a1779eda96a2789b11 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 18 Apr 2017 09:21:16 -0400 Subject: libceph: add an epoch_barrier field to struct ceph_osd_client Cephfs can get cap update requests that contain a new epoch barrier in them. When that happens we want to pause all OSD traffic until the right map epoch arrives. Add an epoch_barrier field to ceph_osd_client that is protected by the osdc->lock rwsem. 
When the barrier is set, and the current OSD map epoch is below that, pause the request target when submitting the request or when revisiting it. Add a way for upper layers (cephfs) to update the epoch_barrier as well. If we get a new map, compare the new epoch against the barrier before kicking requests and request another map if the map epoch is still lower than the one we want. If we get a map with a full pool, or at quota condition, then set the barrier to the current epoch value. Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- include/linux/ceph/osd_client.h | 2 ++ net/ceph/debugfs.c | 3 +- net/ceph/osd_client.c | 79 +++++++++++++++++++++++++++++++++++++---- 3 files changed, 76 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 8cf644197b1a..85650b415e73 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -267,6 +267,7 @@ struct ceph_osd_client { struct rb_root osds; /* osds */ struct list_head osd_lru; /* idle osds */ spinlock_t osd_lru_lock; + u32 epoch_barrier; struct ceph_osd homeless_osd; atomic64_t last_tid; /* tid of last request */ u64 last_linger_id; @@ -305,6 +306,7 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg); extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); +void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb); extern void osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags); diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index d7e63a4f5578..71ba13927b3d 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -62,7 +62,8 @@ static int osdmap_show(struct seq_file *s, void *p) return 0; down_read(&osdc->lock); - seq_printf(s, "epoch %d flags 0x%x\n", map->epoch, map->flags); + seq_printf(s, "epoch %u barrier %u flags 0x%x\n", map->epoch, + osdc->epoch_barrier, map->flags); for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) { struct ceph_pg_pool_info *pi = diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 55b7585ccefd..9b8f57fd5ee1 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1298,8 +1298,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc, __pool_full(pi); WARN_ON(pi->id != t->base_oloc.pool); - return (t->flags & CEPH_OSD_FLAG_READ && pauserd) || - (t->flags & CEPH_OSD_FLAG_WRITE && pausewr); + return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) || + ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) || + (osdc->osdmap->epoch < osdc->epoch_barrier); } enum calc_target_result { @@ -1654,8 +1655,13 @@ again: goto promote; } - if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && - ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { + if (osdc->osdmap->epoch < osdc->epoch_barrier) { + dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch, + osdc->epoch_barrier); + req->r_t.paused = true; + maybe_request_map(osdc); + } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && + ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { dout("req %p pausewr\n", req); req->r_t.paused = true; maybe_request_map(osdc); @@ -1807,19 +1813,77 @@ static void abort_request(struct ceph_osd_request *req, int err) complete_request(req, err); } +static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) +{ + if (likely(eb > osdc->epoch_barrier)) { + dout("updating epoch_barrier from %u to %u\n", + osdc->epoch_barrier, eb); + osdc->epoch_barrier = eb; 
+ /* Request map if we're not to the barrier yet */ + if (eb > osdc->osdmap->epoch) + maybe_request_map(osdc); + } +} + +void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) +{ + down_read(&osdc->lock); + if (unlikely(eb > osdc->epoch_barrier)) { + up_read(&osdc->lock); + down_write(&osdc->lock); + update_epoch_barrier(osdc, eb); + up_write(&osdc->lock); + } else { + up_read(&osdc->lock); + } +} +EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier); + /* * Drop all pending requests that are stalled waiting on a full condition to - * clear, and complete them with ENOSPC as the return code. + * clear, and complete them with ENOSPC as the return code. Set the + * osdc->epoch_barrier to the latest map epoch that we've seen if any were + * cancelled. */ static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) { struct rb_node *n; + bool victims = false; dout("enter abort_on_full\n"); if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc)) goto out; + /* Scan list and see if there is anything to abort */ + for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { + struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); + struct rb_node *m; + + m = rb_first(&osd->o_requests); + while (m) { + struct ceph_osd_request *req = rb_entry(m, + struct ceph_osd_request, r_node); + m = rb_next(m); + + if (req->r_abort_on_full) { + victims = true; + break; + } + } + if (victims) + break; + } + + if (!victims) + goto out; + + /* + * Update the barrier to current epoch if it's behind that point, + * since we know we have some calls to be aborted in the tree. + */ + update_epoch_barrier(osdc, osdc->osdmap->epoch); + for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); struct rb_node *m; @@ -1837,7 +1901,7 @@ static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc) } } out: - dout("return abort_on_full\n"); + dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier); } static void check_pool_dne(struct ceph_osd_request *req) @@ -3293,7 +3357,8 @@ done: pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc); - if (was_pauserd || was_pausewr || pauserd || pausewr) + if (was_pauserd || was_pausewr || pauserd || pausewr || + osdc->osdmap->epoch < osdc->epoch_barrier) maybe_request_map(osdc); kick_requests(osdc, &need_resend, &need_resend_linger); -- cgit v1.2.3 From 14bb211d324d6c8140167bd6b2b8a80757348a2f Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 13 Apr 2017 12:17:38 +0200 Subject: rbd: support updating the lock cookie without releasing the lock As we no longer release the lock before potentially raising BLACKLISTED in rbd_reregister_watch(), the "either locked or blacklisted" assert in rbd_queue_workfn() needs to go: we can be both locked and blacklisted at that point now. 
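A condensed view of the new class call as rbd_reacquire_lock() uses it
(see the hunks below; 'new_cookie' stands for the freshly formatted cookie
string, and error handling is trimmed to the interesting case):

    /* sketch: swap the exclusive-lock cookie in place after re-watch */
    ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
                              &rbd_dev->header_oloc, RBD_LOCK_NAME,
                              CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
                              RBD_LOCK_TAG, new_cookie);
    if (ret)        /* e.g. -EOPNOTSUPP from an OSD without set_cookie */
            ;       /* fall back to a manual release + queued re-acquire */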
Signed-off-by: Ilya Dryomov Reviewed-by: Jason Dillaman --- drivers/block/rbd.c | 66 ++++++++++++++++++++++-------------- include/linux/ceph/cls_lock_client.h | 5 +++ net/ceph/cls_lock_client.c | 51 ++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 5f563db59820..063c8f06fb9c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3820,24 +3820,51 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev) ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); } +/* + * lock_rwsem must be held for write + */ +static void rbd_reacquire_lock(struct rbd_device *rbd_dev) +{ + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + char cookie[32]; + int ret; + + WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); + + format_lock_cookie(rbd_dev, cookie); + ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid, + &rbd_dev->header_oloc, RBD_LOCK_NAME, + CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie, + RBD_LOCK_TAG, cookie); + if (ret) { + if (ret != -EOPNOTSUPP) + rbd_warn(rbd_dev, "failed to update lock cookie: %d", + ret); + + /* + * Lock cookie cannot be updated on older OSDs, so do + * a manual release and queue an acquire. + */ + if (rbd_release_lock(rbd_dev)) + queue_delayed_work(rbd_dev->task_wq, + &rbd_dev->lock_dwork, 0); + } else { + strcpy(rbd_dev->lock_cookie, cookie); + } +} + static void rbd_reregister_watch(struct work_struct *work) { struct rbd_device *rbd_dev = container_of(to_delayed_work(work), struct rbd_device, watch_dwork); - bool was_lock_owner = false; - bool need_to_wake = false; int ret; dout("%s rbd_dev %p\n", __func__, rbd_dev); - down_write(&rbd_dev->lock_rwsem); - if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) - was_lock_owner = rbd_release_lock(rbd_dev); - mutex_lock(&rbd_dev->watch_mutex); if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) { mutex_unlock(&rbd_dev->watch_mutex); - goto out; + return; } ret = __rbd_register_watch(rbd_dev); @@ -3845,36 +3872,28 @@ static void rbd_reregister_watch(struct work_struct *work) rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); if (ret == -EBLACKLISTED || ret == -ENOENT) { set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags); - need_to_wake = true; + wake_requests(rbd_dev, true); } else { queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, RBD_RETRY_DELAY); } mutex_unlock(&rbd_dev->watch_mutex); - goto out; + return; } - need_to_wake = true; rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; mutex_unlock(&rbd_dev->watch_mutex); + down_write(&rbd_dev->lock_rwsem); + if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) + rbd_reacquire_lock(rbd_dev); + up_write(&rbd_dev->lock_rwsem); + ret = rbd_dev_refresh(rbd_dev); if (ret) rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret); - - if (was_lock_owner) { - ret = rbd_try_lock(rbd_dev); - if (ret) - rbd_warn(rbd_dev, "reregisteration lock failed: %d", - ret); - } - -out: - up_write(&rbd_dev->lock_rwsem); - if (need_to_wake) - wake_requests(rbd_dev, true); } /* @@ -4052,9 +4071,6 @@ static void rbd_queue_workfn(struct work_struct *work) if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) rbd_wait_state_locked(rbd_dev); - - WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^ - !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { result = 
-EBLACKLISTED; goto err_unlock; diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h index 84884d8d4710..0594d3bba774 100644 --- a/include/linux/ceph/cls_lock_client.h +++ b/include/linux/ceph/cls_lock_client.h @@ -37,6 +37,11 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc, struct ceph_object_locator *oloc, char *lock_name, char *cookie, struct ceph_entity_name *locker); +int ceph_cls_set_cookie(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, u8 type, char *old_cookie, + char *tag, char *new_cookie); void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers); diff --git a/net/ceph/cls_lock_client.c b/net/ceph/cls_lock_client.c index b9233b990399..08ada893f01e 100644 --- a/net/ceph/cls_lock_client.c +++ b/net/ceph/cls_lock_client.c @@ -179,6 +179,57 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc, } EXPORT_SYMBOL(ceph_cls_break_lock); +int ceph_cls_set_cookie(struct ceph_osd_client *osdc, + struct ceph_object_id *oid, + struct ceph_object_locator *oloc, + char *lock_name, u8 type, char *old_cookie, + char *tag, char *new_cookie) +{ + int cookie_op_buf_size; + int name_len = strlen(lock_name); + int old_cookie_len = strlen(old_cookie); + int tag_len = strlen(tag); + int new_cookie_len = strlen(new_cookie); + void *p, *end; + struct page *cookie_op_page; + int ret; + + cookie_op_buf_size = name_len + sizeof(__le32) + + old_cookie_len + sizeof(__le32) + + tag_len + sizeof(__le32) + + new_cookie_len + sizeof(__le32) + + sizeof(u8) + CEPH_ENCODING_START_BLK_LEN; + if (cookie_op_buf_size > PAGE_SIZE) + return -E2BIG; + + cookie_op_page = alloc_page(GFP_NOIO); + if (!cookie_op_page) + return -ENOMEM; + + p = page_address(cookie_op_page); + end = p + cookie_op_buf_size; + + /* encode cls_lock_set_cookie_op struct */ + ceph_start_encoding(&p, 1, 1, + cookie_op_buf_size - CEPH_ENCODING_START_BLK_LEN); + ceph_encode_string(&p, end, lock_name, name_len); + ceph_encode_8(&p, type); + ceph_encode_string(&p, end, old_cookie, old_cookie_len); + ceph_encode_string(&p, end, tag, tag_len); + ceph_encode_string(&p, end, new_cookie, new_cookie_len); + + dout("%s lock_name %s type %d old_cookie %s tag %s new_cookie %s\n", + __func__, lock_name, type, old_cookie, tag, new_cookie); + ret = ceph_osdc_call(osdc, oid, oloc, "lock", "set_cookie", + CEPH_OSD_FLAG_WRITE, cookie_op_page, + cookie_op_buf_size, NULL, NULL); + + dout("%s: status %d\n", __func__, ret); + __free_page(cookie_op_page); + return ret; +} +EXPORT_SYMBOL(ceph_cls_set_cookie); + void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers) { int i; -- cgit v1.2.3 From f775ff7d89f33fc9ba63f6f70df3bcc98c2d9828 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 27 Apr 2017 18:34:00 +0200 Subject: ceph: fix file open flags on ppc64 The file open flags (O_foo) are platform specific and should never go out to an interface that is not local to the system. Unfortunately these flags have leaked out onto the wire in the cephfs implementation. That lead to bogus flags getting transmitted on ppc64. This patch converts the kernel view of flags to the ceph view of file open flags. 
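The underlying problem is that the numeric O_* values are per-architecture,
while CEPH_O_* (added below) follows the asm-generic layout.  For example
(values taken from the respective uapi headers, shown only to illustrate
the mismatch):

    /*
     *                asm-generic / x86    powerpc
     *  O_DIRECTORY   00200000             040000
     *  O_NOFOLLOW    00400000             0100000
     *
     * so a ppc64 client that copied raw O_* bits onto the wire would not
     * be understood by the MDS.  Hence every open request now goes
     * through the translation helper:
     */
    req->r_args.open.flags = ceph_flags_sys2wire(flags);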
Fixes: 124e68e74 ("ceph: file operations") Signed-off-by: Alexander Graf Reviewed-by: "Yan, Zheng" Signed-off-by: Ilya Dryomov --- fs/ceph/file.c | 34 +++++++++++++++++++++++++++++++++- include/linux/ceph/ceph_fs.h | 12 ++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 39866d6a34b6..9d2eeed9e323 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -13,6 +13,38 @@ #include "mds_client.h" #include "cache.h" +static __le32 ceph_flags_sys2wire(u32 flags) +{ + u32 wire_flags = 0; + + switch (flags & O_ACCMODE) { + case O_RDONLY: + wire_flags |= CEPH_O_RDONLY; + break; + case O_WRONLY: + wire_flags |= CEPH_O_WRONLY; + break; + case O_RDWR: + wire_flags |= CEPH_O_RDWR; + break; + } + +#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; } + + ceph_sys2wire(O_CREAT); + ceph_sys2wire(O_EXCL); + ceph_sys2wire(O_TRUNC); + ceph_sys2wire(O_DIRECTORY); + ceph_sys2wire(O_NOFOLLOW); + +#undef ceph_sys2wire + + if (flags) + dout("unused open flags: %x", flags); + + return cpu_to_le32(wire_flags); +} + /* * Ceph file operations * @@ -123,7 +155,7 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode) if (IS_ERR(req)) goto out; req->r_fmode = ceph_flags_to_mode(flags); - req->r_args.open.flags = cpu_to_le32(flags); + req->r_args.open.flags = ceph_flags_sys2wire(flags); req->r_args.open.mode = cpu_to_le32(create_mode); out: return req; diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 1787e4a8e251..ad078ebe25d6 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -367,6 +367,18 @@ extern const char *ceph_mds_op_name(int op); #define CEPH_READDIR_HASH_ORDER (1<<9) #define CEPH_READDIR_OFFSET_HASH (1<<10) +/* + * open request flags + */ +#define CEPH_O_RDONLY 00000000 +#define CEPH_O_WRONLY 00000001 +#define CEPH_O_RDWR 00000002 +#define CEPH_O_CREAT 00000100 +#define CEPH_O_EXCL 00000200 +#define CEPH_O_TRUNC 00001000 +#define CEPH_O_DIRECTORY 00200000 +#define CEPH_O_NOFOLLOW 00400000 + union ceph_mds_request_args { struct { __le32 mask; /* CEPH_CAP_* */ -- cgit v1.2.3 From 44f0aeec203738bf34f4b7e16b745c8c71fe0f06 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 2 Apr 2017 16:50:33 +0200 Subject: dmaengine: pl080: Cut some unused defines There is no in-kernel code using these indexed register defines, and their offsets are clearly defined right below. Cut them. 
Signed-off-by: Linus Walleij Signed-off-by: Vinod Koul --- include/linux/amba/pl080.h | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h index 91b84a7f0539..800429f4b997 100644 --- a/include/linux/amba/pl080.h +++ b/include/linux/amba/pl080.h @@ -46,16 +46,8 @@ /* Per channel configuration registers */ -#define PL080_Cx_STRIDE (0x20) +/* Per channel configuration registers */ #define PL080_Cx_BASE(x) ((0x100 + (x * 0x20))) -#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20))) -#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20))) -#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20))) -#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20))) -#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20))) -#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20))) -#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20))) - #define PL080_CH_SRC_ADDR (0x00) #define PL080_CH_DST_ADDR (0x04) #define PL080_CH_LLI (0x08) -- cgit v1.2.3 From ded091fee6806b7120f475d89c151d611758a395 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 2 Apr 2017 16:50:53 +0200 Subject: dmaengine: pl08x: Use the BIT() macro consistently This makes the driver shift bits with BIT() which is used on other places in the driver. Signed-off-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 10 +++++----- include/linux/amba/pl080.h | 40 ++++++++++++++++++++-------------------- 2 files changed, 25 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 8624598530f3..286114ebbbb9 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -420,7 +420,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) /* Enable the DMA channel */ /* Do not access config register until channel shows as disabled */ - while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) + while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id)) cpu_relax(); /* Do not access config register until channel shows as inactive */ @@ -487,8 +487,8 @@ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, writel(val, ch->reg_config); - writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); - writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); + writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR); + writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR); } static inline u32 get_bytes_in_cctl(u32 cctl) @@ -1837,7 +1837,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) return IRQ_NONE; for (i = 0; i < pl08x->vd->channels; i++) { - if (((1 << i) & err) || ((1 << i) & tc)) { + if ((BIT(i) & err) || (BIT(i) & tc)) { /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; @@ -1875,7 +1875,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) } spin_unlock(&plchan->vc.lock); - mask |= (1 << i); + mask |= BIT(i); } } diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h index 800429f4b997..580b5323a717 100644 --- a/include/linux/amba/pl080.h +++ b/include/linux/amba/pl080.h @@ -38,9 +38,9 @@ #define PL080_SOFT_LSREQ (0x2C) #define PL080_CONFIG (0x30) -#define PL080_CONFIG_M2_BE (1 << 2) -#define PL080_CONFIG_M1_BE (1 << 1) -#define PL080_CONFIG_ENABLE (1 << 0) +#define PL080_CONFIG_M2_BE BIT(2) +#define PL080_CONFIG_M1_BE BIT(1) +#define PL080_CONFIG_ENABLE BIT(0) #define PL080_SYNC (0x34) @@ -58,18 +58,18 @@ #define PL080_LLI_ADDR_MASK (0x3fffffff << 2) #define 
PL080_LLI_ADDR_SHIFT (2) -#define PL080_LLI_LM_AHB2 (1 << 0) +#define PL080_LLI_LM_AHB2 BIT(0) -#define PL080_CONTROL_TC_IRQ_EN (1 << 31) +#define PL080_CONTROL_TC_IRQ_EN BIT(31) #define PL080_CONTROL_PROT_MASK (0x7 << 28) #define PL080_CONTROL_PROT_SHIFT (28) -#define PL080_CONTROL_PROT_CACHE (1 << 30) -#define PL080_CONTROL_PROT_BUFF (1 << 29) -#define PL080_CONTROL_PROT_SYS (1 << 28) -#define PL080_CONTROL_DST_INCR (1 << 27) -#define PL080_CONTROL_SRC_INCR (1 << 26) -#define PL080_CONTROL_DST_AHB2 (1 << 25) -#define PL080_CONTROL_SRC_AHB2 (1 << 24) +#define PL080_CONTROL_PROT_CACHE BIT(30) +#define PL080_CONTROL_PROT_BUFF BIT(29) +#define PL080_CONTROL_PROT_SYS BIT(28) +#define PL080_CONTROL_DST_INCR BIT(27) +#define PL080_CONTROL_SRC_INCR BIT(26) +#define PL080_CONTROL_DST_AHB2 BIT(25) +#define PL080_CONTROL_SRC_AHB2 BIT(24) #define PL080_CONTROL_DWIDTH_MASK (0x7 << 21) #define PL080_CONTROL_DWIDTH_SHIFT (21) #define PL080_CONTROL_SWIDTH_MASK (0x7 << 18) @@ -95,20 +95,20 @@ #define PL080_WIDTH_16BIT (0x1) #define PL080_WIDTH_32BIT (0x2) -#define PL080N_CONFIG_ITPROT (1 << 20) -#define PL080N_CONFIG_SECPROT (1 << 19) -#define PL080_CONFIG_HALT (1 << 18) -#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */ -#define PL080_CONFIG_LOCK (1 << 16) -#define PL080_CONFIG_TC_IRQ_MASK (1 << 15) -#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14) +#define PL080N_CONFIG_ITPROT BIT(20) +#define PL080N_CONFIG_SECPROT BIT(19) +#define PL080_CONFIG_HALT BIT(18) +#define PL080_CONFIG_ACTIVE BIT(17) /* RO */ +#define PL080_CONFIG_LOCK BIT(16) +#define PL080_CONFIG_TC_IRQ_MASK BIT(15) +#define PL080_CONFIG_ERR_IRQ_MASK BIT(14) #define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11) #define PL080_CONFIG_FLOW_CONTROL_SHIFT (11) #define PL080_CONFIG_DST_SEL_MASK (0xf << 6) #define PL080_CONFIG_DST_SEL_SHIFT (6) #define PL080_CONFIG_SRC_SEL_MASK (0xf << 1) #define PL080_CONFIG_SRC_SEL_SHIFT (1) -#define PL080_CONFIG_ENABLE (1 << 0) +#define PL080_CONFIG_ENABLE BIT(0) #define PL080_FLOW_MEM2MEM (0x0) #define PL080_FLOW_MEM2PER (0x1) -- cgit v1.2.3 From 2be83da85a64773efaa407639de81bd1377f880e Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Thu, 4 May 2017 12:34:32 +0100 Subject: thermal: devfreq_cooling: add new interface for direct power read This patch introduces a new interface for device drivers connected to devfreq_cooling in the thermal framework: get_real_power(). Some devices have more sophisticated methods (like power counters) to approximate the actual power that they use. In the previous implementation we had a pre-calculated power table which was then scaled by 'utilization' ('busy_time' and 'total_time' taken from devfreq 'last_status'). With this new interface the driver can provide more precise data regarding actual power to the thermal governor every time the power budget is calculated. We then use this value and calculate the real resource utilization scaling factor. 
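A short sketch of the driver side (names such as my_gpu_read_power_mw() and
the probe-time variables are placeholders; only the get_real_power callback
itself is the interface added here):

    static int my_gpu_get_real_power(struct devfreq *df, u32 *power,
                                     unsigned long freq,
                                     unsigned long voltage)
    {
            /* e.g. sample an on-chip power/energy counter and report mW;
             * the value must not exceed power_table[] for the current
             * state */
            *power = my_gpu_read_power_mw();
            return 0;
    }

    static struct devfreq_cooling_power my_gpu_cooling_power = {
            .get_real_power = my_gpu_get_real_power,
    };

    /* at probe time, with 'np' and 'devfreq' already set up */
    cdev = of_devfreq_cooling_register_power(np, devfreq,
                                             &my_gpu_cooling_power);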
Reviewed-by: Chris Diamand Acked-by: Javi Merino Signed-off-by: Lukasz Luba --- drivers/thermal/devfreq_cooling.c | 105 +++++++++++++++++++++++++++++--------- include/linux/devfreq_cooling.h | 19 +++++++ 2 files changed, 101 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index af9d32837a3a..26c31571a12c 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -28,6 +28,8 @@ #include +#define SCALE_ERROR_MITIGATION 100 + static DEFINE_IDA(devfreq_ida); /** @@ -45,6 +47,12 @@ static DEFINE_IDA(devfreq_ida); * @freq_table_size: Size of the @freq_table and @power_table * @power_ops: Pointer to devfreq_cooling_power, used to generate the * @power_table. + * @res_util: Resource utilization scaling factor for the power. + * It is multiplied by 100 to minimize the error. It is used + * for estimation of the power budget instead of using + * 'utilization' (which is 'busy_time / 'total_time'). + * The 'res_util' range is from 100 to (power_table[state] * 100) + * for the corresponding 'state'. */ struct devfreq_cooling_device { int id; @@ -55,6 +63,8 @@ struct devfreq_cooling_device { u32 *freq_table; size_t freq_table_size; struct devfreq_cooling_power *power_ops; + u32 res_util; + int capped_state; }; /** @@ -250,6 +260,16 @@ get_dynamic_power(struct devfreq_cooling_device *dfc, unsigned long freq, return power; } + +static inline unsigned long get_total_power(struct devfreq_cooling_device *dfc, + unsigned long freq, + unsigned long voltage) +{ + return get_static_power(dfc, freq) + get_dynamic_power(dfc, freq, + voltage); +} + + static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 *power) @@ -259,27 +279,55 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd struct devfreq_dev_status *status = &df->last_status; unsigned long state; unsigned long freq = status->current_frequency; - u32 dyn_power, static_power; + unsigned long voltage; + u32 dyn_power = 0; + u32 static_power = 0; + int res; - /* Get dynamic power for state */ state = freq_get_state(dfc, freq); - if (state == THERMAL_CSTATE_INVALID) - return -EAGAIN; + if (state == THERMAL_CSTATE_INVALID) { + res = -EAGAIN; + goto fail; + } - dyn_power = dfc->power_table[state]; + if (dfc->power_ops->get_real_power) { + voltage = get_voltage(df, freq); + if (voltage == 0) { + res = -EINVAL; + goto fail; + } - /* Scale dynamic power for utilization */ - dyn_power = (dyn_power * status->busy_time) / status->total_time; + res = dfc->power_ops->get_real_power(df, power, freq, voltage); + if (!res) { + state = dfc->capped_state; + dfc->res_util = dfc->power_table[state]; + dfc->res_util *= SCALE_ERROR_MITIGATION; - /* Get static power */ - static_power = get_static_power(dfc, freq); + if (*power > 1) + dfc->res_util /= *power; + } else { + goto fail; + } + } else { + dyn_power = dfc->power_table[state]; + + /* Scale dynamic power for utilization */ + dyn_power *= status->busy_time; + dyn_power /= status->total_time; + /* Get static power */ + static_power = get_static_power(dfc, freq); + + *power = dyn_power + static_power; + } trace_thermal_power_devfreq_get_power(cdev, status, freq, dyn_power, static_power); - *power = dyn_power + static_power; - return 0; +fail: + /* It is safe to set max in this case */ + dfc->res_util = SCALE_ERROR_MITIGATION; + return res; } static int devfreq_cooling_state2power(struct thermal_cooling_device 
*cdev, @@ -312,26 +360,34 @@ static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev, unsigned long busy_time; s32 dyn_power; u32 static_power; + s32 est_power; int i; - static_power = get_static_power(dfc, freq); + if (dfc->power_ops->get_real_power) { + /* Scale for resource utilization */ + est_power = power * dfc->res_util; + est_power /= SCALE_ERROR_MITIGATION; + } else { + static_power = get_static_power(dfc, freq); - dyn_power = power - static_power; - dyn_power = dyn_power > 0 ? dyn_power : 0; + dyn_power = power - static_power; + dyn_power = dyn_power > 0 ? dyn_power : 0; - /* Scale dynamic power for utilization */ - busy_time = status->busy_time ?: 1; - dyn_power = (dyn_power * status->total_time) / busy_time; + /* Scale dynamic power for utilization */ + busy_time = status->busy_time ?: 1; + est_power = (dyn_power * status->total_time) / busy_time; + } /* * Find the first cooling state that is within the power * budget for dynamic power. */ for (i = 0; i < dfc->freq_table_size - 1; i++) - if (dyn_power >= dfc->power_table[i]) + if (est_power >= dfc->power_table[i]) break; *state = i; + dfc->capped_state = i; trace_thermal_power_devfreq_limit(cdev, freq, *state, power); return 0; } @@ -387,7 +443,7 @@ static int devfreq_cooling_gen_tables(struct devfreq_cooling_device *dfc) } for (i = 0, freq = ULONG_MAX; i < num_opps; i++, freq--) { - unsigned long power_dyn, voltage; + unsigned long power, voltage; struct dev_pm_opp *opp; opp = dev_pm_opp_find_freq_floor(dev, &freq); @@ -400,12 +456,15 @@ static int devfreq_cooling_gen_tables(struct devfreq_cooling_device *dfc) dev_pm_opp_put(opp); if (dfc->power_ops) { - power_dyn = get_dynamic_power(dfc, freq, voltage); + if (dfc->power_ops->get_real_power) + power = get_total_power(dfc, freq, voltage); + else + power = get_dynamic_power(dfc, freq, voltage); - dev_dbg(dev, "Dynamic power table: %lu MHz @ %lu mV: %lu = %lu mW\n", - freq / 1000000, voltage, power_dyn, power_dyn); + dev_dbg(dev, "Power table: %lu MHz @ %lu mV: %lu = %lu mW\n", + freq / 1000000, voltage, power, power); - power_table[i] = power_dyn; + power_table[i] = power; } freq_table[i] = freq; diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index c35d0c0e0ada..4635f95000a4 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -34,6 +34,23 @@ * If get_dynamic_power() is NULL, then the * dynamic power is calculated as * @dyn_power_coeff * frequency * voltage^2 + * @get_real_power: When this is set, the framework uses it to ask the + * device driver for the actual power. + * Some devices have more sophisticated methods + * (like power counters) to approximate the actual power + * that they use. + * This function provides more accurate data to the + * thermal governor. When the driver does not provide + * such function, framework just uses pre-calculated + * table and scale the power by 'utilization' + * (based on 'busy_time' and 'total_time' taken from + * devfreq 'last_status'). + * The value returned by this function must be lower + * or equal than the maximum power value + * for the current state + * (which can be found in power_table[state]). + * When this interface is used, the power_table holds + * max total (static + dynamic) power value for each OPP. 
*/ struct devfreq_cooling_power { unsigned long (*get_static_power)(struct devfreq *devfreq, @@ -41,6 +58,8 @@ struct devfreq_cooling_power { unsigned long (*get_dynamic_power)(struct devfreq *devfreq, unsigned long freq, unsigned long voltage); + int (*get_real_power)(struct devfreq *df, u32 *power, + unsigned long freq, unsigned long voltage); unsigned long dyn_power_coeff; }; -- cgit v1.2.3 From 771ffa14ead18887bed400c09f4bde5bca5bf342 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Thu, 4 May 2017 12:34:33 +0100 Subject: trace: thermal: add another parameter 'power' to the tracing function This patch adds another parameter to the trace function: trace_thermal_power_devfreq_get_power(). In case when we call directly driver's code for the real power, we do not have static/dynamic_power values. Instead we get total power in the '*power' value. The 'static_power' and 'dynamic_power' are set to 0. Therefore, we have to trace that '*power' value in this scenario. CC: Steven Rostedt CC: Ingo Molnar CC: Zhang Rui CC: Eduardo Valentin Acked-by: Javi Merino Signed-off-by: Lukasz Luba --- drivers/thermal/devfreq_cooling.c | 2 +- include/trace/events/thermal.h | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c index 26c31571a12c..ef59256887ff 100644 --- a/drivers/thermal/devfreq_cooling.c +++ b/drivers/thermal/devfreq_cooling.c @@ -321,7 +321,7 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd } trace_thermal_power_devfreq_get_power(cdev, status, freq, dyn_power, - static_power); + static_power, *power); return 0; fail: diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h index 2b4a8ff72d0d..6cde5b3514c2 100644 --- a/include/trace/events/thermal.h +++ b/include/trace/events/thermal.h @@ -151,9 +151,9 @@ TRACE_EVENT(thermal_power_cpu_limit, TRACE_EVENT(thermal_power_devfreq_get_power, TP_PROTO(struct thermal_cooling_device *cdev, struct devfreq_dev_status *status, unsigned long freq, - u32 dynamic_power, u32 static_power), + u32 dynamic_power, u32 static_power, u32 power), - TP_ARGS(cdev, status, freq, dynamic_power, static_power), + TP_ARGS(cdev, status, freq, dynamic_power, static_power, power), TP_STRUCT__entry( __string(type, cdev->type ) @@ -161,6 +161,7 @@ TRACE_EVENT(thermal_power_devfreq_get_power, __field(u32, load ) __field(u32, dynamic_power ) __field(u32, static_power ) + __field(u32, power) ), TP_fast_assign( @@ -169,11 +170,13 @@ TRACE_EVENT(thermal_power_devfreq_get_power, __entry->load = (100 * status->busy_time) / status->total_time; __entry->dynamic_power = dynamic_power; __entry->static_power = static_power; + __entry->power = power; ), - TP_printk("type=%s freq=%lu load=%u dynamic_power=%u static_power=%u", + TP_printk("type=%s freq=%lu load=%u dynamic_power=%u static_power=%u power=%u", __get_str(type), __entry->freq, - __entry->load, __entry->dynamic_power, __entry->static_power) + __entry->load, __entry->dynamic_power, __entry->static_power, + __entry->power) ); TRACE_EVENT(thermal_power_devfreq_limit, -- cgit v1.2.3 From 84b114b98452c431299d99c135f751659e517acb Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 5 May 2017 06:56:54 -0700 Subject: tcp: randomize timestamps on syncookies Whole point of randomization was to hide server uptime, but an attacker can simply start a syn flood and TCP generates 'old style' timestamps, directly revealing server jiffies value. 
Also, TSval sent by the server to a particular remote address vary depending on syncookies being sent or not, potentially triggering PAWS drops for innocent clients. Lets implement proper randomization, including for SYNcookies. Also we do not need to export sysctl_tcp_timestamps, since it is not used from a module. In v2, I added Florian feedback and contribution, adding tsoff to tcp_get_cookie_sock(). v3 removed one unused variable in tcp_v4_connect() as Florian spotted. Fixes: 95a22caee396c ("tcp: randomize tcp timestamp offsets for each connection") Signed-off-by: Eric Dumazet Reviewed-by: Florian Westphal Tested-by: Florian Westphal Cc: Yuchung Cheng Signed-off-by: David S. Miller --- include/net/secure_seq.h | 10 ++++++---- include/net/tcp.h | 5 +++-- net/core/secure_seq.c | 31 +++++++++++++++++++------------ net/ipv4/syncookies.c | 12 ++++++++++-- net/ipv4/tcp_input.c | 8 +++----- net/ipv4/tcp_ipv4.c | 32 +++++++++++++++++++------------- net/ipv6/syncookies.c | 10 +++++++++- net/ipv6/tcp_ipv6.c | 32 +++++++++++++++++++------------- 8 files changed, 88 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h index fe236b3429f0..b94006f6fbdd 100644 --- a/include/net/secure_seq.h +++ b/include/net/secure_seq.h @@ -6,10 +6,12 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport); -u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr, - __be16 sport, __be16 dport, u32 *tsoff); -u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr, - __be16 sport, __be16 dport, u32 *tsoff); +u32 secure_tcp_seq(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport); +u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr); +u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, + __be16 sport, __be16 dport); +u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr); u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport); u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, diff --git a/include/net/tcp.h b/include/net/tcp.h index 270e5cc43c99..8c0e5a901d64 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -470,7 +470,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); /* From syncookies.c */ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst); + struct dst_entry *dst, u32 tsoff); int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); @@ -1822,7 +1822,8 @@ struct tcp_request_sock_ops { #endif struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, const struct request_sock *req); - __u32 (*init_seq_tsoff)(const struct sk_buff *skb, u32 *tsoff); + u32 (*init_seq)(const struct sk_buff *skb); + u32 (*init_ts_off)(const struct sk_buff *skb); int (*send_synack)(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 6bd2f8fb0476..ae35cce3a40d 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -24,9 +24,13 @@ static siphash_key_t ts_secret __read_mostly; static __always_inline void net_secret_init(void) { - net_get_random_once(&ts_secret, sizeof(ts_secret)); net_get_random_once(&net_secret, sizeof(net_secret)); } 
+ +static __always_inline void ts_secret_init(void) +{ + net_get_random_once(&ts_secret, sizeof(ts_secret)); +} #endif #ifdef CONFIG_INET @@ -47,7 +51,7 @@ static u32 seq_scale(u32 seq) #endif #if IS_ENABLED(CONFIG_IPV6) -static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr) +u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr) { const struct { struct in6_addr saddr; @@ -60,12 +64,14 @@ static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr) if (sysctl_tcp_timestamps != 1) return 0; + ts_secret_init(); return siphash(&combined, offsetofend(typeof(combined), daddr), &ts_secret); } +EXPORT_SYMBOL(secure_tcpv6_ts_off); -u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr, - __be16 sport, __be16 dport, u32 *tsoff) +u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, + __be16 sport, __be16 dport) { const struct { struct in6_addr saddr; @@ -78,14 +84,14 @@ u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr, .sport = sport, .dport = dport }; - u64 hash; + u32 hash; + net_secret_init(); hash = siphash(&combined, offsetofend(typeof(combined), dport), &net_secret); - *tsoff = secure_tcpv6_ts_off(saddr, daddr); return seq_scale(hash); } -EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff); +EXPORT_SYMBOL(secure_tcpv6_seq); u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) @@ -107,11 +113,12 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); #endif #ifdef CONFIG_INET -static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr) +u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr) { if (sysctl_tcp_timestamps != 1) return 0; + ts_secret_init(); return siphash_2u32((__force u32)saddr, (__force u32)daddr, &ts_secret); } @@ -121,15 +128,15 @@ static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr) * it would be easy enough to have the former function use siphash_4u32, passing * the arguments as separate u32. 
*/ -u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr, - __be16 sport, __be16 dport, u32 *tsoff) +u32 secure_tcp_seq(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport) { - u64 hash; + u32 hash; + net_secret_init(); hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, (__force u32)sport << 16 | (__force u32)dport, &net_secret); - *tsoff = secure_tcp_ts_off(saddr, daddr); return seq_scale(hash); } diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 496b97e17aaf..0257d965f111 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -203,7 +204,7 @@ EXPORT_SYMBOL_GPL(__cookie_v4_check); struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst) + struct dst_entry *dst, u32 tsoff) { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *child; @@ -213,6 +214,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, NULL, &own_req); if (child) { atomic_set(&req->rsk_refcnt, 1); + tcp_sk(child)->tsoffset = tsoff; sock_rps_save_rxhash(child, skb); inet_csk_reqsk_queue_add(sk, req, child); } else { @@ -292,6 +294,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) struct rtable *rt; __u8 rcv_wscale; struct flowi4 fl4; + u32 tsoff = 0; if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst) goto out; @@ -311,6 +314,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, 0, NULL); + if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { + tsoff = secure_tcp_ts_off(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr); + tcp_opt.rcv_tsecr -= tsoff; + } + if (!cookie_timestamp_decode(&tcp_opt)) goto out; @@ -381,7 +389,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst); - ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst); + ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff); /* ip_queue_xmit() depends on our flow being setup * Normal sockets get it right from inet_csk_route_child_sock() */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9739962bfb3f..5a3ad09e2786 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -85,7 +85,6 @@ int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); -EXPORT_SYMBOL(sysctl_tcp_timestamps); /* rfc5961 challenge ack rate limiting */ int sysctl_tcp_challenge_ack_limit = 1000; @@ -6347,8 +6346,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; - if (isn && tmp_opt.tstamp_ok) - af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off); + if (tmp_opt.tstamp_ok) + tcp_rsk(req)->ts_off = af_ops->init_ts_off(skb); if (!want_cookie && !isn) { /* Kill the following clause, if you dislike this way. 
*/ @@ -6368,7 +6367,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop_and_release; } - isn = af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off); + isn = af_ops->init_seq(skb); } if (!dst) { dst = af_ops->route_req(sk, &fl, req); @@ -6380,7 +6379,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (want_cookie) { isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); - tcp_rsk(req)->ts_off = 0; req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index cbbafe546c0f..3a51582bef55 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -94,12 +94,18 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, struct inet_hashinfo tcp_hashinfo; EXPORT_SYMBOL(tcp_hashinfo); -static u32 tcp_v4_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff) +static u32 tcp_v4_init_seq(const struct sk_buff *skb) { - return secure_tcp_seq_and_tsoff(ip_hdr(skb)->daddr, - ip_hdr(skb)->saddr, - tcp_hdr(skb)->dest, - tcp_hdr(skb)->source, tsoff); + return secure_tcp_seq(ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr, + tcp_hdr(skb)->dest, + tcp_hdr(skb)->source); +} + +static u32 tcp_v4_init_ts_off(const struct sk_buff *skb) +{ + return secure_tcp_ts_off(ip_hdr(skb)->daddr, + ip_hdr(skb)->saddr); } int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) @@ -145,7 +151,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) struct flowi4 *fl4; struct rtable *rt; int err; - u32 seq; struct ip_options_rcu *inet_opt; struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; @@ -232,13 +237,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) rt = NULL; if (likely(!tp->repair)) { - seq = secure_tcp_seq_and_tsoff(inet->inet_saddr, - inet->inet_daddr, - inet->inet_sport, - usin->sin_port, - &tp->tsoffset); if (!tp->write_seq) - tp->write_seq = seq; + tp->write_seq = secure_tcp_seq(inet->inet_saddr, + inet->inet_daddr, + inet->inet_sport, + usin->sin_port); + tp->tsoffset = secure_tcp_ts_off(inet->inet_saddr, + inet->inet_daddr); } inet->inet_id = tp->write_seq ^ jiffies; @@ -1239,7 +1244,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { .cookie_init_seq = cookie_v4_init_sequence, #endif .route_req = tcp_v4_route_req, - .init_seq_tsoff = tcp_v4_init_seq_and_tsoff, + .init_seq = tcp_v4_init_seq, + .init_ts_off = tcp_v4_init_ts_off, .send_synack = tcp_v4_send_synack, }; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 895ff650db43..5abc3692b901 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -143,6 +144,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) int mss; struct dst_entry *dst; __u8 rcv_wscale; + u32 tsoff = 0; if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst) goto out; @@ -162,6 +164,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, 0, NULL); + if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { + tsoff = secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32, + ipv6_hdr(skb)->saddr.s6_addr32); + tcp_opt.rcv_tsecr -= tsoff; + } + if (!cookie_timestamp_decode(&tcp_opt)) goto out; @@ -242,7 +250,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok = 
cookie_ecn_ok(&tcp_opt, sock_net(sk), dst); - ret = tcp_get_cookie_sock(sk, skb, req, dst); + ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff); out: return ret; out_free: diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 8e42e8f54b70..aeb9497b5bb7 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -101,12 +101,18 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) } } -static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff) +static u32 tcp_v6_init_seq(const struct sk_buff *skb) { - return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32, - ipv6_hdr(skb)->saddr.s6_addr32, - tcp_hdr(skb)->dest, - tcp_hdr(skb)->source, tsoff); + return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32, + ipv6_hdr(skb)->saddr.s6_addr32, + tcp_hdr(skb)->dest, + tcp_hdr(skb)->source); +} + +static u32 tcp_v6_init_ts_off(const struct sk_buff *skb) +{ + return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32, + ipv6_hdr(skb)->saddr.s6_addr32); } static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, @@ -122,7 +128,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, struct flowi6 fl6; struct dst_entry *dst; int addr_type; - u32 seq; int err; struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; @@ -282,13 +287,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, sk_set_txhash(sk); if (likely(!tp->repair)) { - seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32, - sk->sk_v6_daddr.s6_addr32, - inet->inet_sport, - inet->inet_dport, - &tp->tsoffset); if (!tp->write_seq) - tp->write_seq = seq; + tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32, + sk->sk_v6_daddr.s6_addr32, + inet->inet_sport, + inet->inet_dport); + tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32, + sk->sk_v6_daddr.s6_addr32); } if (tcp_fastopen_defer_connect(sk, &err)) @@ -749,7 +754,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { .cookie_init_seq = cookie_v6_init_sequence, #endif .route_req = tcp_v6_route_req, - .init_seq_tsoff = tcp_v6_init_seq_and_tsoff, + .init_seq = tcp_v6_init_seq, + .init_ts_off = tcp_v6_init_ts_off, .send_synack = tcp_v6_send_synack, }; -- cgit v1.2.3 From 8a537ece3d946227e4afa81eae0e43fa47439c7d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 26 Apr 2017 23:22:09 +0200 Subject: PM / wakeup: Integrate mechanism to abort transitions in progress The system wakeup framework is not very consistent with respect to the way it handles suspend-to-idle and generally wakeup events occurring during transitions to system low-power states. First off, system transitions in progress are aborted by the event reporting helpers like pm_wakeup_event() only if the wakeup_count sysfs attribute is in use (as documented), but there are cases in which system-wide transitions should be aborted even if that is not the case. For example, a wakeup signal from a designated wakeup device during system-wide PM transition, it should cause the transition to be aborted right away. Moreover, there is a freeze_wake() call in wakeup_source_activate(), but that really is only effective after suspend_freeze_state has been set to FREEZE_STATE_ENTER by freeze_enter(). However, it is very unlikely that wakeup_source_activate() will ever be called at that time, as it could only be triggered by a IRQF_NO_SUSPEND interrupt handler, so wakeups from suspend-to-idle don't really occur in wakeup_source_activate(). 
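To make that timing constraint concrete, here is a minimal sketch of a hypothetical wakeup-capable driver (the device, handler and symbol names are illustrative only, not taken from this patch); its interrupt must be requested with IRQF_NO_SUSPEND for the handler to run at all while suspended to idle, and even then the pm_wakeup_event() call only aborts a transition in progress if the wakeup_count mechanism happens to be in use:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

/* Illustrative only: a wakeup IRQ handler as things stand before this patch. */
static irqreturn_t foo_wake_irq(int irq, void *data)
{
	struct device *dev = data;

	/*
	 * Updates the wakeup source statistics and, via
	 * wakeup_source_activate(), calls freeze_wake() -- but that is only
	 * effective once suspend_freeze_state has reached FREEZE_STATE_ENTER,
	 * and it does not abort an ordinary suspend in progress unless
	 * /sys/power/wakeup_count is in use.
	 */
	pm_wakeup_event(dev, 0);
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);
	/* IRQF_NO_SUSPEND so the handler can run during suspend-to-idle. */
	return devm_request_irq(&pdev->dev, irq, foo_wake_irq,
				IRQF_NO_SUSPEND, "foo-wake", &pdev->dev);
}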
At the same time there is a way to abort a system suspend in progress (or wake up the system from suspend-to-idle), which is by calling pm_system_wakeup(), but in turn that doesn't cause any wakeup source objects to be activated, so it will not be covered by wakeup source statistics and will not prevent the system from suspending again immediately (in case autosleep is used, for example). Consequently, if anyone wants to abort system transitions in progress and allow the wakeup_count mechanism to work, they need to use both pm_system_wakeup() and pm_wakeup_event(), say, at the same time which is awkward. For the above reasons, make it possible to trigger pm_system_wakeup() from within wakeup_source_activate() and provide a new pm_wakeup_hard_event() helper to do so within the wakeup framework. Signed-off-by: Rafael J. Wysocki --- drivers/base/power/wakeup.c | 36 ++++++++++++++++++------------------ include/linux/pm_wakeup.h | 25 +++++++++++++++++++++---- 2 files changed, 39 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 136854970489..84c41c98d6fc 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -512,12 +512,13 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws) /** * wakup_source_activate - Mark given wakeup source as active. * @ws: Wakeup source to handle. + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Update the @ws' statistics and, if @ws has just been activated, notify the PM * core of the event by incrementing the counter of of wakeup events being * processed. */ -static void wakeup_source_activate(struct wakeup_source *ws) +static void wakeup_source_activate(struct wakeup_source *ws, bool hard) { unsigned int cec; @@ -525,11 +526,8 @@ static void wakeup_source_activate(struct wakeup_source *ws) "unregistered wakeup source\n")) return; - /* - * active wakeup source should bring the system - * out of PM_SUSPEND_FREEZE state - */ - freeze_wake(); + if (hard) + pm_system_wakeup(); ws->active = true; ws->active_count++; @@ -546,8 +544,9 @@ static void wakeup_source_activate(struct wakeup_source *ws) /** * wakeup_source_report_event - Report wakeup event using the given source. * @ws: Wakeup source to report the event for. + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. */ -static void wakeup_source_report_event(struct wakeup_source *ws) +static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) { ws->event_count++; /* This is racy, but the counter is approximate anyway. */ @@ -555,7 +554,7 @@ static void wakeup_source_report_event(struct wakeup_source *ws) ws->wakeup_count++; if (!ws->active) - wakeup_source_activate(ws); + wakeup_source_activate(ws, hard); } /** @@ -573,7 +572,7 @@ void __pm_stay_awake(struct wakeup_source *ws) spin_lock_irqsave(&ws->lock, flags); - wakeup_source_report_event(ws); + wakeup_source_report_event(ws, false); del_timer(&ws->timer); ws->timer_expires = 0; @@ -739,9 +738,10 @@ static void pm_wakeup_timer_fn(unsigned long data) } /** - * __pm_wakeup_event - Notify the PM core of a wakeup event. + * pm_wakeup_ws_event - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the event source. * @msec: Anticipated event processing time (in milliseconds). + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. 
* * Notify the PM core of a wakeup event whose source is @ws that will take * approximately @msec milliseconds to be processed by the kernel. If @ws is @@ -750,7 +750,7 @@ static void pm_wakeup_timer_fn(unsigned long data) * * It is safe to call this function from interrupt context. */ -void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) +void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) { unsigned long flags; unsigned long expires; @@ -760,7 +760,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) spin_lock_irqsave(&ws->lock, flags); - wakeup_source_report_event(ws); + wakeup_source_report_event(ws, hard); if (!msec) { wakeup_source_deactivate(ws); @@ -779,17 +779,17 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) unlock: spin_unlock_irqrestore(&ws->lock, flags); } -EXPORT_SYMBOL_GPL(__pm_wakeup_event); - +EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); /** * pm_wakeup_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * - * Call __pm_wakeup_event() for the @dev's wakeup source object. + * Call pm_wakeup_ws_event() for the @dev's wakeup source object. */ -void pm_wakeup_event(struct device *dev, unsigned int msec) +void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) { unsigned long flags; @@ -797,10 +797,10 @@ void pm_wakeup_event(struct device *dev, unsigned int msec) return; spin_lock_irqsave(&dev->power.lock, flags); - __pm_wakeup_event(dev->power.wakeup, msec); + pm_wakeup_ws_event(dev->power.wakeup, msec, hard); spin_unlock_irqrestore(&dev->power.lock, flags); } -EXPORT_SYMBOL_GPL(pm_wakeup_event); +EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); void pm_print_active_wakeup_sources(void) { diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index a3447932df1f..4c2cba7ec1d4 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -106,8 +106,8 @@ extern void __pm_stay_awake(struct wakeup_source *ws); extern void pm_stay_awake(struct device *dev); extern void __pm_relax(struct wakeup_source *ws); extern void pm_relax(struct device *dev); -extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec); -extern void pm_wakeup_event(struct device *dev, unsigned int msec); +extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard); +extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard); #else /* !CONFIG_PM_SLEEP */ @@ -182,9 +182,11 @@ static inline void __pm_relax(struct wakeup_source *ws) {} static inline void pm_relax(struct device *dev) {} -static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {} +static inline void pm_wakeup_ws_event(struct wakeup_source *ws, + unsigned int msec, bool hard) {} -static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} +static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec, + bool hard) {} #endif /* !CONFIG_PM_SLEEP */ @@ -201,4 +203,19 @@ static inline void wakeup_source_trash(struct wakeup_source *ws) wakeup_source_drop(ws); } +static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) +{ + return pm_wakeup_ws_event(ws, msec, false); +} + +static inline void pm_wakeup_event(struct device *dev, unsigned int msec) +{ + return pm_wakeup_dev_event(dev, msec, false); +} + +static inline 
void pm_wakeup_hard_event(struct device *dev) +{ + return pm_wakeup_dev_event(dev, 0, true); +} + #endif /* _LINUX_PM_WAKEUP_H */ -- cgit v1.2.3 From eed4d47efe9508b855b09754cf6de4325d8a2f0d Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 26 Apr 2017 23:23:03 +0200 Subject: ACPI / sleep: Ignore spurious SCI wakeups from suspend-to-idle The ACPI SCI (System Control Interrupt) is set up as a wakeup IRQ during suspend-to-idle transitions and, consequently, any events signaled through it wake up the system from that state. However, on some systems some of the events signaled via the ACPI SCI while suspended to idle should not cause the system to wake up. In fact, quite often they should just be discarded. Arguably, systems should not resume entirely on such events, but in order to decide which events really should cause the system to resume and which are spurious, it is necessary to resume up to the point when ACPI SCIs are actually handled and processed, which is after executing dpm_resume_noirq() in the system resume path. For this reasons, add a loop around freeze_enter() in which the platforms can process events signaled via multiplexed IRQ lines like the ACPI SCI and add suspend-to-idle hooks that can be used for this purpose to struct platform_freeze_ops. In the ACPI case, the ->wake hook is used for checking if the SCI has triggered while suspended and deferring the interrupt-induced system wakeup until the events signaled through it are actually processed sufficiently to decide whether or not the system should resume. In turn, the ->sync hook allows all of the relevant event queues to be flushed so as to prevent events from being missed due to race conditions. In addition to that, some ACPI code processing wakeup events needs to be modified to use the "hard" version of wakeup triggers, so that it will cause a system resume to happen on device-induced wakeup events even if the "soft" mechanism to prevent the system from suspending is not enabled (that also helps to catch device-induced wakeup events occurring during suspend transitions in progress). Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/battery.c | 2 +- drivers/acpi/button.c | 5 +++-- drivers/acpi/device_pm.c | 3 ++- drivers/acpi/sleep.c | 28 ++++++++++++++++++++++++++++ drivers/base/power/main.c | 5 ----- drivers/base/power/wakeup.c | 18 ++++++++++++------ include/linux/suspend.h | 7 +++++-- kernel/power/process.c | 2 +- kernel/power/suspend.c | 29 +++++++++++++++++++++++++---- 9 files changed, 77 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 4ef1e4624b2b..83ab17e4a795 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -776,7 +776,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume) if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && (battery->capacity_now <= battery->alarm))) - pm_wakeup_event(&battery->device->dev, 0); + pm_wakeup_hard_event(&battery->device->dev); return result; } diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 668137e4a069..b7c2a06963d6 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -216,7 +216,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state) } if (state) - pm_wakeup_event(&device->dev, 0); + pm_wakeup_hard_event(&device->dev); ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); if (ret == NOTIFY_DONE) @@ -398,7 +398,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } else { int keycode; - pm_wakeup_event(&device->dev, 0); + pm_wakeup_hard_event(&device->dev); if (button->suspended) break; @@ -530,6 +530,7 @@ static int acpi_button_add(struct acpi_device *device) lid_device = device; } + device_init_wakeup(&device->dev, true); printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); return 0; diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 993fd31394c8..798d5003a039 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "internal.h" @@ -399,7 +400,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) mutex_lock(&acpi_pm_notifier_lock); if (adev->wakeup.flags.notifier_present) { - __pm_wakeup_event(adev->wakeup.ws, 0); + pm_wakeup_ws_event(adev->wakeup.ws, 0, true); if (adev->wakeup.context.work.func) queue_pm_work(&adev->wakeup.context.work); } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index a4327af676fe..e84005d642e6 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -662,14 +662,40 @@ static int acpi_freeze_prepare(void) acpi_os_wait_events_complete(); if (acpi_sci_irq_valid()) enable_irq_wake(acpi_sci_irq); + return 0; } +static void acpi_freeze_wake(void) +{ + /* + * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means + * that the SCI has triggered while suspended, so cancel the wakeup in + * case it has not been a wakeup event (the GPEs will be checked later). + */ + if (acpi_sci_irq_valid() && + !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) + pm_system_cancel_wakeup(); +} + +static void acpi_freeze_sync(void) +{ + /* + * Process all pending events in case there are any wakeup ones. + * + * The EC driver uses the system workqueue, so that one needs to be + * flushed too. 
+ */ + acpi_os_wait_events_complete(); + flush_scheduled_work(); +} + static void acpi_freeze_restore(void) { acpi_disable_wakeup_devices(ACPI_STATE_S0); if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); + acpi_enable_all_runtime_gpes(); } @@ -681,6 +707,8 @@ static void acpi_freeze_end(void) static const struct platform_freeze_ops acpi_freeze_ops = { .begin = acpi_freeze_begin, .prepare = acpi_freeze_prepare, + .wake = acpi_freeze_wake, + .sync = acpi_freeze_sync, .restore = acpi_freeze_restore, .end = acpi_freeze_end, }; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9faee1c893e5..e987a6f55d36 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1091,11 +1091,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (async_error) goto Complete; - if (pm_wakeup_pending()) { - async_error = -EBUSY; - goto Complete; - } - if (dev->power.syscore || dev->power.direct_complete) goto Complete; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 84c41c98d6fc..f62082fdd670 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ unsigned int pm_wakeup_irq __read_mostly; -/* If set and the system is suspending, terminate the suspend. */ -static bool pm_abort_suspend __read_mostly; +/* If greater than 0 and the system is suspending, terminate the suspend. */ +static atomic_t pm_abort_suspend __read_mostly; /* * Combined counters of registered wakeup events and wakeup events in progress. @@ -856,20 +856,26 @@ bool pm_wakeup_pending(void) pm_print_active_wakeup_sources(); } - return ret || pm_abort_suspend; + return ret || atomic_read(&pm_abort_suspend) > 0; } void pm_system_wakeup(void) { - pm_abort_suspend = true; + atomic_inc(&pm_abort_suspend); freeze_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); -void pm_wakeup_clear(void) +void pm_system_cancel_wakeup(void) +{ + atomic_dec(&pm_abort_suspend); +} + +void pm_wakeup_clear(bool reset) { - pm_abort_suspend = false; pm_wakeup_irq = 0; + if (reset) + atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) diff --git a/include/linux/suspend.h b/include/linux/suspend.h index d9718378a8be..0b1cf32edfd7 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -189,6 +189,8 @@ struct platform_suspend_ops { struct platform_freeze_ops { int (*begin)(void); int (*prepare)(void); + void (*wake)(void); + void (*sync)(void); void (*restore)(void); void (*end)(void); }; @@ -428,7 +430,8 @@ extern unsigned int pm_wakeup_irq; extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); -extern void pm_wakeup_clear(void); +extern void pm_system_cancel_wakeup(void); +extern void pm_wakeup_clear(bool reset); extern void pm_system_irq_wakeup(unsigned int irq_number); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); @@ -478,7 +481,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) static inline bool pm_wakeup_pending(void) { return false; } static inline void pm_system_wakeup(void) {} -static inline void pm_wakeup_clear(void) {} +static inline void pm_wakeup_clear(bool reset) {} static inline void pm_system_irq_wakeup(unsigned int irq_number) {} static inline void lock_system_sleep(void) {} diff --git a/kernel/power/process.c b/kernel/power/process.c index 
c7209f060eeb..78672d324a6e 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -132,7 +132,7 @@ int freeze_processes(void) if (!pm_freezing) atomic_inc(&system_freezing_cnt); - pm_wakeup_clear(); + pm_wakeup_clear(true); pr_info("Freezing user space processes ... "); pm_freezing = true; error = try_to_freeze_tasks(true); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 15e6baef5c73..c0248c74d6d4 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -72,6 +72,8 @@ static void freeze_begin(void) static void freeze_enter(void) { + trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true); + spin_lock_irq(&suspend_freeze_lock); if (pm_wakeup_pending()) goto out; @@ -98,6 +100,27 @@ static void freeze_enter(void) out: suspend_freeze_state = FREEZE_STATE_NONE; spin_unlock_irq(&suspend_freeze_lock); + + trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false); +} + +static void s2idle_loop(void) +{ + do { + freeze_enter(); + + if (freeze_ops && freeze_ops->wake) + freeze_ops->wake(); + + dpm_resume_noirq(PMSG_RESUME); + if (freeze_ops && freeze_ops->sync) + freeze_ops->sync(); + + if (pm_wakeup_pending()) + break; + + pm_wakeup_clear(false); + } while (!dpm_suspend_noirq(PMSG_SUSPEND)); } void freeze_wake(void) @@ -371,10 +394,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) * all the devices are suspended. */ if (state == PM_SUSPEND_FREEZE) { - trace_suspend_resume(TPS("machine_suspend"), state, true); - freeze_enter(); - trace_suspend_resume(TPS("machine_suspend"), state, false); - goto Platform_wake; + s2idle_loop(); + goto Platform_early_resume; } error = disable_nonboot_cpus(); -- cgit v1.2.3 From 2f242bf45370b8ea44f209b22c3c90984655a102 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 5 May 2017 11:53:19 +0200 Subject: mac80211: properly remove RX_ENC_FLAG_40MHZ Somehow I missed this in my RX rate cleanup series, causing some drivers to not report correct bandwidth since this flag isn't used by mac80211 anymore. Fix this, and make hwsim also report higher bandwidths appropriately. Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath9k/ar9003_mac.c | 2 +- drivers/net/wireless/ath/ath9k/mac.c | 4 ++-- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 4 +++- drivers/net/wireless/intel/iwlwifi/dvm/rx.c | 4 +++- drivers/net/wireless/mac80211_hwsim.c | 8 +++++++- include/net/mac80211.h | 2 -- 6 files changed, 16 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 68fcbe03bce2..b3f20b3c0210 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -522,7 +522,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0; rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0; - rxs->enc_flags |= (rxsp->status4 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0; + rxs->bw = (rxsp->status4 & AR_2040) ? 
RATE_INFO_BW_40 : RATE_INFO_BW_20; rxs->evm0 = rxsp->status6; rxs->evm1 = rxsp->status7; diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 6128c2bb23d8..77c94f9e7b61 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -580,8 +580,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, /* directly mapped flags for ieee80211_rx_status */ rs->enc_flags |= (ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0; - rs->enc_flags |= - (ads.ds_rxstatus3 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0; + rs->bw = (ads.ds_rxstatus3 & AR_2040) ? RATE_INFO_BW_40 : + RATE_INFO_BW_20; if (AR_SREV_9280_20_OR_LATER(ah)) rs->enc_flags |= (ads.ds_rxstatus3 & AR_STBC) ? diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 5d5faa3cad24..49a2ff15ddae 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -734,7 +734,9 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) if (rate_n_flags & RATE_MCS_HT_MSK) rx_status.encoding = RX_ENC_HT; if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; + rx_status.bw = RATE_INFO_BW_40; + else + rx_status.bw = RATE_INFO_BW_20; if (rate_n_flags & RATE_MCS_SGI_MSK) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 1ee1ba9931a7..adfd6307edca 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -889,7 +889,9 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, if (rate_n_flags & RATE_MCS_HT_MSK) rx_status.encoding = RX_ENC_HT; if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; + rx_status.bw = RATE_INFO_BW_40; + else + rx_status.bw = RATE_INFO_BW_20; if (rate_n_flags & RATE_MCS_SGI_MSK) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_MCS_GF_MSK) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 87444af20fc5..002b25cff5b6 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1201,7 +1201,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, rx_status.encoding = RX_ENC_HT; } if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - rx_status.enc_flags |= RX_ENC_FLAG_40MHZ; + rx_status.bw = RATE_INFO_BW_40; + else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + rx_status.bw = RATE_INFO_BW_80; + else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + rx_status.bw = RATE_INFO_BW_160; + else + rx_status.bw = RATE_INFO_BW_20; if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; /* TODO: simulate real signal strength (and optional packet loss) */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 4d05a9443344..76ed24a201eb 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1141,7 +1141,6 @@ enum mac80211_rx_flags { * enum mac80211_rx_encoding_flags - MCS & bandwidth flags * * @RX_ENC_FLAG_SHORTPRE: Short preamble was used for this frame - * @RX_ENC_FLAG_40MHZ: HT40 (40 MHz) was used * @RX_ENC_FLAG_SHORT_GI: Short guard interval was used * @RX_ENC_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, * if the driver fills this value it should add @@ -1153,7 +1152,6 @@ enum 
mac80211_rx_flags { */ enum mac80211_rx_encoding_flags { RX_ENC_FLAG_SHORTPRE = BIT(0), - RX_ENC_FLAG_40MHZ = BIT(1), RX_ENC_FLAG_SHORT_GI = BIT(2), RX_ENC_FLAG_HT_GF = BIT(3), RX_ENC_FLAG_STBC_MASK = BIT(4) | BIT(5), -- cgit v1.2.3 From 6406c91943a0f29b6e8786921aaa038663e08055 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 2 May 2017 09:33:40 +0200 Subject: cfg80211: fix multi scheduled scan kernel-doc Replace @results_wk with @report_results, which was missed in an earlier patch between revisions thereof. Fixes: b34939b98369 ("cfg80211: add request id to cfg80211_sched_scan_*() api") Signed-off-by: Johannes Berg Acked-by: Arend van Spriel Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 15d6599b8bc6..b083e6cbae8c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1666,7 +1666,7 @@ struct cfg80211_bss_select_adjust { * (others are filtered out). * If ommited, all results are passed. * @n_match_sets: number of match sets - * @results_wk: worker for processing results notification. + * @report_results: indicates that results were reported for this request * @wiphy: the wiphy this was for * @dev: the interface * @scan_start: start time of the scheduled scan -- cgit v1.2.3 From 71afe470e20db133b30730cfa856e5d6854312e9 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Thu, 13 Apr 2017 09:06:20 +0200 Subject: KVM: arm64: vgic-its: Introduce migration ABI infrastructure We plan to support different migration ABIs, ie. characterizing the ITS table layout format in guest RAM. For example, a new ABI will be needed if vLPIs get supported for nested use case. So let's introduce an array of supported ABIs (at the moment a single ABI is supported though). The following characteristics are foreseen to vary with the ABI: size of table entries, save/restore operation, the way abi settings are applied. By default the MAX_ABI_REV is applied on its creation. In subsequent patches we will introduce a way for the userspace to change the ABI in use. The entry sizes now are set according to the ABI version and not hardcoded anymore. 
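As a quick illustration of the encoding used below (a worked note, not part of the patch text), the GIC_ENCODE_SZ() macro stores "n units" as n - 1 in a w-bit field, which is how the 8-byte v0 entry sizes end up advertised in GITS_TYPER and GITS_BASER<n>:

/* As added below; GENMASK_ULL() is the kernel's bitmask helper. */
#define GIC_ENCODE_SZ(n, w) \
	(((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0))

/*
 * All v0 ABI entry sizes are 8 bytes, so:
 *   GIC_ENCODE_SZ(8, 4) == 0x7  ->  GITS_TYPER.ITT_entry_size (4-bit field)
 *   GIC_ENCODE_SZ(8, 5) == 0x7  ->  GITS_BASER<n>.Entry_Size  (5-bit field)
 * i.e. the fields report "size minus one", and readers add 1 back, as
 * GITS_BASER_ENTRY_SIZE() already does.
 */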
Signed-off-by: Eric Auger Reviewed-by: Christoffer Dall --- include/kvm/arm_vgic.h | 3 ++ include/linux/irqchip/arm-gic-v3.h | 5 ++ virt/kvm/arm/vgic/vgic-its.c | 93 ++++++++++++++++++++++++++++++++++++-- 3 files changed, 97 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 26ed4fb896bb..fabcc649e2ce 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -162,6 +162,9 @@ struct vgic_its { u32 creadr; u32 cwriter; + /* migration ABI revision in use */ + u32 abi_rev; + /* Protects the device and collection lists */ struct mutex its_lock; struct list_head device_list; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 97cbca19430d..81ebe437ccc3 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -132,6 +132,9 @@ #define GIC_BASER_SHAREABILITY(reg, type) \ (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + #define GICR_PROPBASER_SHAREABILITY_SHIFT (10) #define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) #define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) @@ -232,6 +235,7 @@ #define GITS_CTLR_QUIESCENT (1U << 31) #define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 #define GITS_TYPER_IDBITS_SHIFT 8 #define GITS_TYPER_DEVBITS_SHIFT 13 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) @@ -290,6 +294,7 @@ #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) #define GITS_BASER_ENTRY_SIZE_SHIFT (48) #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) #define GITS_BASER_SHAREABILITY_SHIFT (10) #define GITS_BASER_InnerShareable \ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index bf3ff0931572..4f6ea46c496c 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -33,6 +33,10 @@ #include "vgic.h" #include "vgic-mmio.h" +static int vgic_its_save_tables_v0(struct vgic_its *its); +static int vgic_its_restore_tables_v0(struct vgic_its *its); +static int vgic_its_commit_v0(struct vgic_its *its); + /* * Creates a new (reference to a) struct vgic_irq for a given LPI. 
* If this LPI is already mapped on another ITS, we increase its refcount @@ -123,6 +127,50 @@ struct its_ite { u32 event_id; }; +/** + * struct vgic_its_abi - ITS abi ops and settings + * @cte_esz: collection table entry size + * @dte_esz: device table entry size + * @ite_esz: interrupt translation table entry size + * @save tables: save the ITS tables into guest RAM + * @restore_tables: restore the ITS internal structs from tables + * stored in guest RAM + * @commit: initialize the registers which expose the ABI settings, + * especially the entry sizes + */ +struct vgic_its_abi { + int cte_esz; + int dte_esz; + int ite_esz; + int (*save_tables)(struct vgic_its *its); + int (*restore_tables)(struct vgic_its *its); + int (*commit)(struct vgic_its *its); +}; + +static const struct vgic_its_abi its_table_abi_versions[] = { + [0] = {.cte_esz = 8, .dte_esz = 8, .ite_esz = 8, + .save_tables = vgic_its_save_tables_v0, + .restore_tables = vgic_its_restore_tables_v0, + .commit = vgic_its_commit_v0, + }, +}; + +#define NR_ITS_ABIS ARRAY_SIZE(its_table_abi_versions) + +inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its) +{ + return &its_table_abi_versions[its->abi_rev]; +} + +int vgic_its_set_abi(struct vgic_its *its, int rev) +{ + const struct vgic_its_abi *abi; + + its->abi_rev = rev; + abi = vgic_its_get_abi(its); + return abi->commit(its); +} + /* * Find and returns a device in the device table for an ITS. * Must be called with the its_lock mutex held. @@ -364,6 +412,7 @@ static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, struct vgic_its *its, gpa_t addr, unsigned int len) { + const struct vgic_its_abi *abi = vgic_its_get_abi(its); u64 reg = GITS_TYPER_PLPIS; /* @@ -376,6 +425,7 @@ static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, */ reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT; reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT; + reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT; return extract_bytes(reg, addr & 7, len); } @@ -1268,6 +1318,7 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, gpa_t addr, unsigned int len, unsigned long val) { + const struct vgic_its_abi *abi = vgic_its_get_abi(its); u64 entry_size, device_type; u64 reg, *regptr, clearbits = 0; @@ -1278,12 +1329,12 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, switch (BASER_INDEX(addr)) { case 0: regptr = &its->baser_device_table; - entry_size = 8; + entry_size = abi->dte_esz; device_type = GITS_BASER_TYPE_DEVICE; break; case 1: regptr = &its->baser_coll_table; - entry_size = 8; + entry_size = abi->cte_esz; device_type = GITS_BASER_TYPE_COLLECTION; clearbits = GITS_BASER_INDIRECT; break; @@ -1425,7 +1476,6 @@ static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its) (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \ GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | \ - ((8ULL - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | \ GITS_BASER_PAGE_SIZE_64K) #define INITIAL_PROPBASER_VALUE \ @@ -1465,7 +1515,7 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) dev->private = its; - return 0; + return vgic_its_set_abi(its, NR_ITS_ABIS - 1); } static void vgic_its_destroy(struct kvm_device *kvm_dev) @@ -1592,6 +1642,41 @@ out: return ret; } +/** + * vgic_its_save_tables_v0 - Save the ITS tables into guest ARM + * according to v0 ABI + */ +static int vgic_its_save_tables_v0(struct vgic_its *its) +{ + return -ENXIO; +} + +/** + * vgic_its_restore_tables_v0 - Restore the ITS tables from 
guest RAM + * to internal data structs according to V0 ABI + * + */ +static int vgic_its_restore_tables_v0(struct vgic_its *its) +{ + return -ENXIO; +} + +static int vgic_its_commit_v0(struct vgic_its *its) +{ + const struct vgic_its_abi *abi; + + abi = vgic_its_get_abi(its); + its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK; + its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK; + + its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5) + << GITS_BASER_ENTRY_SIZE_SHIFT); + + its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5) + << GITS_BASER_ENTRY_SIZE_SHIFT); + return 0; +} + static int vgic_its_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { -- cgit v1.2.3 From ab01c6bdacc43c41c6b326889f4358f5afc38bf9 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Thu, 23 Mar 2017 15:14:00 +0100 Subject: KVM: arm64: vgic-its: Implement vgic_mmio_uaccess_write_its_iidr The GITS_IIDR revision field is used to encode the migration ABI revision. So we need to restore it to check the table layout is readable by the destination. By writing the IIDR, userspace thus forces the ABI revision to be used and this must be less than or equal to the max revision KVM supports. Signed-off-by: Eric Auger Reviewed-by: Christoffer Dall --- include/linux/irqchip/arm-gic-v3.h | 5 +++++ virt/kvm/arm/vgic/vgic-its.c | 23 ++++++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 81ebe437ccc3..2eaea308f003 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -242,6 +242,11 @@ #define GITS_TYPER_PTA (1UL << 19) #define GITS_TYPER_HWCOLLCNT_SHIFT 24 +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + #define GITS_CBASER_VALID (1ULL << 63) #define GITS_CBASER_SHAREABILITY_SHIFT (10) #define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 4f6ea46c496c..9338efb79a54 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -434,7 +434,23 @@ static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm, struct vgic_its *its, gpa_t addr, unsigned int len) { - return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); + u32 val; + + val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK; + val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM; + return val; +} + +static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm, + struct vgic_its *its, + gpa_t addr, unsigned int len, + unsigned long val) +{ + u32 rev = GITS_IIDR_REV(val); + + if (rev >= NR_ITS_ABIS) + return -EINVAL; + return vgic_its_set_abi(its, rev); } static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm, @@ -1415,8 +1431,9 @@ static struct vgic_register_region its_registers[] = { REGISTER_ITS_DESC(GITS_CTLR, vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4, VGIC_ACCESS_32bit), - REGISTER_ITS_DESC(GITS_IIDR, - vgic_mmio_read_its_iidr, its_mmio_write_wi, 4, + REGISTER_ITS_DESC_UACCESS(GITS_IIDR, + vgic_mmio_read_its_iidr, its_mmio_write_wi, + vgic_mmio_uaccess_write_its_iidr, 4, VGIC_ACCESS_32bit), REGISTER_ITS_DESC(GITS_TYPER, vgic_mmio_read_its_typer, its_mmio_write_wi, 8, -- cgit v1.2.3 From 0d44cdb631ef53ea75be056886cf0541311e48df Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Thu, 22 Dec 2016 18:14:14 
+0100 Subject: KVM: arm64: vgic-its: Interpret MAPD Size field and check related errors Up to now the MAPD's ITT size field has been ignored. It encodes the number of eventid bit minus 1. It should be used to check the eventid when a MAPTI command is issued on a device. Let's store the number of eventid bits in the its_device and do the check on MAPTI. Also make sure the ITT size field does not exceed the GITS_TYPER IDBITS field. Signed-off-by: Eric Auger Reviewed-by: Christoffer Dall Reviewed-by: Marc Zyngier --- include/linux/irqchip/arm-gic-v3.h | 2 ++ virt/kvm/arm/vgic/vgic-its.c | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 2eaea308f003..be8bad00c419 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -347,9 +347,11 @@ #define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 #define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 #define E_ITS_MAPD_DEVICE_OOR 0x010801 +#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 #define E_ITS_MAPC_PROCNUM_OOR 0x010902 #define E_ITS_MAPC_COLLECTION_OOR 0x010903 #define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 +#define E_ITS_MAPTI_ID_OOR 0x010a05 #define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 #define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 #define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 9338efb79a54..031f6abd50fd 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -103,6 +103,7 @@ struct its_device { /* the head for the list of ITTEs */ struct list_head itt_head; + u32 num_eventid_bits; u32 device_id; }; @@ -224,6 +225,8 @@ static struct its_ite *find_ite(struct vgic_its *its, u32 device_id, #define GIC_LPI_OFFSET 8192 +#define VITS_TYPER_IDBITS 16 + /* * Finds and returns a collection in the ITS collection table. * Must be called with the its_lock mutex held. @@ -424,7 +427,7 @@ static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, * DevBits low - as least for the time being. 
*/ reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT; - reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT; + reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT; reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT; return extract_bytes(reg, addr & 7, len); @@ -595,6 +598,7 @@ static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size) #define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8) #define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32) +#define its_cmd_get_size(cmd) (its_cmd_mask_field(cmd, 1, 0, 5) + 1) #define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32) #define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32) #define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16) @@ -785,6 +789,9 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, if (!device) return E_ITS_MAPTI_UNMAPPED_DEVICE; + if (event_id >= BIT_ULL(device->num_eventid_bits)) + return E_ITS_MAPTI_ID_OOR; + if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI) lpi_nr = its_cmd_get_physical_id(its_cmd); else @@ -865,11 +872,15 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its, { u32 device_id = its_cmd_get_deviceid(its_cmd); bool valid = its_cmd_get_validbit(its_cmd); + u8 num_eventid_bits = its_cmd_get_size(its_cmd); struct its_device *device; if (!vgic_its_check_id(its, its->baser_device_table, device_id)) return E_ITS_MAPD_DEVICE_OOR; + if (valid && num_eventid_bits > VITS_TYPER_IDBITS) + return E_ITS_MAPD_ITTSIZE_OOR; + device = find_its_device(its, device_id); /* @@ -892,6 +903,8 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its, return -ENOMEM; device->device_id = device_id; + device->num_eventid_bits = num_eventid_bits; + INIT_LIST_HEAD(&device->itt_head); list_add_tail(&device->dev_list, &its->device_list); -- cgit v1.2.3 From 44de9d683847ba6dbac290bb8c9f1b773cbda746 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Thu, 4 May 2017 11:19:52 +0200 Subject: KVM: arm64: vgic-v3: vgic_v3_lpi_sync_pending_status this new helper synchronizes the irq pending_latch with the LPI pending bit status found in rdist pending table. As the status is consumed, we reset the bit in pending table. As we need the PENDBASER_ADDRESS() in vgic-v3, let's move its definition in the irqchip header. We restore the full length of the field, ie [51:16]. Same for PROPBASER_ADDRESS with full field length of [51:12]. 
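As a concrete illustration of the lookup the new helper performs (mirroring the code added below), the first LPI, INTID 8192, lives at byte 1024, bit 0 of the redistributor pending table:

/* Sketch of the pending-bit lookup in vgic_v3_lpi_sync_pending_status() */
gpa_t pendbase   = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
int   byte_offset = irq->intid / BITS_PER_BYTE;  /* e.g. 8192 / 8 = 1024 */
int   bit_nr      = irq->intid % BITS_PER_BYTE;  /* e.g. 8192 % 8 = 0    */
gpa_t ptr         = pendbase + byte_offset;      /* guest PA of the byte */
/* The bit read from *ptr becomes irq->pending_latch and, once consumed,
 * is cleared and written back to the guest's pending table. */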
Signed-off-by: Eric Auger Reviewed-by: Marc Zyngier Reviewed-by: Christoffer Dall --- include/linux/irqchip/arm-gic-v3.h | 2 ++ virt/kvm/arm/vgic/vgic-its.c | 6 ++---- virt/kvm/arm/vgic/vgic-v3.c | 44 ++++++++++++++++++++++++++++++++++++++ virt/kvm/arm/vgic/vgic.h | 1 + 4 files changed, 49 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index be8bad00c419..fffb91202bc9 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -159,6 +159,8 @@ #define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) #define GICR_PROPBASER_IDBITS_MASK (0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) #define GICR_PENDBASER_SHAREABILITY_SHIFT (10) #define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index bd1362e9c214..3601790d841e 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -221,8 +221,6 @@ static struct its_ite *find_ite(struct vgic_its *its, u32 device_id, */ #define BASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16)) #define CBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 12)) -#define PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16)) -#define PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 12)) #define GIC_LPI_OFFSET 8192 @@ -257,7 +255,7 @@ static struct its_collection *find_collection(struct vgic_its *its, int coll_id) static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, struct kvm_vcpu *filter_vcpu) { - u64 propbase = PROPBASER_ADDRESS(kvm->arch.vgic.propbaser); + u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser); u8 prop; int ret; @@ -369,7 +367,7 @@ static u32 max_lpis_propbaser(u64 propbaser) */ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) { - gpa_t pendbase = PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser); + gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser); struct vgic_irq *irq; int last_byte_offset = -1; int ret = 0; diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index df1503650300..9ae8adddff69 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -234,6 +234,50 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu) vgic_v3->vgic_hcr = ICH_HCR_EN; } +int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq) +{ + struct kvm_vcpu *vcpu; + int byte_offset, bit_nr; + gpa_t pendbase, ptr; + bool status; + u8 val; + int ret; + +retry: + vcpu = irq->target_vcpu; + if (!vcpu) + return 0; + + pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser); + + byte_offset = irq->intid / BITS_PER_BYTE; + bit_nr = irq->intid % BITS_PER_BYTE; + ptr = pendbase + byte_offset; + + ret = kvm_read_guest(kvm, ptr, &val, 1); + if (ret) + return ret; + + status = val & (1 << bit_nr); + + spin_lock(&irq->irq_lock); + if (irq->target_vcpu != vcpu) { + spin_unlock(&irq->irq_lock); + goto retry; + } + irq->pending_latch = status; + vgic_queue_irq_unlock(vcpu->kvm, irq); + + if (status) { + /* clear consumed data */ + val &= ~(1 << bit_nr); + ret = kvm_write_guest(kvm, ptr, &val, 1); + if (ret) + return ret; + } + return 0; +} + /* check for overlapping regions and for regions crossing the end of memory */ static bool vgic_v3_check_base(struct kvm *kvm) { diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index a48f33b04b2d..79768c801863 100644 --- 
a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -149,6 +149,7 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); void vgic_v3_enable(struct kvm_vcpu *vcpu); int vgic_v3_probe(const struct gic_kvm_info *info); int vgic_v3_map_resources(struct kvm *kvm); +int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq); int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address); void vgic_v3_load(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From ef51042472f55b325fd7f2b26a2e29fd89757234 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 8 May 2017 10:55:27 -0700 Subject: block, dax: move "select DAX" from BLOCK to FS_DAX For configurations that do not enable DAX filesystems or drivers, do not require the DAX core to be built. Given that the 'direct_access' method has been removed from 'block_device_operations', we can also go ahead and remove the block-related dax helper functions from fs/block_dev.c to drivers/dax/super.c. This keeps dax details out of the block layer and lets the DAX core be built as a module in the FS_DAX=n case. Filesystems need to include dax.h to call bdev_dax_supported(). Cc: linux-xfs@vger.kernel.org Cc: Jens Axboe Cc: "Theodore Ts'o" Cc: Matthew Wilcox Cc: Alexander Viro Cc: "Darrick J. Wong" Cc: Ross Zwisler Reviewed-by: Jan Kara Reported-by: Geert Uytterhoeven Signed-off-by: Dan Williams --- block/Kconfig | 1 - drivers/dax/super.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/Kconfig | 1 + fs/block_dev.c | 66 ----------------------------------------------- fs/ext2/super.c | 1 + fs/ext4/super.c | 1 + fs/xfs/xfs_super.c | 1 + include/linux/blkdev.h | 2 -- include/linux/dax.h | 30 ++++++++++++++++++++-- 9 files changed, 102 insertions(+), 71 deletions(-) (limited to 'include') diff --git a/block/Kconfig b/block/Kconfig index 93da7fc3f254..e9f780f815f5 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -6,7 +6,6 @@ menuconfig BLOCK default y select SBITMAP select SRCU - select DAX help Provide block layer support for the kernel. diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 465dcd7317d5..e8998d15f3cd 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -47,6 +48,75 @@ void dax_read_unlock(int id) } EXPORT_SYMBOL_GPL(dax_read_unlock); +int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, + pgoff_t *pgoff) +{ + phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512; + + if (pgoff) + *pgoff = PHYS_PFN(phys_off); + if (phys_off % PAGE_SIZE || size % PAGE_SIZE) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL(bdev_dax_pgoff); + +/** + * __bdev_dax_supported() - Check if the device supports dax for filesystem + * @sb: The superblock of the device + * @blocksize: The block size of the device + * + * This is a library function for filesystems to check if the block device + * can be mounted with dax option. + * + * Return: negative errno if unsupported, 0 if supported. 
+ */ +int __bdev_dax_supported(struct super_block *sb, int blocksize) +{ + struct block_device *bdev = sb->s_bdev; + struct dax_device *dax_dev; + pgoff_t pgoff; + int err, id; + void *kaddr; + pfn_t pfn; + long len; + + if (blocksize != PAGE_SIZE) { + pr_err("VFS (%s): error: unsupported blocksize for dax\n", + sb->s_id); + return -EINVAL; + } + + err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); + if (err) { + pr_err("VFS (%s): error: unaligned partition for dax\n", + sb->s_id); + return err; + } + + dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); + if (!dax_dev) { + pr_err("VFS (%s): error: device does not support dax\n", + sb->s_id); + return -EOPNOTSUPP; + } + + id = dax_read_lock(); + len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); + dax_read_unlock(id); + + put_dax(dax_dev); + + if (len < 1) { + pr_err("VFS (%s): error: dax access failed (%ld)", + sb->s_id, len); + return len < 0 ? len : -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(__bdev_dax_supported); + /** * struct dax_device - anchor object for dax services * @inode: core vfs diff --git a/fs/Kconfig b/fs/Kconfig index 83eab52fb3f6..b0e42b6a96b9 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -39,6 +39,7 @@ config FS_DAX depends on MMU depends on !(ARM || MIPS || SPARC) select FS_IOMAP + select DAX help Direct Access (DAX) can be used on memory-backed block devices. If the block device supports DAX and the filesystem supports DAX, diff --git a/fs/block_dev.c b/fs/block_dev.c index 666367e13711..3096ecd48304 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -718,72 +718,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, } EXPORT_SYMBOL_GPL(bdev_write_page); -int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, - pgoff_t *pgoff) -{ - phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512; - - if (pgoff) - *pgoff = PHYS_PFN(phys_off); - if (phys_off % PAGE_SIZE || size % PAGE_SIZE) - return -EINVAL; - return 0; -} -EXPORT_SYMBOL(bdev_dax_pgoff); - -/** - * bdev_dax_supported() - Check if the device supports dax for filesystem - * @sb: The superblock of the device - * @blocksize: The block size of the device - * - * This is a library function for filesystems to check if the block device - * can be mounted with dax option. - * - * Return: negative errno if unsupported, 0 if supported. - */ -int bdev_dax_supported(struct super_block *sb, int blocksize) -{ - struct block_device *bdev = sb->s_bdev; - struct dax_device *dax_dev; - pgoff_t pgoff; - int err, id; - void *kaddr; - pfn_t pfn; - long len; - - if (blocksize != PAGE_SIZE) { - vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax"); - return -EINVAL; - } - - err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); - if (err) { - vfs_msg(sb, KERN_ERR, "error: unaligned partition for dax"); - return err; - } - - dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); - if (!dax_dev) { - vfs_msg(sb, KERN_ERR, "error: device does not support dax"); - return -EOPNOTSUPP; - } - - id = dax_read_lock(); - len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); - dax_read_unlock(id); - - put_dax(dax_dev); - - if (len < 1) { - vfs_msg(sb, KERN_ERR, - "error: dax access failed (%ld)", len); - return len < 0 ? 
len : -EIO; - } - - return 0; -} -EXPORT_SYMBOL_GPL(bdev_dax_supported); - /* * pseudo-fs */ diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 9e25a71fe1a2..d07773b81da9 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "ext2.h" #include "xattr.h" #include "acl.h" diff --git a/fs/ext4/super.c b/fs/ext4/super.c index a9448db1cf7e..bf6bb8997124 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 685c042a120f..f5c58d6dcafb 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -52,6 +52,7 @@ #include "xfs_reflink.h" #include +#include #include #include #include diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 848f87eb1905..e4d9899755a7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1940,8 +1940,6 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); -extern int bdev_dax_supported(struct super_block *, int); -int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #else /* CONFIG_BLOCK */ struct block_device; diff --git a/include/linux/dax.h b/include/linux/dax.h index d3158e74a59e..7fdf1d710042 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -18,12 +18,38 @@ struct dax_operations { void **, pfn_t *); }; +int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); +#if IS_ENABLED(CONFIG_FS_DAX) +int __bdev_dax_supported(struct super_block *sb, int blocksize); +static inline int bdev_dax_supported(struct super_block *sb, int blocksize) +{ + return __bdev_dax_supported(sb, blocksize); +} +#else +static inline int bdev_dax_supported(struct super_block *sb, int blocksize) +{ + return -EOPNOTSUPP; +} +#endif + +#if IS_ENABLED(CONFIG_DAX) +struct dax_device *dax_get_by_host(const char *host); +void put_dax(struct dax_device *dax_dev); +#else +static inline struct dax_device *dax_get_by_host(const char *host) +{ + return NULL; +} + +static inline void put_dax(struct dax_device *dax_dev) +{ +} +#endif + int dax_read_lock(void); void dax_read_unlock(int id); -struct dax_device *dax_get_by_host(const char *host); struct dax_device *alloc_dax(void *private, const char *host, const struct dax_operations *ops); -void put_dax(struct dax_device *dax_dev); bool dax_alive(struct dax_device *dax_dev); void kill_dax(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); -- cgit v1.2.3 From 82486aa6f1b9bc8145e6d0fa2bc0b44307f3b875 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Thu, 4 May 2017 14:54:17 -0700 Subject: ipv4: restore rt->fi for reference counting IPv4 dst could use fi->fib_metrics to store metrics but fib_info itself is refcnt'ed, so without taking a refcnt fi and fi->fib_metrics could be freed while dst metrics still points to it. This triggers use-after-free as reported by Andrey twice. This patch reverts commit 2860583fe840 ("ipv4: Kill rt->fi") to restore this reference counting. It is a quick fix for -net and -stable, for -net-next, as Eric suggested, we can consider doing reference counting for metrics itself instead of relying on fib_info. IPv6 is very different, it copies or steals the metrics from mx6_config in fib6_commit_metrics() so probably doesn't need a refcnt. 
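The lifetime problem described above -- a consumer caching a raw pointer into reference-counted memory without holding its own reference -- can be sketched in plain userspace C. The types below are hypothetical stand-ins, not the kernel's fib_info or rtable structures:

/* Sketch of the hold/put pattern the patch restores: a borrower that keeps
 * a pointer into refcounted memory must take its own reference, otherwise
 * the owner's final put frees the data while the borrowed pointer is still
 * in use. Hypothetical types, not the kernel's. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct metrics_owner {            /* stands in for fib_info */
	atomic_int refcnt;
	unsigned int metrics[4];  /* stands in for fib_metrics */
};

static struct metrics_owner *owner_alloc(void)
{
	struct metrics_owner *o = calloc(1, sizeof(*o));

	if (!o)
		abort();
	atomic_init(&o->refcnt, 1);
	memset(o->metrics, 0x42, sizeof(o->metrics));
	return o;
}

static void owner_hold(struct metrics_owner *o)
{
	atomic_fetch_add(&o->refcnt, 1);
}

static void owner_put(struct metrics_owner *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);          /* last reference gone */
}

struct borrower {                 /* stands in for rtable */
	struct metrics_owner *owner;   /* held reference, like rt->fi */
	unsigned int *metrics;         /* raw pointer into owner's memory */
};

int main(void)
{
	struct metrics_owner *o = owner_alloc();
	struct borrower b = { .owner = o, .metrics = o->metrics };

	owner_hold(o);  /* analogous to fib_info_hold() when caching the pointer */
	owner_put(o);   /* original owner drops its reference... */

	/* ...but the borrowed pointer is still valid because we hold a ref */
	printf("metric[0] = %u\n", b.metrics[0]);

	owner_put(b.owner);  /* analogous to fib_info_put() on dst destroy */
	return 0;
}

Without the owner_hold() call, the first owner_put() would free the memory and the later read through b.metrics would be exactly the use-after-free described above.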
Decnet has already done the refcnt'ing, see dn_fib_semantic_match(). Fixes: 2860583fe840 ("ipv4: Kill rt->fi") Reported-by: Andrey Konovalov Tested-by: Andrey Konovalov Signed-off-by: Cong Wang Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/route.h | 1 + net/ipv4/route.c | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/route.h b/include/net/route.h index 2cc0e14c6359..4335eb72a04c 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -69,6 +69,7 @@ struct rtable { struct list_head rt_uncached; struct uncached_list *rt_uncached_list; + struct fib_info *fi; /* for refcnt to shared metrics */ }; static inline bool rt_is_input_route(const struct rtable *rt) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 655d9eebe43e..f647310f8e4d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1387,6 +1387,11 @@ static void ipv4_dst_destroy(struct dst_entry *dst) { struct rtable *rt = (struct rtable *) dst; + if (rt->fi) { + fib_info_put(rt->fi); + rt->fi = NULL; + } + if (!list_empty(&rt->rt_uncached)) { struct uncached_list *ul = rt->rt_uncached_list; @@ -1424,6 +1429,16 @@ static bool rt_cache_valid(const struct rtable *rt) !rt_is_expired(rt); } +static void rt_init_metrics(struct rtable *rt, struct fib_info *fi) +{ + if (fi->fib_metrics != (u32 *)dst_default_metrics) { + fib_info_hold(fi); + rt->fi = fi; + } + + dst_init_metrics(&rt->dst, fi->fib_metrics, true); +} + static void rt_set_nexthop(struct rtable *rt, __be32 daddr, const struct fib_result *res, struct fib_nh_exception *fnhe, @@ -1438,7 +1453,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, rt->rt_gateway = nh->nh_gw; rt->rt_uses_gateway = 1; } - dst_init_metrics(&rt->dst, fi->fib_metrics, true); + rt_init_metrics(rt, fi); #ifdef CONFIG_IP_ROUTE_CLASSID rt->dst.tclassid = nh->nh_tclassid; #endif @@ -1490,6 +1505,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev, rt->rt_gateway = 0; rt->rt_uses_gateway = 0; rt->rt_table_id = 0; + rt->fi = NULL; INIT_LIST_HEAD(&rt->rt_uncached); rt->dst.output = ip_output; -- cgit v1.2.3 From 1b1fc3fddabfb8739ef2c8f04e05a9858b42c1f7 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 5 May 2017 12:53:23 -0700 Subject: tcp: make congestion control optionally skip slow start after idle Congestion control modules that want full control over congestion control behavior do not want the cwnd modifications controlled by the sysctl_tcp_slow_start_after_idle code path. So skip those code paths for CC modules that use the cong_control() API. As an example, those cwnd effects are not desired for the BBR congestion control algorithm. Fixes: c0402760f565 ("tcp: new CC hook to set sending rate with rate_sample in any CA state") Signed-off-by: Wei Wang Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: David S. 
Miller --- include/net/tcp.h | 4 +++- net/ipv4/tcp_output.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/tcp.h b/include/net/tcp.h index 8c0e5a901d64..38a7427ae902 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1234,10 +1234,12 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta); static inline void tcp_slow_start_after_idle_check(struct sock *sk) { + const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; struct tcp_sock *tp = tcp_sk(sk); s32 delta; - if (!sysctl_tcp_slow_start_after_idle || tp->packets_out) + if (!sysctl_tcp_slow_start_after_idle || tp->packets_out || + ca_ops->cong_control) return; delta = tcp_time_stamp - tp->lsndtime; if (delta > inet_csk(sk)->icsk_rto) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 60111a0fc201..4858e190f6ac 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1514,6 +1514,7 @@ static void tcp_cwnd_application_limited(struct sock *sk) static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) { + const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; struct tcp_sock *tp = tcp_sk(sk); /* Track the maximum number of outstanding packets in each @@ -1536,7 +1537,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) tp->snd_cwnd_used = tp->packets_out; if (sysctl_tcp_slow_start_after_idle && - (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) + (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && + !ca_ops->cong_control) tcp_cwnd_application_limited(sk); /* The following conditions together indicate the starvation -- cgit v1.2.3 From 242d3a49a2a1a71d8eb9f953db1bcaa9d698ce00 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Mon, 8 May 2017 10:12:13 -0700 Subject: ipv6: reorder ip6_route_dev_notifier after ipv6_dev_notf For each netns (except init_net), we initialize its null entry in 3 places: 1) The template itself, as we use kmemdup() 2) Code around dst_init_metrics() in ip6_route_net_init() 3) ip6_route_dev_notify(), which is supposed to initialize it after loopback registers Unfortunately the last one still happens in a wrong order because we expect to initialize net->ipv6.ip6_null_entry->rt6i_idev to net->loopback_dev's idev, thus we have to do that after we add idev to loopback. However, this notifier has priority == 0 same as ipv6_dev_notf, and ipv6_dev_notf is registered after ip6_route_dev_notifier so it is called actually after ip6_route_dev_notifier. This is similar to commit 2f460933f58e ("ipv6: initialize route null entry in addrconf_init()") which fixes init_net. Fix it by picking a smaller priority for ip6_route_dev_notifier. Also, we have to release the refcnt accordingly when unregistering loopback_dev because device exit functions are called before subsys exit functions. Acked-by: David Ahern Tested-by: David Ahern Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- include/net/addrconf.h | 2 ++ net/ipv6/addrconf.c | 1 + net/ipv6/route.c | 13 +++++++++++-- 3 files changed, 14 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 2452e6449532..b43a4eec3cec 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -20,6 +20,8 @@ #define ADDRCONF_TIMER_FUZZ (HZ / 4) #define ADDRCONF_TIMER_FUZZ_MAX (HZ) +#define ADDRCONF_NOTIFY_PRIORITY 0 + #include #include diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 77a4bd526d6e..8d297a79b568 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3548,6 +3548,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, */ static struct notifier_block ipv6_dev_notf = { .notifier_call = addrconf_notify, + .priority = ADDRCONF_NOTIFY_PRIORITY, }; static void addrconf_type_change(struct net_device *dev, unsigned long event) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2f1136627dcb..dc61b0b5e64e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3709,7 +3709,10 @@ static int ip6_route_dev_notify(struct notifier_block *this, struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); - if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { + if (!(dev->flags & IFF_LOOPBACK)) + return NOTIFY_OK; + + if (event == NETDEV_REGISTER) { net->ipv6.ip6_null_entry->dst.dev = dev; net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES @@ -3717,6 +3720,12 @@ static int ip6_route_dev_notify(struct notifier_block *this, net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); net->ipv6.ip6_blk_hole_entry->dst.dev = dev; net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); +#endif + } else if (event == NETDEV_UNREGISTER) { + in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); +#ifdef CONFIG_IPV6_MULTIPLE_TABLES + in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev); + in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev); #endif } @@ -4024,7 +4033,7 @@ static struct pernet_operations ip6_route_net_late_ops = { static struct notifier_block ip6_route_dev_notifier = { .notifier_call = ip6_route_dev_notify, - .priority = 0, + .priority = ADDRCONF_NOTIFY_PRIORITY - 10, }; void __init ip6_route_init_special_entries(void) -- cgit v1.2.3 From e092693443b995c8e3a565a73b5fdb05f1260f9b Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Mon, 8 May 2017 18:02:24 -0400 Subject: NFS append COMMIT after synchronous COPY Instead of messing with the commit path which has been causing issues, add a COMMIT op after the COPY and ask for stable copies in the first space. It saves a round trip, since after the COPY, the client sends a COMMIT anyway. 
Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 1 - fs/nfs/nfs42proc.c | 21 +++++++++++++++------ fs/nfs/nfs42xdr.c | 22 ++++++++++++++++++++-- fs/nfs/write.c | 30 ------------------------------ include/linux/nfs_xdr.h | 1 + 5 files changed, 36 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 31b26cf1b476..e9b4c3320e37 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -495,7 +495,6 @@ void nfs_mark_request_commit(struct nfs_page *req, u32 ds_commit_idx); int nfs_write_need_commit(struct nfs_pgio_header *); void nfs_writeback_update_inode(struct nfs_pgio_header *hdr); -int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf); int nfs_generic_commit_list(struct inode *inode, struct list_head *head, int how, struct nfs_commit_info *cinfo); void nfs_retry_commit(struct list_head *page_list, diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 87f5b7b971ca..929d09a5310a 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -167,23 +167,29 @@ static ssize_t _nfs42_proc_copy(struct file *src, if (status) return status; + res->commit_res.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS); + if (!res->commit_res.verf) + return -ENOMEM; status = nfs4_call_sync(server->client, server, &msg, &args->seq_args, &res->seq_res, 0); if (status == -ENOTSUPP) server->caps &= ~NFS_CAP_COPY; if (status) - return status; + goto out; - if (res->write_res.verifier.committed != NFS_FILE_SYNC) { - status = nfs_commit_file(dst, &res->write_res.verifier.verifier); - if (status) - return status; + if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier, + &res->commit_res.verf->verifier)) { + status = -EAGAIN; + goto out; } truncate_pagecache_range(dst_inode, pos_dst, pos_dst + res->write_res.count); - return res->write_res.count; + status = res->write_res.count; +out: + kfree(res->commit_res.verf); + return status; } ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, @@ -240,6 +246,9 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src, if (err == -ENOTSUPP) { err = -EOPNOTSUPP; break; + } if (err == -EAGAIN) { + dst_exception.retry = 1; + continue; } err2 = nfs4_handle_exception(server, err, &src_exception); diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 6c7296454bbc..528362f69cc1 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -66,12 +66,14 @@ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_putfh_maxsz + \ - encode_copy_maxsz) + encode_copy_maxsz + \ + encode_commit_maxsz) #define NFS4_dec_copy_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_putfh_maxsz + \ - decode_copy_maxsz) + decode_copy_maxsz + \ + decode_commit_maxsz) #define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_deallocate_maxsz + \ @@ -222,6 +224,18 @@ static void nfs4_xdr_enc_allocate(struct rpc_rqst *req, encode_nops(&hdr); } +static void encode_copy_commit(struct xdr_stream *xdr, + struct nfs42_copy_args *args, + struct compound_hdr *hdr) +{ + __be32 *p; + + encode_op_hdr(xdr, OP_COMMIT, decode_commit_maxsz, hdr); + p = reserve_space(xdr, 12); + p = xdr_encode_hyper(p, args->dst_pos); + *p = cpu_to_be32(args->count); +} + /* * Encode COPY request */ @@ -239,6 +253,7 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req, encode_savefh(xdr, &hdr); encode_putfh(xdr, args->dst_fh, &hdr); encode_copy(xdr, args, &hdr); + encode_copy_commit(xdr, args, &hdr); encode_nops(&hdr); } @@ -481,6 
+496,9 @@ static int nfs4_xdr_dec_copy(struct rpc_rqst *rqstp, if (status) goto out; status = decode_copy(xdr, res); + if (status) + goto out; + status = decode_commit(xdr, &res->commit_res); out: return status; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 59e21cc0a266..85bfa416fcc8 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1742,36 +1742,6 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, data->mds_ops, how, 0); } -int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf) -{ - struct inode *inode = file_inode(file); - struct nfs_open_context *open; - struct nfs_commit_info cinfo; - struct nfs_page *req; - int ret; - - open = get_nfs_open_context(nfs_file_open_context(file)); - req = nfs_create_request(open, NULL, NULL, 0, i_size_read(inode)); - if (IS_ERR(req)) { - ret = PTR_ERR(req); - goto out_put; - } - - nfs_init_cinfo_from_inode(&cinfo, inode); - - memcpy(&req->wb_verf, verf, sizeof(struct nfs_write_verifier)); - nfs_request_add_commit_list(req, &cinfo); - ret = nfs_commit_inode(inode, FLUSH_SYNC); - if (ret > 0) - ret = 0; - - nfs_free_request(req); -out_put: - put_nfs_open_context(open); - return ret; -} -EXPORT_SYMBOL_GPL(nfs_commit_file); - /* * COMMIT call returned */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 677c6b91dfcd..b28c83475ee8 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1383,6 +1383,7 @@ struct nfs42_copy_res { struct nfs42_write_res write_res; bool consecutive; bool synchronous; + struct nfs_commitres commit_res; }; struct nfs42_seek_args { -- cgit v1.2.3 From 32f1bc0f3d262125169f9212aac306c638e34b54 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 8 May 2017 22:35:32 -0400 Subject: Revert "ipv4: restore rt->fi for reference counting" This reverts commit 82486aa6f1b9bc8145e6d0fa2bc0b44307f3b875. As implemented, this causes dangling netdevice refs. Reported-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/route.h | 1 - net/ipv4/route.c | 18 +----------------- 2 files changed, 1 insertion(+), 18 deletions(-) (limited to 'include') diff --git a/include/net/route.h b/include/net/route.h index 4335eb72a04c..2cc0e14c6359 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -69,7 +69,6 @@ struct rtable { struct list_head rt_uncached; struct uncached_list *rt_uncached_list; - struct fib_info *fi; /* for refcnt to shared metrics */ }; static inline bool rt_is_input_route(const struct rtable *rt) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f647310f8e4d..655d9eebe43e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1387,11 +1387,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst) { struct rtable *rt = (struct rtable *) dst; - if (rt->fi) { - fib_info_put(rt->fi); - rt->fi = NULL; - } - if (!list_empty(&rt->rt_uncached)) { struct uncached_list *ul = rt->rt_uncached_list; @@ -1429,16 +1424,6 @@ static bool rt_cache_valid(const struct rtable *rt) !rt_is_expired(rt); } -static void rt_init_metrics(struct rtable *rt, struct fib_info *fi) -{ - if (fi->fib_metrics != (u32 *)dst_default_metrics) { - fib_info_hold(fi); - rt->fi = fi; - } - - dst_init_metrics(&rt->dst, fi->fib_metrics, true); -} - static void rt_set_nexthop(struct rtable *rt, __be32 daddr, const struct fib_result *res, struct fib_nh_exception *fnhe, @@ -1453,7 +1438,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, rt->rt_gateway = nh->nh_gw; rt->rt_uses_gateway = 1; } - rt_init_metrics(rt, fi); + dst_init_metrics(&rt->dst, fi->fib_metrics, true); #ifdef CONFIG_IP_ROUTE_CLASSID rt->dst.tclassid = nh->nh_tclassid; #endif @@ -1505,7 +1490,6 @@ struct rtable *rt_dst_alloc(struct net_device *dev, rt->rt_gateway = 0; rt->rt_uses_gateway = 0; rt->rt_table_id = 0; - rt->fi = NULL; INIT_LIST_HEAD(&rt->rt_uncached); rt->dst.output = ip_output; -- cgit v1.2.3 From 497d72d80a789501501cccabdad6b145f9e31371 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Mon, 8 May 2017 20:38:40 +0200 Subject: KVM: Add kvm_vcpu_get_idx to get vcpu index in kvm->vcpus There are occasional needs to use the index of vcpu in the kvm->vcpus array to map something related to a VCPU. For example, unlike the vcpu->vcpu_id, the vcpu index is guaranteed to not be sparse across all vcpus which is useful when allocating a memory area for each vcpu. Signed-off-by: Christoffer Dall Reviewed-by: Eric Auger --- include/linux/kvm_host.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2c14ad9809da..12eb26d665e8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -490,6 +490,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) return NULL; } +static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu *tmp; + int idx; + + kvm_for_each_vcpu(idx, tmp, vcpu->kvm) + if (tmp == vcpu) + return idx; + BUG(); +} + #define kvm_for_each_memslot(memslot, slots) \ for (memslot = &slots->memslots[0]; \ memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ -- cgit v1.2.3 From 1aab6f468c10a1ec3977fb6f7bc12869174d516e Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Mon, 8 May 2017 12:30:24 +0200 Subject: KVM: arm/arm64: Register iodevs when setting redist base and creating VCPUs Instead of waiting with registering KVM iodevs until the first VCPU is run, we can actually create the iodevs when the redist base address is set. 
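A minimal sketch of the registration pattern this relies on, with made-up names rather than the real vgic structures: both setup paths attempt registration, and whichever one runs with the other prerequisite already satisfied completes it.

#include <stdbool.h>
#include <stdio.h>

struct vm_state {
	bool base_set;       /* redistributor base address known? */
	int nr_vcpus;        /* VCPUs created so far */
	int nr_registered;   /* per-VCPU devices actually registered */
};

static void try_register_all(struct vm_state *vm)
{
	if (!vm->base_set)
		return;      /* base not known yet; come back later */
	while (vm->nr_registered < vm->nr_vcpus) {
		printf("register iodev for vcpu %d\n", vm->nr_registered);
		vm->nr_registered++;
	}
}

static void create_vcpu(struct vm_state *vm)
{
	vm->nr_vcpus++;
	try_register_all(vm);    /* covers VCPUs created after the base is set */
}

static void set_redist_base(struct vm_state *vm)
{
	vm->base_set = true;
	try_register_all(vm);    /* covers VCPUs created before the base is set */
}

int main(void)
{
	struct vm_state vm = { 0 };

	create_vcpu(&vm);        /* deferred: no base address yet */
	set_redist_base(&vm);    /* registers vcpu 0 */
	create_vcpu(&vm);        /* registers vcpu 1 immediately */
	return 0;
}

Run in either order, every VCPU ends up registered exactly once, which is the property needed here since setting the VGIC base addresses and creating VCPUs are not ordered with respect to each other.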
The only downside is that we must now also check if we need to do this for VCPUs which are created after creating the VGIC, because there is no enforced ordering between creating the VGIC (and setting its base addresses) and creating the VCPUs. Signed-off-by: Christoffer Dall Reviewed-by: Eric Auger --- include/kvm/arm_vgic.h | 1 + virt/kvm/arm/arm.c | 2 +- virt/kvm/arm/vgic/vgic-init.c | 21 ++++++++++++++++++ virt/kvm/arm/vgic/vgic-kvm-device.c | 7 +++++- virt/kvm/arm/vgic/vgic-mmio-v3.c | 43 +++++++++++++++++++++++++++++++++++-- virt/kvm/arm/vgic/vgic-v3.c | 6 ------ virt/kvm/arm/vgic/vgic.h | 3 ++- 7 files changed, 72 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index fabcc649e2ce..4ff65efcfebd 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -286,6 +286,7 @@ extern struct static_key_false vgic_v2_cpuif_trap; int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); void kvm_vgic_early_init(struct kvm *kvm); +int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); int kvm_vgic_create(struct kvm *kvm, u32 type); void kvm_vgic_destroy(struct kvm *kvm); void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 7941699a766d..9f6f522a4bfc 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -335,7 +335,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) kvm_arm_reset_debug_ptr(vcpu); - return 0; + return kvm_vgic_vcpu_init(vcpu); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 0ea64a1beff9..962bb577a1a0 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -226,6 +226,27 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) return 0; } +/** + * kvm_vgic_vcpu_init() - Register VCPU-specific KVM iodevs + * @vcpu: pointer to the VCPU being created and initialized + */ +int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) +{ + int ret = 0; + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + + if (!irqchip_in_kernel(vcpu->kvm)) + return 0; + + /* + * If we are creating a VCPU with a GICv3 we must also register the + * KVM io device for the redistributor that belongs to this VCPU. + */ + if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + ret = vgic_register_redist_iodev(vcpu); + return ret; +} + static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu) { if (kvm_vgic_global_state.type == VGIC_V2) diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c index 69ccfd5b2271..10ae6f394b71 100644 --- a/virt/kvm/arm/vgic/vgic-kvm-device.c +++ b/virt/kvm/arm/vgic/vgic-kvm-device.c @@ -86,8 +86,13 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) break; case KVM_VGIC_V3_ADDR_TYPE_REDIST: r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3); + if (r) + break; + if (write) { + r = vgic_v3_set_redist_base(kvm, *addr); + goto out; + } addr_ptr = &vgic->vgic_redist_base; - alignment = SZ_64K; break; default: r = -ENODEV; diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 168269b4e28d..99da1a207c19 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -565,7 +565,7 @@ unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev) * * Return 0 on success, -ERRNO otherwise. 
*/ -static int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) +int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; struct vgic_dist *vgic = &kvm->arch.vgic; @@ -574,6 +574,18 @@ static int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) gpa_t rd_base, sgi_base; int ret; + /* + * We may be creating VCPUs before having set the base address for the + * redistributor region, in which case we will come back to this + * function for all VCPUs when the base address is set. Just return + * without doing any work for now. + */ + if (IS_VGIC_ADDR_UNDEF(vgic->vgic_redist_base)) + return 0; + + if (!vgic_v3_check_base(kvm)) + return -EINVAL; + rd_base = vgic->vgic_redist_base + kvm_vcpu_get_idx(vcpu) * SZ_64K * 2; sgi_base = rd_base + SZ_64K; @@ -619,7 +631,7 @@ static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &sgi_dev->dev); } -int vgic_register_redist_iodevs(struct kvm *kvm) +static int vgic_register_all_redist_iodevs(struct kvm *kvm) { struct kvm_vcpu *vcpu; int c, ret = 0; @@ -641,6 +653,33 @@ int vgic_register_redist_iodevs(struct kvm *kvm) return ret; } +int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr) +{ + struct vgic_dist *vgic = &kvm->arch.vgic; + int ret; + + /* vgic_check_ioaddr makes sure we don't do this twice */ + ret = vgic_check_ioaddr(kvm, &vgic->vgic_redist_base, addr, SZ_64K); + if (ret) + return ret; + + vgic->vgic_redist_base = addr; + if (!vgic_v3_check_base(kvm)) { + vgic->vgic_redist_base = VGIC_ADDR_UNDEF; + return -EINVAL; + } + + /* + * Register iodevs for each existing VCPU. Adding more VCPUs + * afterwards will register the iodevs when needed. + */ + ret = vgic_register_all_redist_iodevs(kvm); + if (ret) + return ret; + + return 0; +} + int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr) { const struct vgic_register_region *region; diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 2d53d7a24bda..bb35078f4e40 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -397,12 +397,6 @@ int vgic_v3_map_resources(struct kvm *kvm) goto out; } - ret = vgic_register_redist_iodevs(kvm); - if (ret) { - kvm_err("Unable to register VGICv3 redist MMIO regions\n"); - goto out; - } - if (vgic_has_its(kvm)) { ret = vgic_register_its_iodevs(kvm); if (ret) { diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 89eb935c05be..5f17eace077c 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -174,7 +174,8 @@ int vgic_v3_probe(const struct gic_kvm_info *info); int vgic_v3_map_resources(struct kvm *kvm); int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq); int vgic_v3_save_pending_tables(struct kvm *kvm); -int vgic_register_redist_iodevs(struct kvm *kvm); +int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr); +int vgic_register_redist_iodev(struct kvm_vcpu *vcpu); bool vgic_v3_check_base(struct kvm *kvm); void vgic_v3_load(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 6cc40f273b30ef8f7b37f95cd2e6456d652808c0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 8 May 2017 18:15:40 +0100 Subject: KVM: arm/arm64: Get rid of its->initialized field The its->initialized doesn't bring much to the table, and creates unnecessary ordering between setting the address and initializing it (which amounts to exactly nothing). Let's kill it altogether, making KVM_DEV_ARM_VGIC_CTRL_INIT the no-op it deserves to be. 
Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Reviewed-by: Eric Auger --- include/kvm/arm_vgic.h | 1 - virt/kvm/arm/vgic/vgic-its.c | 7 +------ 2 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include') diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4ff65efcfebd..bfde6fb78e8a 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -148,7 +148,6 @@ struct vgic_its { gpa_t vgic_its_base; bool enabled; - bool initialized; struct vgic_io_device iodev; struct kvm_device *dev; diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 9f7105c61ece..18318c67ed93 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -1545,9 +1545,6 @@ static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its) struct vgic_io_device *iodev = &its->iodev; int ret; - if (!its->initialized) - return -EBUSY; - if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) return -ENXIO; @@ -1597,7 +1594,6 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) INIT_LIST_HEAD(&its->collection_list); dev->kvm->arch.vgic.has_its = true; - its->initialized = false; its->enabled = false; its->dev = dev; @@ -2397,8 +2393,7 @@ static int vgic_its_set_attr(struct kvm_device *dev, switch (attr->attr) { case KVM_DEV_ARM_VGIC_CTRL_INIT: - its->initialized = true; - + /* Nothing to do */ return 0; case KVM_DEV_ARM_ITS_SAVE_TABLES: return abi->save_tables(its); -- cgit v1.2.3 From ea47dd191d543f81e0912b5dc0471b48346b016e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 19 Apr 2017 05:12:18 +1000 Subject: of/fdt: introduce of_scan_flat_dt_subnodes and of_get_flat_dt_phandle Introduce primitives for FDT parsing. These will be used for powerpc cpufeatures node scanning, which has quite complex structure but should be processed early. Cc: devicetree@vger.kernel.org Acked-by: Rob Herring Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- drivers/of/fdt.c | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/of_fdt.h | 6 ++++++ 2 files changed, 44 insertions(+) (limited to 'include') diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index e5ce4b59e162..961ca97072a9 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -753,6 +753,36 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, return rc; } +/** + * of_scan_flat_dt_subnodes - scan sub-nodes of a node call callback on each. + * @it: callback function + * @data: context data pointer + * + * This function is used to scan sub-nodes of a node. 
+ */ +int __init of_scan_flat_dt_subnodes(unsigned long parent, + int (*it)(unsigned long node, + const char *uname, + void *data), + void *data) +{ + const void *blob = initial_boot_params; + int node; + + fdt_for_each_subnode(node, blob, parent) { + const char *pathp; + int rc; + + pathp = fdt_get_name(blob, node, NULL); + if (*pathp == '/') + pathp = kbasename(pathp); + rc = it(node, pathp, data); + if (rc) + return rc; + } + return 0; +} + /** * of_get_flat_dt_subnode_by_name - get the subnode by given name * @@ -812,6 +842,14 @@ int __init of_flat_dt_match(unsigned long node, const char *const *compat) return of_fdt_match(initial_boot_params, node, compat); } +/** + * of_get_flat_dt_prop - Given a node in the flat blob, return the phandle + */ +uint32_t __init of_get_flat_dt_phandle(unsigned long node) +{ + return fdt_get_phandle(initial_boot_params, node); +} + struct fdt_scan_status { const char *name; int namelen; diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 271b3fdf0070..1dfbfd0d8040 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -54,6 +54,11 @@ extern char __dtb_end[]; extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, int depth, void *data), void *data); +extern int of_scan_flat_dt_subnodes(unsigned long node, + int (*it)(unsigned long node, + const char *uname, + void *data), + void *data); extern int of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname); extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, @@ -62,6 +67,7 @@ extern int of_flat_dt_is_compatible(unsigned long node, const char *name); extern int of_flat_dt_match(unsigned long node, const char *const *matches); extern unsigned long of_get_flat_dt_root(void); extern int of_get_flat_dt_size(void); +extern uint32_t of_get_flat_dt_phandle(unsigned long node); extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data); -- cgit v1.2.3 From 9ea762a5ae45235eb3e5ec9c05c33cf37db78d70 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Thu, 30 Mar 2017 13:13:33 +0200 Subject: virtio: virtio_driver doc Add comments for the virtio_driver members that were not documented. Signed-off-by: Cornelia Huck Signed-off-by: Michael S. Tsirkin --- include/linux/virtio.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/virtio.h b/include/linux/virtio.h index ed04753278d4..28b0e965360f 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -165,9 +165,13 @@ int virtio_device_restore(struct virtio_device *dev); * @feature_table_legacy: same as feature_table but when working in legacy mode. * @feature_table_size_legacy: number of entries in feature table legacy array. * @probe: the function to call when a device is found. Returns 0 or -errno. + * @scan: optional function to call after successful probe; intended + * for virtio-scsi to invoke a scan. * @remove: the function to call when a device is removed. * @config_changed: optional function to call when the device configuration * changes; may be called in interrupt context. + * @freeze: optional function to call during suspend/hibernation. + * @restore: optional function to call on resume. */ struct virtio_driver { struct device_driver driver; -- cgit v1.2.3 From fb9de9704775d6190c204f4ddf8da4cfdac26be1 Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Fri, 7 Apr 2017 08:25:09 +0300 Subject: ptr_ring: batch ring zeroing A known weakness in ptr_ring design is that it does not handle well the situation when ring is almost full: as entries are consumed they are immediately used again by the producer, so consumer and producer are writing to a shared cache line. To fix this, add batching to consume calls: as entries are consumed do not write NULL into the ring until we get a multiple (in current implementation 2x) of cache lines away from the producer. At that point, write them all out. We do the write out in the reverse order to keep producer from sharing cache with consumer for as long as possible. Writeout also triggers when ring wraps around - there's no special reason to do this but it helps keep the code a bit simpler. What should we do if getting away from producer by 2 cache lines would mean we are keeping the ring moe than half empty? Maybe we should reduce the batching in this case, current patch simply reduces the batching. Notes: - it is no longer true that a call to consume guarantees that the following call to produce will succeed. No users seem to assume that. - batching can also in theory reduce the signalling rate: users that would previously send interrups to the producer to wake it up after consuming each entry would now only need to do this once in a batch. Doing this would be easy by returning a flag to the caller. No users seem to do signalling on consume yet so this was not implemented yet. Signed-off-by: Michael S. Tsirkin Reviewed-by: Jesper Dangaard Brouer Acked-by: Jason Wang --- include/linux/ptr_ring.h | 63 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 54 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6c70444da3b9..6b2e0dd88569 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -34,11 +34,13 @@ struct ptr_ring { int producer ____cacheline_aligned_in_smp; spinlock_t producer_lock; - int consumer ____cacheline_aligned_in_smp; + int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */ + int consumer_tail; /* next entry to invalidate */ spinlock_t consumer_lock; /* Shared consumer/producer data */ /* Read-only by both the producer and the consumer */ int size ____cacheline_aligned_in_smp; /* max entries in queue */ + int batch; /* number of entries to consume in a batch */ void **queue; }; @@ -170,7 +172,7 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) static inline void *__ptr_ring_peek(struct ptr_ring *r) { if (likely(r->size)) - return r->queue[r->consumer]; + return r->queue[r->consumer_head]; return NULL; } @@ -231,9 +233,38 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r) /* Must only be called after __ptr_ring_peek returned !NULL */ static inline void __ptr_ring_discard_one(struct ptr_ring *r) { - r->queue[r->consumer++] = NULL; - if (unlikely(r->consumer >= r->size)) - r->consumer = 0; + /* Fundamentally, what we want to do is update consumer + * index and zero out the entry so producer can reuse it. + * Doing it naively at each consume would be as simple as: + * r->queue[r->consumer++] = NULL; + * if (unlikely(r->consumer >= r->size)) + * r->consumer = 0; + * but that is suboptimal when the ring is full as producer is writing + * out new entries in the same cache line. Defer these updates until a + * batch of entries has been consumed. 
+ */ + int head = r->consumer_head++; + + /* Once we have processed enough entries invalidate them in + * the ring all at once so producer can reuse their space in the ring. + * We also do this when we reach end of the ring - not mandatory + * but helps keep the implementation simple. + */ + if (unlikely(r->consumer_head - r->consumer_tail >= r->batch || + r->consumer_head >= r->size)) { + /* Zero out entries in the reverse order: this way we touch the + * cache line that producer might currently be reading the last; + * producer won't make progress and touch other cache lines + * besides the first one until we write out all entries. + */ + while (likely(head >= r->consumer_tail)) + r->queue[head--] = NULL; + r->consumer_tail = r->consumer_head; + } + if (unlikely(r->consumer_head >= r->size)) { + r->consumer_head = 0; + r->consumer_tail = 0; + } } static inline void *__ptr_ring_consume(struct ptr_ring *r) @@ -345,14 +376,27 @@ static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); } +static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) +{ + r->size = size; + r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); + /* We need to set batch at least to 1 to make logic + * in __ptr_ring_discard_one work correctly. + * Batching too much (because ring is small) would cause a lot of + * burstiness. Needs tuning, for now disable batching. + */ + if (r->batch > r->size / 2 || !r->batch) + r->batch = 1; +} + static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) { r->queue = __ptr_ring_init_queue_alloc(size, gfp); if (!r->queue) return -ENOMEM; - r->size = size; - r->producer = r->consumer = 0; + __ptr_ring_set_size(r, size); + r->producer = r->consumer_head = r->consumer_tail = 0; spin_lock_init(&r->producer_lock); spin_lock_init(&r->consumer_lock); @@ -373,9 +417,10 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, else if (destroy) destroy(ptr); - r->size = size; + __ptr_ring_set_size(r, size); r->producer = producer; - r->consumer = 0; + r->consumer_head = 0; + r->consumer_tail = 0; old = r->queue; r->queue = queue; -- cgit v1.2.3 From 9078b4eea119c13d633d45af0397c821a517b522 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 27 Mar 2017 14:20:11 +0200 Subject: uapi: includes linux/types.h before exporting files Some files will be exported after a following patch. 
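The 0-day reports quoted next all reduce to one rule: an exported header that uses the fixed-width __u8/__u16/__u32/__u64 types must include <linux/types.h> itself. A hypothetical conforming header (illustrative name, not one of the files touched by this patch):

/* Hypothetical UAPI header showing the rule being enforced: the
 * fixed-width kernel types come from <linux/types.h>, which the
 * exported header pulls in itself rather than relying on its users
 * or on <asm/types.h>. */
#ifndef _UAPI_LINUX_EXAMPLE_H
#define _UAPI_LINUX_EXAMPLE_H

#include <linux/types.h>

struct example_record {
	__u32 id;
	__u64 value;
	__u8  flags[4];
};

#endif /* _UAPI_LINUX_EXAMPLE_H */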
0-day tests report the following warning/error: ./usr/include/linux/bcache.h:8: include of is preferred over ./usr/include/linux/bcache.h:11: found __[us]{8,16,32,64} type without #include ./usr/include/linux/qrtr.h:8: found __[us]{8,16,32,64} type without #include ./usr/include/linux/cryptouser.h:39: found __[us]{8,16,32,64} type without #include ./usr/include/linux/pr.h:14: found __[us]{8,16,32,64} type without #include ./usr/include/linux/btrfs_tree.h:337: found __[us]{8,16,32,64} type without #include ./usr/include/rdma/bnxt_re-abi.h:45: found __[us]{8,16,32,64} type without #include Signed-off-by: Nicolas Dichtel Signed-off-by: Masahiro Yamada --- include/uapi/linux/bcache.h | 2 +- include/uapi/linux/btrfs_tree.h | 2 ++ include/uapi/linux/cryptouser.h | 2 ++ include/uapi/linux/pr.h | 2 ++ include/uapi/linux/qrtr.h | 1 + include/uapi/rdma/bnxt_re-abi.h | 2 ++ 6 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h index 22b6ad31c706..e3bb0635e94a 100644 --- a/include/uapi/linux/bcache.h +++ b/include/uapi/linux/bcache.h @@ -5,7 +5,7 @@ * Bcache on disk data structures */ -#include +#include #define BITMASK(name, type, field, offset, size) \ static inline __u64 name(const type *k) \ diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index d5ad15a106a7..6a261cb52d95 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -1,6 +1,8 @@ #ifndef _BTRFS_CTREE_H_ #define _BTRFS_CTREE_H_ +#include + /* * This header contains the structure definitions and constants used * by file system objects that can be retrieved using diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index b4def5c630e7..fdcbb3c29083 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -18,6 +18,8 @@ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ +#include + /* Netlink configuration messages. */ enum { CRYPTO_MSG_BASE = 0x10, diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h index 57d7c0f916b6..645ef3cf3dd0 100644 --- a/include/uapi/linux/pr.h +++ b/include/uapi/linux/pr.h @@ -1,6 +1,8 @@ #ifndef _UAPI_PR_H #define _UAPI_PR_H +#include + enum pr_type { PR_WRITE_EXCLUSIVE = 1, PR_EXCLUSIVE_ACCESS = 2, diff --git a/include/uapi/linux/qrtr.h b/include/uapi/linux/qrtr.h index 66c0748d26e2..9d76c566f66e 100644 --- a/include/uapi/linux/qrtr.h +++ b/include/uapi/linux/qrtr.h @@ -2,6 +2,7 @@ #define _LINUX_QRTR_H #include +#include struct sockaddr_qrtr { __kernel_sa_family_t sq_family; diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h index e2c8a3f0ccec..74018bd18d72 100644 --- a/include/uapi/rdma/bnxt_re-abi.h +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -39,6 +39,8 @@ #ifndef __BNXT_RE_UVERBS_ABI_H__ #define __BNXT_RE_UVERBS_ABI_H__ +#include + #define BNXT_RE_ABI_VERSION 1 struct bnxt_re_uctx_resp { -- cgit v1.2.3 From 3a4e7f56ca6cd66e69a159073584434b2e639c71 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 27 Mar 2017 14:20:12 +0200 Subject: btrfs_tree.h: fix include from userland MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch prepares the uapi export by fixing the following errors: .../linux/btrfs_tree.h:283:2: error: #error "UUID items require BTRFS_UUID_SIZE == 16!" #error "UUID items require BTRFS_UUID_SIZE == 16!" 
.../linux/btrfs_tree.h:390:12: error: ‘BTRFS_UUID_SIZE’ undeclared here (not in a function) __u8 uuid[BTRFS_UUID_SIZE]; ^ .../linux/btrfs_tree.h:796:16: error: ‘BTRFS_DEV_STAT_VALUES_MAX’ undeclared here (not in a function) __le64 values[BTRFS_DEV_STAT_VALUES_MAX]; Signed-off-by: Nicolas Dichtel Signed-off-by: Masahiro Yamada --- include/uapi/linux/btrfs_tree.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 6a261cb52d95..10689e1fdf11 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -1,6 +1,7 @@ #ifndef _BTRFS_CTREE_H_ #define _BTRFS_CTREE_H_ +#include #include /* -- cgit v1.2.3 From ea6819e1f2d6c30624ea067f4b3a50a3cca79d8a Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 27 Mar 2017 14:20:14 +0200 Subject: smc_diag.h: fix include from userland This patch prepares the uapi export by fixing the following error: .../linux/smc_diag.h:6:27: fatal error: rdma/ib_verbs.h: No such file or directory #include Signed-off-by: Nicolas Dichtel Signed-off-by: Masahiro Yamada --- include/rdma/ib_verbs.h | 3 +-- include/uapi/linux/smc_diag.h | 2 +- include/uapi/rdma/ib_user_verbs.h | 2 ++ 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index f0cb4906478a..ba8314ec5768 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -62,6 +62,7 @@ #include #include #include +#include extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; @@ -1889,8 +1890,6 @@ enum ib_mad_result { IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ }; -#define IB_DEVICE_NAME_MAX 64 - struct ib_port_cache { struct ib_pkey_cache *pkey; struct ib_gid_table *gid; diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index 0063919fea34..87712bfaa9dd 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h @@ -3,7 +3,7 @@ #include #include -#include +#include /* Request structure */ struct smc_diag_req { diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 477d629f539d..270c350bedc6 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -1135,4 +1135,6 @@ struct ib_uverbs_ex_destroy_rwq_ind_table { __u32 ind_tbl_handle; }; +#define IB_DEVICE_NAME_MAX 64 + #endif /* IB_USER_VERBS_H */ -- cgit v1.2.3 From fcc8487d477a3452a1d0ccbdd4c5e0e1e3cb8bed Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 27 Mar 2017 14:20:15 +0200 Subject: uapi: export all headers under uapi directories Regularly, when a new header is created in include/uapi/, the developer forgets to add it in the corresponding Kbuild file. This error is usually detected after the release is out. In fact, all headers under uapi directories should be exported, thus it's useless to have an exhaustive list. 
After this patch, the following files, which were not exported, are now exported (with make headers_install_all): asm-arc/kvm_para.h asm-arc/ucontext.h asm-blackfin/shmparam.h asm-blackfin/ucontext.h asm-c6x/shmparam.h asm-c6x/ucontext.h asm-cris/kvm_para.h asm-h8300/shmparam.h asm-h8300/ucontext.h asm-hexagon/shmparam.h asm-m32r/kvm_para.h asm-m68k/kvm_para.h asm-m68k/shmparam.h asm-metag/kvm_para.h asm-metag/shmparam.h asm-metag/ucontext.h asm-mips/hwcap.h asm-mips/reg.h asm-mips/ucontext.h asm-nios2/kvm_para.h asm-nios2/ucontext.h asm-openrisc/shmparam.h asm-parisc/kvm_para.h asm-powerpc/perf_regs.h asm-sh/kvm_para.h asm-sh/ucontext.h asm-tile/shmparam.h asm-unicore32/shmparam.h asm-unicore32/ucontext.h asm-x86/hwcap2.h asm-xtensa/kvm_para.h drm/armada_drm.h drm/etnaviv_drm.h drm/vgem_drm.h linux/aspeed-lpc-ctrl.h linux/auto_dev-ioctl.h linux/bcache.h linux/btrfs_tree.h linux/can/vxcan.h linux/cifs/cifs_mount.h linux/coresight-stm.h linux/cryptouser.h linux/fsmap.h linux/genwqe/genwqe_card.h linux/hash_info.h linux/kcm.h linux/kcov.h linux/kfd_ioctl.h linux/lightnvm.h linux/module.h linux/nbd-netlink.h linux/nilfs2_api.h linux/nilfs2_ondisk.h linux/nsfs.h linux/pr.h linux/qrtr.h linux/rpmsg.h linux/sched/types.h linux/sed-opal.h linux/smc.h linux/smc_diag.h linux/stm.h linux/switchtec_ioctl.h linux/vfio_ccw.h linux/wil6210_uapi.h rdma/bnxt_re-abi.h Note that I have removed from this list the files which are generated in every exported directories (like .install or .install.cmd). Thanks to Julien Floret for the tip to get all subdirs with a pure makefile command. For the record, note that exported files for asm directories are a mix of files listed by: - include/uapi/asm-generic/Kbuild.asm; - arch//include/uapi/asm/Kbuild; - arch//include/asm/Kbuild. 
Signed-off-by: Nicolas Dichtel Acked-by: Daniel Vetter Acked-by: Russell King Acked-by: Mark Salter Acked-by: Michael Ellerman (powerpc) Signed-off-by: Masahiro Yamada --- Documentation/kbuild/makefiles.txt | 66 ++-- arch/alpha/include/uapi/asm/Kbuild | 41 --- arch/arc/include/uapi/asm/Kbuild | 3 - arch/arm/include/uapi/asm/Kbuild | 17 - arch/arm64/include/uapi/asm/Kbuild | 18 - arch/blackfin/include/uapi/asm/Kbuild | 17 - arch/c6x/include/uapi/asm/Kbuild | 8 - arch/cris/include/uapi/arch-v10/arch/Kbuild | 5 - arch/cris/include/uapi/arch-v32/arch/Kbuild | 3 - arch/cris/include/uapi/asm/Kbuild | 43 +-- arch/frv/include/uapi/asm/Kbuild | 33 -- arch/h8300/include/uapi/asm/Kbuild | 28 -- arch/hexagon/include/asm/Kbuild | 3 - arch/hexagon/include/uapi/asm/Kbuild | 13 - arch/ia64/include/uapi/asm/Kbuild | 45 --- arch/m32r/include/uapi/asm/Kbuild | 31 -- arch/m68k/include/uapi/asm/Kbuild | 24 -- arch/metag/include/uapi/asm/Kbuild | 8 - arch/microblaze/include/uapi/asm/Kbuild | 32 -- arch/mips/include/uapi/asm/Kbuild | 37 --- arch/mn10300/include/uapi/asm/Kbuild | 32 -- arch/nios2/include/uapi/asm/Kbuild | 3 +- arch/openrisc/include/asm/Kbuild | 3 - arch/openrisc/include/uapi/asm/Kbuild | 8 - arch/parisc/include/uapi/asm/Kbuild | 28 -- arch/powerpc/include/uapi/asm/Kbuild | 45 --- arch/s390/include/uapi/asm/Kbuild | 46 --- arch/score/include/asm/Kbuild | 3 - arch/score/include/uapi/asm/Kbuild | 32 -- arch/sh/include/uapi/asm/Kbuild | 23 -- arch/sparc/include/uapi/asm/Kbuild | 48 --- arch/tile/include/asm/Kbuild | 3 - arch/tile/include/uapi/arch/Kbuild | 17 - arch/tile/include/uapi/asm/Kbuild | 19 +- arch/unicore32/include/uapi/asm/Kbuild | 6 - arch/x86/include/uapi/asm/Kbuild | 58 ---- arch/xtensa/include/uapi/asm/Kbuild | 23 -- include/Kbuild | 2 - include/asm-generic/Kbuild.asm | 1 - include/scsi/fc/Kbuild | 0 include/uapi/Kbuild | 15 - include/uapi/asm-generic/Kbuild | 36 -- include/uapi/asm-generic/Kbuild.asm | 76 ++--- include/uapi/drm/Kbuild | 23 -- include/uapi/linux/Kbuild | 494 +--------------------------- include/uapi/linux/android/Kbuild | 2 - include/uapi/linux/byteorder/Kbuild | 3 - include/uapi/linux/caif/Kbuild | 3 - include/uapi/linux/can/Kbuild | 6 - include/uapi/linux/dvb/Kbuild | 9 - include/uapi/linux/hdlc/Kbuild | 2 - include/uapi/linux/hsi/Kbuild | 2 - include/uapi/linux/iio/Kbuild | 3 - include/uapi/linux/isdn/Kbuild | 2 - include/uapi/linux/mmc/Kbuild | 2 - include/uapi/linux/netfilter/Kbuild | 89 ----- include/uapi/linux/netfilter/ipset/Kbuild | 5 - include/uapi/linux/netfilter_arp/Kbuild | 3 - include/uapi/linux/netfilter_bridge/Kbuild | 18 - include/uapi/linux/netfilter_ipv4/Kbuild | 10 - include/uapi/linux/netfilter_ipv6/Kbuild | 13 - include/uapi/linux/nfsd/Kbuild | 6 - include/uapi/linux/raid/Kbuild | 3 - include/uapi/linux/spi/Kbuild | 2 - include/uapi/linux/sunrpc/Kbuild | 2 - include/uapi/linux/tc_act/Kbuild | 16 - include/uapi/linux/tc_ematch/Kbuild | 5 - include/uapi/linux/usb/Kbuild | 12 - include/uapi/linux/wimax/Kbuild | 2 - include/uapi/misc/Kbuild | 2 - include/uapi/mtd/Kbuild | 6 - include/uapi/rdma/Kbuild | 20 -- include/uapi/rdma/hfi/Kbuild | 3 - include/uapi/scsi/Kbuild | 6 - include/uapi/scsi/fc/Kbuild | 5 - include/uapi/sound/Kbuild | 16 - include/uapi/video/Kbuild | 4 - include/uapi/xen/Kbuild | 5 - include/video/Kbuild | 0 scripts/Makefile.headersinst | 55 ++-- 80 files changed, 111 insertions(+), 1750 deletions(-) delete mode 100644 arch/cris/include/uapi/arch-v10/arch/Kbuild delete mode 100644 arch/cris/include/uapi/arch-v32/arch/Kbuild delete mode 
100644 arch/tile/include/uapi/arch/Kbuild delete mode 100644 include/Kbuild delete mode 100644 include/asm-generic/Kbuild.asm delete mode 100644 include/scsi/fc/Kbuild delete mode 100644 include/uapi/Kbuild delete mode 100644 include/uapi/asm-generic/Kbuild delete mode 100644 include/uapi/drm/Kbuild delete mode 100644 include/uapi/linux/android/Kbuild delete mode 100644 include/uapi/linux/byteorder/Kbuild delete mode 100644 include/uapi/linux/caif/Kbuild delete mode 100644 include/uapi/linux/can/Kbuild delete mode 100644 include/uapi/linux/dvb/Kbuild delete mode 100644 include/uapi/linux/hdlc/Kbuild delete mode 100644 include/uapi/linux/hsi/Kbuild delete mode 100644 include/uapi/linux/iio/Kbuild delete mode 100644 include/uapi/linux/isdn/Kbuild delete mode 100644 include/uapi/linux/mmc/Kbuild delete mode 100644 include/uapi/linux/netfilter/Kbuild delete mode 100644 include/uapi/linux/netfilter/ipset/Kbuild delete mode 100644 include/uapi/linux/netfilter_arp/Kbuild delete mode 100644 include/uapi/linux/netfilter_bridge/Kbuild delete mode 100644 include/uapi/linux/netfilter_ipv4/Kbuild delete mode 100644 include/uapi/linux/netfilter_ipv6/Kbuild delete mode 100644 include/uapi/linux/nfsd/Kbuild delete mode 100644 include/uapi/linux/raid/Kbuild delete mode 100644 include/uapi/linux/spi/Kbuild delete mode 100644 include/uapi/linux/sunrpc/Kbuild delete mode 100644 include/uapi/linux/tc_act/Kbuild delete mode 100644 include/uapi/linux/tc_ematch/Kbuild delete mode 100644 include/uapi/linux/usb/Kbuild delete mode 100644 include/uapi/linux/wimax/Kbuild delete mode 100644 include/uapi/misc/Kbuild delete mode 100644 include/uapi/mtd/Kbuild delete mode 100644 include/uapi/rdma/Kbuild delete mode 100644 include/uapi/rdma/hfi/Kbuild delete mode 100644 include/uapi/scsi/Kbuild delete mode 100644 include/uapi/scsi/fc/Kbuild delete mode 100644 include/uapi/sound/Kbuild delete mode 100644 include/uapi/video/Kbuild delete mode 100644 include/uapi/xen/Kbuild delete mode 100644 include/video/Kbuild (limited to 'include') diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index 37b525d329ae..b9f7ca4e62ae 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -44,10 +44,12 @@ This document describes the Linux kernel Makefiles. --- 6.11 Post-link pass === 7 Kbuild syntax for exported headers - --- 7.1 header-y + --- 7.1 no-export-headers --- 7.2 genhdr-y --- 7.3 generic-y --- 7.4 generated-y + --- 7.5 mandatory-y + --- 7.6 subdir-y === 8 Kbuild Variables === 9 Makefile language @@ -1235,7 +1237,7 @@ When kbuild executes, the following steps are followed (roughly): that may be shared between individual architectures. The recommended approach how to use a generic header file is to list the file in the Kbuild file. - See "7.4 generic-y" for further info on syntax etc. + See "7.3 generic-y" for further info on syntax etc. --- 6.11 Post-link pass @@ -1262,37 +1264,30 @@ The pre-processing does: - drop include of compiler.h - drop all sections that are kernel internal (guarded by ifdef __KERNEL__) -Each relevant directory contains a file name "Kbuild" which specifies the -headers to be exported. -See subsequent chapter for the syntax of the Kbuild file. - - --- 7.1 header-y - - header-y specifies header files to be exported. - - Example: - #include/linux/Kbuild - header-y += usb/ - header-y += aio_abi.h +All headers under include/uapi/, include/generated/uapi/, +arch//include/uapi/asm/ and arch//include/generated/uapi/asm/ +are exported. 
- The convention is to list one file per line and - preferably in alphabetic order. +A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and +arch/<arch>/include/asm/ to list asm files coming from asm-generic. +See subsequent chapter for the syntax of the Kbuild file. - header-y also specifies which subdirectories to visit. - A subdirectory is identified by a trailing '/' which - can be seen in the example above for the usb subdirectory. + --- 7.1 no-export-headers - Subdirectories are visited before their parent directories. + no-export-headers is essentially used by include/uapi/linux/Kbuild to + avoid exporting specific headers (e.g. kvm.h) on architectures that do + not support it. It should be avoided as much as possible. --- 7.2 genhdr-y - genhdr-y specifies generated files to be exported. - Generated files are special as they need to be looked - up in another directory when doing 'make O=...' builds. + genhdr-y specifies asm files to be generated. Example: - #include/linux/Kbuild - genhdr-y += version.h + #arch/x86/include/uapi/asm/Kbuild + genhdr-y += unistd_32.h + genhdr-y += unistd_64.h + genhdr-y += unistd_x32.h + --- 7.3 generic-y @@ -1334,6 +1329,27 @@ See subsequent chapter for the syntax of the Kbuild file. #arch/x86/include/asm/Kbuild generated-y += syscalls_32.h + --- 7.5 mandatory-y + + mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm + to define the minimum set of headers that must be exported in + include/asm. + + The convention is to list one file per line and + preferably in alphabetic order. + + --- 7.6 subdir-y + + subdir-y may be used to specify a subdirectory to be exported. + + Example: + #arch/cris/include/uapi/asm/Kbuild + subdir-y += ../arch-v10/arch/ + subdir-y += ../arch-v32/arch/ + + The convention is to list one subdir per line and + preferably in alphabetic order.
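As a combined illustration, a minimal sketch of an architecture uapi Kbuild file after this conversion could look as follows; the architecture name "foo" and the individual header names are hypothetical, chosen only to show the remaining keywords side by side:

	# arch/foo/include/uapi/asm/Kbuild (hypothetical sketch)
	# UAPI Header export list
	include include/uapi/asm-generic/Kbuild.asm

	# take this header from asm-generic instead of a per-arch copy
	generic-y += kvm_para.h

	# headers generated into arch/foo/include/generated/uapi/asm/
	genhdr-y += unistd_32.h

	# an extra directory, relative to this one, whose headers are also exported
	subdir-y += ../arch-v1/arch/

Every other header sitting in the directory is now exported automatically, so no explicit per-file list is needed.
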
+ === 8 Kbuild Variables The top Makefile exports the following variables: diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild index d96f2ef5b639..b15bf6bc0e94 100644 --- a/arch/alpha/include/uapi/asm/Kbuild +++ b/arch/alpha/include/uapi/asm/Kbuild @@ -1,43 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += a.out.h -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += compiler.h -header-y += console.h -header-y += errno.h -header-y += fcntl.h -header-y += fpu.h -header-y += gentrap.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += pal.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += reg.h -header-y += regdef.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += sysinfo.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild index f50d02df78d5..b15bf6bc0e94 100644 --- a/arch/arc/include/uapi/asm/Kbuild +++ b/arch/arc/include/uapi/asm/Kbuild @@ -1,5 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -header-y += elf.h -header-y += page.h -header-y += cachectl.h diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index 46a76cd6acb6..607f702c2d62 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild @@ -1,23 +1,6 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -header-y += auxvec.h -header-y += byteorder.h -header-y += fcntl.h -header-y += hwcap.h -header-y += ioctls.h -header-y += kvm_para.h -header-y += mman.h -header-y += perf_regs.h -header-y += posix_types.h -header-y += ptrace.h -header-y += setup.h -header-y += sigcontext.h -header-y += signal.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += unistd.h genhdr-y += unistd-common.h genhdr-y += unistd-oabi.h genhdr-y += unistd-eabi.h diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index 825b0fe51c2b..13a97aa2285f 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -2,21 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += fcntl.h -header-y += hwcap.h -header-y += kvm_para.h -header-y += perf_regs.h -header-y += param.h -header-y += ptrace.h -header-y += setup.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += stat.h -header-y += statfs.h -header-y += ucontext.h -header-y += unistd.h diff --git a/arch/blackfin/include/uapi/asm/Kbuild b/arch/blackfin/include/uapi/asm/Kbuild index 0bd28f77abc3..b15bf6bc0e94 100644 --- a/arch/blackfin/include/uapi/asm/Kbuild +++ b/arch/blackfin/include/uapi/asm/Kbuild @@ -1,19 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += bfin_sport.h -header-y += byteorder.h -header-y += cachectl.h -header-y += fcntl.h -header-y += fixed_code.h -header-y += ioctls.h -header-y += kvm_para.h -header-y += poll.h -header-y += 
posix_types.h -header-y += ptrace.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += stat.h -header-y += swab.h -header-y += unistd.h diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild index e9bc2b2b8147..13a97aa2285f 100644 --- a/arch/c6x/include/uapi/asm/Kbuild +++ b/arch/c6x/include/uapi/asm/Kbuild @@ -2,11 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h - -header-y += byteorder.h -header-y += kvm_para.h -header-y += ptrace.h -header-y += setup.h -header-y += sigcontext.h -header-y += swab.h -header-y += unistd.h diff --git a/arch/cris/include/uapi/arch-v10/arch/Kbuild b/arch/cris/include/uapi/arch-v10/arch/Kbuild deleted file mode 100644 index 9048c87a782b..000000000000 --- a/arch/cris/include/uapi/arch-v10/arch/Kbuild +++ /dev/null @@ -1,5 +0,0 @@ -# UAPI Header export list -header-y += sv_addr.agh -header-y += sv_addr_ag.h -header-y += svinto.h -header-y += user.h diff --git a/arch/cris/include/uapi/arch-v32/arch/Kbuild b/arch/cris/include/uapi/arch-v32/arch/Kbuild deleted file mode 100644 index 59efffd16b61..000000000000 --- a/arch/cris/include/uapi/arch-v32/arch/Kbuild +++ /dev/null @@ -1,3 +0,0 @@ -# UAPI Header export list -header-y += cryptocop.h -header-y += user.h diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild index d5564a0ae66a..d0c5471856e0 100644 --- a/arch/cris/include/uapi/asm/Kbuild +++ b/arch/cris/include/uapi/asm/Kbuild @@ -1,44 +1,5 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -header-y += ../arch-v10/arch/ -header-y += ../arch-v32/arch/ -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += elf.h -header-y += elf_v10.h -header-y += elf_v32.h -header-y += errno.h -header-y += ethernet.h -header-y += etraxgpio.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += ptrace_v10.h -header-y += ptrace_v32.h -header-y += resource.h -header-y += rs485.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += sync_serial.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h +subdir-y += ../arch-v10/arch/ +subdir-y += ../arch-v32/arch/ diff --git a/arch/frv/include/uapi/asm/Kbuild b/arch/frv/include/uapi/asm/Kbuild index 42a2b33461c0..b15bf6bc0e94 100644 --- a/arch/frv/include/uapi/asm/Kbuild +++ b/arch/frv/include/uapi/asm/Kbuild @@ -1,35 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += registers.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h 
-header-y += types.h -header-y += unistd.h diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild index fb6101a5d4f1..b15bf6bc0e94 100644 --- a/arch/h8300/include/uapi/asm/Kbuild +++ b/arch/h8300/include/uapi/asm/Kbuild @@ -1,30 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += siginfo.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index a2036bfda8af..6b45ef79eb8f 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -1,6 +1,3 @@ - -header-y += ucontext.h - generic-y += auxvec.h generic-y += barrier.h generic-y += bug.h diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild index c31706c38631..b15bf6bc0e94 100644 --- a/arch/hexagon/include/uapi/asm/Kbuild +++ b/arch/hexagon/include/uapi/asm/Kbuild @@ -1,15 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += bitsperlong.h -header-y += byteorder.h -header-y += kvm_para.h -header-y += param.h -header-y += ptrace.h -header-y += registers.h -header-y += setup.h -header-y += sigcontext.h -header-y += signal.h -header-y += swab.h -header-y += unistd.h -header-y += user.h diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild index 891002bbb995..13a97aa2285f 100644 --- a/arch/ia64/include/uapi/asm/Kbuild +++ b/arch/ia64/include/uapi/asm/Kbuild @@ -2,48 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += break.h -header-y += byteorder.h -header-y += cmpxchg.h -header-y += errno.h -header-y += fcntl.h -header-y += fpu.h -header-y += gcc_intrin.h -header-y += ia64regs.h -header-y += intel_intrin.h -header-y += intrinsics.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += perfmon.h -header-y += perfmon_default_smpl.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += ptrace_offsets.h -header-y += resource.h -header-y += rse.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += ucontext.h -header-y += unistd.h -header-y += ustack.h diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild index 43937a61d6cf..b15bf6bc0e94 100644 --- a/arch/m32r/include/uapi/asm/Kbuild +++ b/arch/m32r/include/uapi/asm/Kbuild @@ -1,33 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h 
-header-y += ioctls.h -header-y += ipcbuf.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index 6a2d257bdfb2..64368077235a 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -9,27 +9,3 @@ generic-y += socket.h generic-y += sockios.h generic-y += termbits.h generic-y += termios.h - -header-y += a.out.h -header-y += bootinfo.h -header-y += bootinfo-amiga.h -header-y += bootinfo-apollo.h -header-y += bootinfo-atari.h -header-y += bootinfo-hp300.h -header-y += bootinfo-mac.h -header-y += bootinfo-q40.h -header-y += bootinfo-vme.h -header-y += byteorder.h -header-y += cachectl.h -header-y += fcntl.h -header-y += ioctls.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += setup.h -header-y += sigcontext.h -header-y += signal.h -header-y += stat.h -header-y += swab.h -header-y += unistd.h diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild index ab78be2b6eb0..b29731ebd7a9 100644 --- a/arch/metag/include/uapi/asm/Kbuild +++ b/arch/metag/include/uapi/asm/Kbuild @@ -1,14 +1,6 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -header-y += byteorder.h -header-y += ech.h -header-y += ptrace.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += swab.h -header-y += unistd.h - generic-y += mman.h generic-y += resource.h generic-y += setup.h diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index 1aac99f87df1..2178c78c7c1a 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -2,35 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += types.h - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += elf.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += unistd.h diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild index f2cf41461146..a0266feba9e6 100644 --- a/arch/mips/include/uapi/asm/Kbuild +++ b/arch/mips/include/uapi/asm/Kbuild @@ -2,40 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += ipcbuf.h - -header-y += auxvec.h -header-y += bitfield.h -header-y += bitsperlong.h -header-y += break.h -header-y += byteorder.h -header-y += cachectl.h -header-y += errno.h -header-y += fcntl.h -header-y += inst.h -header-y += ioctl.h -header-y += ioctls.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h 
-header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += sgidefs.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += sysmips.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h diff --git a/arch/mn10300/include/uapi/asm/Kbuild b/arch/mn10300/include/uapi/asm/Kbuild index 040178cdb3eb..b15bf6bc0e94 100644 --- a/arch/mn10300/include/uapi/asm/Kbuild +++ b/arch/mn10300/include/uapi/asm/Kbuild @@ -1,34 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 69c965304146..374bd123329f 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild @@ -1,6 +1,5 @@ +# UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -header-y += elf.h - generic-y += setup.h generic-y += ucontext.h diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index df8e2f7bc7dd..fdbcf0bf44a4 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -1,6 +1,3 @@ - -header-y += ucontext.h - generic-y += auxvec.h generic-y += barrier.h generic-y += bitsperlong.h diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index 80761eb82b5f..b15bf6bc0e94 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild @@ -1,10 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += byteorder.h -header-y += elf.h -header-y += kvm_para.h -header-y += param.h -header-y += ptrace.h -header-y += sigcontext.h -header-y += unistd.h diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild index 348356c99514..3971c60a7e7f 100644 --- a/arch/parisc/include/uapi/asm/Kbuild +++ b/arch/parisc/include/uapi/asm/Kbuild @@ -2,31 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += resource.h - -header-y += bitsperlong.h -header-y += byteorder.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += mman.h -header-y += msgbuf.h -header-y += pdc.h -header-y += posix_types.h -header-y += ptrace.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h diff --git 
a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index dab3717e3ea0..b15bf6bc0e94 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild @@ -1,47 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += bootx.h -header-y += byteorder.h -header-y += cputable.h -header-y += eeh.h -header-y += elf.h -header-y += epapr_hcalls.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += nvram.h -header-y += opal-prd.h -header-y += param.h -header-y += perf_event.h -header-y += poll.h -header-y += posix_types.h -header-y += ps3fb.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += spu_info.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += tm.h -header-y += types.h -header-y += ucontext.h -header-y += unistd.h diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index addb09cee0f5..ca62066895e0 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -10,49 +10,3 @@ generic-y += poll.h generic-y += resource.h generic-y += sockios.h generic-y += termbits.h - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += chpid.h -header-y += chsc.h -header-y += clp.h -header-y += cmb.h -header-y += dasd.h -header-y += debug.h -header-y += errno.h -header-y += guarded_storage.h -header-y += hypfs.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm.h -header-y += kvm_para.h -header-y += kvm_perf.h -header-y += kvm_virtio.h -header-y += monwriter.h -header-y += msgbuf.h -header-y += pkey.h -header-y += posix_types.h -header-y += ptrace.h -header-y += qeth.h -header-y += schid.h -header-y += sclp_ctl.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sie.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += tape390.h -header-y += termios.h -header-y += types.h -header-y += ucontext.h -header-y += unistd.h -header-y += virtio-ccw.h -header-y += vtoc.h -header-y += zcrypt.h diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index e3a8d0f96652..54b3b2039af1 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -1,6 +1,3 @@ - -header-y += - generic-y += barrier.h generic-y += clkdev.h generic-y += current.h diff --git a/arch/score/include/uapi/asm/Kbuild b/arch/score/include/uapi/asm/Kbuild index 040178cdb3eb..b15bf6bc0e94 100644 --- a/arch/score/include/uapi/asm/Kbuild +++ b/arch/score/include/uapi/asm/Kbuild @@ -1,34 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += resource.h -header-y += 
sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild index 60613ae78513..b15bf6bc0e94 100644 --- a/arch/sh/include/uapi/asm/Kbuild +++ b/arch/sh/include/uapi/asm/Kbuild @@ -1,25 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += byteorder.h -header-y += cachectl.h -header-y += cpu-features.h -header-y += hw_breakpoint.h -header-y += ioctls.h -header-y += posix_types.h -header-y += posix_types_32.h -header-y += posix_types_64.h -header-y += ptrace.h -header-y += ptrace_32.h -header-y += ptrace_64.h -header-y += setup.h -header-y += sigcontext.h -header-y += signal.h -header-y += sockios.h -header-y += stat.h -header-y += swab.h -header-y += types.h -header-y += unistd.h -header-y += unistd_32.h -header-y += unistd_64.h diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index b5843ee09fb5..b15bf6bc0e94 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild @@ -1,50 +1,2 @@ # UAPI Header export list -# User exported sparc header files - include include/uapi/asm-generic/Kbuild.asm - -header-y += apc.h -header-y += asi.h -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += display7seg.h -header-y += envctrl.h -header-y += errno.h -header-y += fbio.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += jsflash.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += openpromio.h -header-y += param.h -header-y += perfctr.h -header-y += poll.h -header-y += posix_types.h -header-y += psr.h -header-y += psrcompat.h -header-y += pstate.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += traps.h -header-y += uctx.h -header-y += unistd.h -header-y += utrap.h -header-y += watchdog.h diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index 24c44e93804d..16f0b08c8ce9 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -1,6 +1,3 @@ - -header-y += ../arch/ - generic-y += bug.h generic-y += bugs.h generic-y += clkdev.h diff --git a/arch/tile/include/uapi/arch/Kbuild b/arch/tile/include/uapi/arch/Kbuild deleted file mode 100644 index 97dfbecec6b6..000000000000 --- a/arch/tile/include/uapi/arch/Kbuild +++ /dev/null @@ -1,17 +0,0 @@ -# UAPI Header export list -header-y += abi.h -header-y += chip.h -header-y += chip_tilegx.h -header-y += chip_tilepro.h -header-y += icache.h -header-y += interrupts.h -header-y += interrupts_32.h -header-y += interrupts_64.h -header-y += opcode.h -header-y += opcode_tilegx.h -header-y += opcode_tilepro.h -header-y += sim.h -header-y += sim_def.h -header-y += spr_def.h -header-y += spr_def_32.h -header-y += spr_def_64.h diff --git a/arch/tile/include/uapi/asm/Kbuild b/arch/tile/include/uapi/asm/Kbuild index c20db8e428bf..e0a50111e07f 100644 --- 
a/arch/tile/include/uapi/asm/Kbuild +++ b/arch/tile/include/uapi/asm/Kbuild @@ -1,21 +1,6 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += cachectl.h -header-y += hardwall.h -header-y += kvm_para.h -header-y += mman.h -header-y += ptrace.h -header-y += setup.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += stat.h -header-y += swab.h -header-y += ucontext.h -header-y += unistd.h - generic-y += ucontext.h + +subdir-y += ../arch diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index 0514d7ad6855..13a97aa2285f 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild @@ -1,10 +1,4 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm -header-y += byteorder.h -header-y += kvm_para.h -header-y += ptrace.h -header-y += sigcontext.h -header-y += unistd.h - generic-y += kvm_para.h diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild index 1c532b3f18ea..83b6e9a0dce4 100644 --- a/arch/x86/include/uapi/asm/Kbuild +++ b/arch/x86/include/uapi/asm/Kbuild @@ -4,61 +4,3 @@ include include/uapi/asm-generic/Kbuild.asm genhdr-y += unistd_32.h genhdr-y += unistd_64.h genhdr-y += unistd_x32.h -header-y += a.out.h -header-y += auxvec.h -header-y += bitsperlong.h -header-y += boot.h -header-y += bootparam.h -header-y += byteorder.h -header-y += debugreg.h -header-y += e820.h -header-y += errno.h -header-y += fcntl.h -header-y += hw_breakpoint.h -header-y += hyperv.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += ist.h -header-y += kvm.h -header-y += kvm_para.h -header-y += kvm_perf.h -header-y += ldt.h -header-y += mce.h -header-y += mman.h -header-y += msgbuf.h -header-y += msr.h -header-y += mtrr.h -header-y += param.h -header-y += perf_regs.h -header-y += poll.h -header-y += posix_types.h -header-y += posix_types_32.h -header-y += posix_types_64.h -header-y += posix_types_x32.h -header-y += prctl.h -header-y += processor-flags.h -header-y += ptrace-abi.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += sigcontext32.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += svm.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += ucontext.h -header-y += unistd.h -header-y += vm86.h -header-y += vmx.h -header-y += vsyscall.h diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild index 56aad54e7fb7..b15bf6bc0e94 100644 --- a/arch/xtensa/include/uapi/asm/Kbuild +++ b/arch/xtensa/include/uapi/asm/Kbuild @@ -1,25 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += byteorder.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += swab.h -header-y += termbits.h -header-y += types.h -header-y += unistd.h diff --git a/include/Kbuild b/include/Kbuild deleted file mode 100644 index 
bab1145bc7a7..000000000000 --- a/include/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# Top-level Makefile calls into asm-$(ARCH) -# List only non-arch directories below diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm deleted file mode 100644 index d2ee86b4c091..000000000000 --- a/include/asm-generic/Kbuild.asm +++ /dev/null @@ -1 +0,0 @@ -include include/uapi/asm-generic/Kbuild.asm diff --git a/include/scsi/fc/Kbuild b/include/scsi/fc/Kbuild deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild deleted file mode 100644 index 245aa6e05e6a..000000000000 --- a/include/uapi/Kbuild +++ /dev/null @@ -1,15 +0,0 @@ -# UAPI Header export list -# Top-level Makefile calls into asm-$(ARCH) -# List only non-arch directories below - - -header-y += asm-generic/ -header-y += linux/ -header-y += sound/ -header-y += mtd/ -header-y += rdma/ -header-y += video/ -header-y += drm/ -header-y += xen/ -header-y += scsi/ -header-y += misc/ diff --git a/include/uapi/asm-generic/Kbuild b/include/uapi/asm-generic/Kbuild deleted file mode 100644 index b73de7bb7a62..000000000000 --- a/include/uapi/asm-generic/Kbuild +++ /dev/null @@ -1,36 +0,0 @@ -# UAPI Header export list -header-y += auxvec.h -header-y += bitsperlong.h -header-y += errno-base.h -header-y += errno.h -header-y += fcntl.h -header-y += int-l64.h -header-y += int-ll64.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman-common.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += shmparam.h -header-y += siginfo.h -header-y += signal-defs.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += ucontext.h -header-y += unistd.h diff --git a/include/uapi/asm-generic/Kbuild.asm b/include/uapi/asm-generic/Kbuild.asm index fcd50b759217..21381449d98a 100644 --- a/include/uapi/asm-generic/Kbuild.asm +++ b/include/uapi/asm-generic/Kbuild.asm @@ -1,49 +1,33 @@ -# -# Headers that are optional in usr/include/asm/ -# -opt-header += kvm.h -opt-header += kvm_para.h -opt-header += a.out.h - # # Headers that are mandatory in usr/include/asm/ # -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h - -header-y += $(foreach hdr,$(opt-header), \ - $(if \ - $(wildcard \ - $(srctree)/arch/$(SRCARCH)/include/uapi/asm/$(hdr) \ - $(srctree)/arch/$(SRCARCH)/include/asm/$(hdr) \ - ), \ - $(hdr) \ - )) +mandatory-y += auxvec.h +mandatory-y += bitsperlong.h +mandatory-y += byteorder.h +mandatory-y += errno.h +mandatory-y += fcntl.h +mandatory-y += ioctl.h +mandatory-y += ioctls.h +mandatory-y += ipcbuf.h +mandatory-y += mman.h 
+mandatory-y += msgbuf.h +mandatory-y += param.h +mandatory-y += poll.h +mandatory-y += posix_types.h +mandatory-y += ptrace.h +mandatory-y += resource.h +mandatory-y += sembuf.h +mandatory-y += setup.h +mandatory-y += shmbuf.h +mandatory-y += sigcontext.h +mandatory-y += siginfo.h +mandatory-y += signal.h +mandatory-y += socket.h +mandatory-y += sockios.h +mandatory-y += stat.h +mandatory-y += statfs.h +mandatory-y += swab.h +mandatory-y += termbits.h +mandatory-y += termios.h +mandatory-y += types.h +mandatory-y += unistd.h diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild deleted file mode 100644 index c97addd08f8c..000000000000 --- a/include/uapi/drm/Kbuild +++ /dev/null @@ -1,23 +0,0 @@ -# UAPI Header export list -header-y += drm.h -header-y += drm_fourcc.h -header-y += drm_mode.h -header-y += drm_sarea.h -header-y += amdgpu_drm.h -header-y += exynos_drm.h -header-y += i810_drm.h -header-y += i915_drm.h -header-y += mga_drm.h -header-y += nouveau_drm.h -header-y += omap_drm.h -header-y += qxl_drm.h -header-y += r128_drm.h -header-y += radeon_drm.h -header-y += savage_drm.h -header-y += sis_drm.h -header-y += tegra_drm.h -header-y += via_drm.h -header-y += vmwgfx_drm.h -header-y += msm_drm.h -header-y += vc4_drm.h -header-y += virtgpu_drm.h diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 662c592b74dd..ca2787d9bf0f 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -1,495 +1,13 @@ # UAPI Header export list -header-y += android/ -header-y += byteorder/ -header-y += can/ -header-y += caif/ -header-y += dvb/ -header-y += hdlc/ -header-y += hsi/ -header-y += iio/ -header-y += isdn/ -header-y += mmc/ -header-y += nfsd/ -header-y += raid/ -header-y += spi/ -header-y += sunrpc/ -header-y += tc_act/ -header-y += tc_ematch/ -header-y += netfilter/ -header-y += netfilter_arp/ -header-y += netfilter_bridge/ -header-y += netfilter_ipv4/ -header-y += netfilter_ipv6/ -header-y += usb/ -header-y += wimax/ -genhdr-y += version.h - -ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/a.out.h \ - $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h),) -header-y += a.out.h +ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/a.out.h),) +no-export-headers += a.out.h endif -header-y += acct.h -header-y += adb.h -header-y += adfs_fs.h -header-y += affs_hardblocks.h -header-y += agpgart.h -header-y += aio_abi.h -header-y += am437x-vpfe.h -header-y += apm_bios.h -header-y += arcfb.h -header-y += atalk.h -header-y += atmapi.h -header-y += atmarp.h -header-y += atmbr2684.h -header-y += atmclip.h -header-y += atmdev.h -header-y += atm_eni.h -header-y += atm.h -header-y += atm_he.h -header-y += atm_idt77105.h -header-y += atmioc.h -header-y += atmlec.h -header-y += atmmpc.h -header-y += atm_nicstar.h -header-y += atmppp.h -header-y += atmsap.h -header-y += atmsvc.h -header-y += atm_tcp.h -header-y += atm_zatm.h -header-y += audit.h -header-y += auto_fs4.h -header-y += auto_fs.h -header-y += auxvec.h -header-y += ax25.h -header-y += b1lli.h -header-y += batman_adv.h -header-y += baycom.h -header-y += bcm933xx_hcs.h -header-y += bfs_fs.h -header-y += binfmts.h -header-y += blkpg.h -header-y += blktrace_api.h -header-y += blkzoned.h -header-y += bpf_common.h -header-y += bpf_perf_event.h -header-y += bpf.h -header-y += bpqether.h -header-y += bsg.h -header-y += bt-bmc.h -header-y += btrfs.h -header-y += can.h -header-y += capability.h -header-y += capi.h -header-y += cciss_defs.h -header-y += cciss_ioctl.h -header-y += cdrom.h -header-y += 
cec.h -header-y += cec-funcs.h -header-y += cgroupstats.h -header-y += chio.h -header-y += cm4000_cs.h -header-y += cn_proc.h -header-y += coda.h -header-y += coda_psdev.h -header-y += coff.h -header-y += connector.h -header-y += const.h -header-y += cramfs_fs.h -header-y += cuda.h -header-y += cyclades.h -header-y += cycx_cfm.h -header-y += dcbnl.h -header-y += dccp.h -header-y += devlink.h -header-y += dlmconstants.h -header-y += dlm_device.h -header-y += dlm.h -header-y += dlm_netlink.h -header-y += dlm_plock.h -header-y += dm-ioctl.h -header-y += dm-log-userspace.h -header-y += dma-buf.h -header-y += dn.h -header-y += dqblk_xfs.h -header-y += edd.h -header-y += efs_fs_sb.h -header-y += elfcore.h -header-y += elf-em.h -header-y += elf-fdpic.h -header-y += elf.h -header-y += errno.h -header-y += errqueue.h -header-y += ethtool.h -header-y += eventpoll.h -header-y += fadvise.h -header-y += falloc.h -header-y += fanotify.h -header-y += fb.h -header-y += fcntl.h -header-y += fd.h -header-y += fdreg.h -header-y += fib_rules.h -header-y += fiemap.h -header-y += filter.h -header-y += firewire-cdev.h -header-y += firewire-constants.h -header-y += flat.h -header-y += fou.h -header-y += fs.h -header-y += fsl_hypervisor.h -header-y += fuse.h -header-y += futex.h -header-y += gameport.h -header-y += genetlink.h -header-y += gen_stats.h -header-y += gfs2_ondisk.h -header-y += gigaset_dev.h -header-y += gpio.h -header-y += gsmmux.h -header-y += gtp.h -header-y += hdlcdrv.h -header-y += hdlc.h -header-y += hdreg.h -header-y += hiddev.h -header-y += hid.h -header-y += hidraw.h -header-y += hpet.h -header-y += hsr_netlink.h -header-y += hyperv.h -header-y += hysdn_if.h -header-y += i2c-dev.h -header-y += i2c.h -header-y += i2o-dev.h -header-y += i8k.h -header-y += icmp.h -header-y += icmpv6.h -header-y += if_addr.h -header-y += if_addrlabel.h -header-y += if_alg.h -header-y += if_arcnet.h -header-y += if_arp.h -header-y += if_bonding.h -header-y += if_bridge.h -header-y += if_cablemodem.h -header-y += if_eql.h -header-y += if_ether.h -header-y += if_fc.h -header-y += if_fddi.h -header-y += if_frad.h -header-y += if.h -header-y += if_hippi.h -header-y += if_infiniband.h -header-y += if_link.h -header-y += if_ltalk.h -header-y += if_macsec.h -header-y += if_packet.h -header-y += if_phonet.h -header-y += if_plip.h -header-y += if_ppp.h -header-y += if_pppol2tp.h -header-y += if_pppox.h -header-y += if_slip.h -header-y += if_team.h -header-y += if_tun.h -header-y += if_tunnel.h -header-y += if_vlan.h -header-y += if_x25.h -header-y += ife.h -header-y += igmp.h -header-y += ila.h -header-y += in6.h -header-y += inet_diag.h -header-y += in.h -header-y += inotify.h -header-y += input.h -header-y += input-event-codes.h -header-y += in_route.h -header-y += ioctl.h -header-y += ip6_tunnel.h -header-y += ipc.h -header-y += ip.h -header-y += ipmi.h -header-y += ipmi_msgdefs.h -header-y += ipsec.h -header-y += ipv6.h -header-y += ipv6_route.h -header-y += ip_vs.h -header-y += ipx.h -header-y += irda.h -header-y += irqnr.h -header-y += isdn_divertif.h -header-y += isdn.h -header-y += isdnif.h -header-y += isdn_ppp.h -header-y += iso_fs.h -header-y += ivtvfb.h -header-y += ivtv.h -header-y += ixjuser.h -header-y += jffs2.h -header-y += joystick.h -header-y += kcmp.h -header-y += kdev_t.h -header-y += kd.h -header-y += kernelcapi.h -header-y += kernel.h -header-y += kernel-page-flags.h -header-y += kexec.h -header-y += keyboard.h -header-y += keyctl.h - -ifneq ($(wildcard 
$(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm.h \ - $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h),) -header-y += kvm.h +ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm.h),) +no-export-headers += kvm.h endif - -ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h \ - $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h),) -header-y += kvm_para.h +ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),) +no-export-headers += kvm_para.h endif - -header-y += hw_breakpoint.h -header-y += l2tp.h -header-y += libc-compat.h -header-y += lirc.h -header-y += limits.h -header-y += llc.h -header-y += loop.h -header-y += lp.h -header-y += lwtunnel.h -header-y += magic.h -header-y += major.h -header-y += map_to_7segment.h -header-y += matroxfb.h -header-y += mdio.h -header-y += media.h -header-y += media-bus-format.h -header-y += mei.h -header-y += membarrier.h -header-y += memfd.h -header-y += mempolicy.h -header-y += meye.h -header-y += mic_common.h -header-y += mic_ioctl.h -header-y += mii.h -header-y += minix_fs.h -header-y += mman.h -header-y += mmtimer.h -header-y += mpls.h -header-y += mpls_iptunnel.h -header-y += mqueue.h -header-y += mroute6.h -header-y += mroute.h -header-y += msdos_fs.h -header-y += msg.h -header-y += mtio.h -header-y += nbd.h -header-y += ncp_fs.h -header-y += ncp.h -header-y += ncp_mount.h -header-y += ncp_no.h -header-y += ndctl.h -header-y += neighbour.h -header-y += netconf.h -header-y += netdevice.h -header-y += net_dropmon.h -header-y += netfilter_arp.h -header-y += netfilter_bridge.h -header-y += netfilter_decnet.h -header-y += netfilter.h -header-y += netfilter_ipv4.h -header-y += netfilter_ipv6.h -header-y += net.h -header-y += netlink_diag.h -header-y += netlink.h -header-y += netrom.h -header-y += net_namespace.h -header-y += net_tstamp.h -header-y += nfc.h -header-y += psample.h -header-y += nfs2.h -header-y += nfs3.h -header-y += nfs4.h -header-y += nfs4_mount.h -header-y += nfsacl.h -header-y += nfs_fs.h -header-y += nfs.h -header-y += nfs_idmap.h -header-y += nfs_mount.h -header-y += nl80211.h -header-y += n_r3964.h -header-y += nubus.h -header-y += nvme_ioctl.h -header-y += nvram.h -header-y += omap3isp.h -header-y += omapfb.h -header-y += oom.h -header-y += openvswitch.h -header-y += packet_diag.h -header-y += param.h -header-y += parport.h -header-y += patchkey.h -header-y += pci.h -header-y += pci_regs.h -header-y += pcitest.h -header-y += perf_event.h -header-y += personality.h -header-y += pfkeyv2.h -header-y += pg.h -header-y += phantom.h -header-y += phonet.h -header-y += pktcdvd.h -header-y += pkt_cls.h -header-y += pkt_sched.h -header-y += pmu.h -header-y += poll.h -header-y += posix_acl.h -header-y += posix_acl_xattr.h -header-y += posix_types.h -header-y += ppdev.h -header-y += ppp-comp.h -header-y += ppp_defs.h -header-y += ppp-ioctl.h -header-y += pps.h -header-y += prctl.h -header-y += psci.h -header-y += ptp_clock.h -header-y += ptrace.h -header-y += qnx4_fs.h -header-y += qnxtypes.h -header-y += quota.h -header-y += radeonfb.h -header-y += random.h -header-y += raw.h -header-y += rds.h -header-y += reboot.h -header-y += reiserfs_fs.h -header-y += reiserfs_xattr.h -header-y += resource.h -header-y += rfkill.h -header-y += rio_cm_cdev.h -header-y += rio_mport_cdev.h -header-y += romfs_fs.h -header-y += rose.h -header-y += route.h -header-y += rtc.h -header-y += rtnetlink.h -header-y += scc.h -header-y += sched.h -header-y += scif_ioctl.h -header-y += screen_info.h -header-y += sctp.h 
-header-y += sdla.h -header-y += seccomp.h -header-y += securebits.h -header-y += seg6_genl.h -header-y += seg6.h -header-y += seg6_hmac.h -header-y += seg6_iptunnel.h -header-y += selinux_netlink.h -header-y += sem.h -header-y += serial_core.h -header-y += serial.h -header-y += serial_reg.h -header-y += serio.h -header-y += shm.h -header-y += signalfd.h -header-y += signal.h -header-y += smiapp.h -header-y += snmp.h -header-y += sock_diag.h -header-y += socket.h -header-y += sockios.h -header-y += sonet.h -header-y += sonypi.h -header-y += soundcard.h -header-y += sound.h -header-y += stat.h -header-y += stddef.h -header-y += string.h -header-y += suspend_ioctls.h -header-y += swab.h -header-y += synclink.h -header-y += sync_file.h -header-y += sysctl.h -header-y += sysinfo.h -header-y += target_core_user.h -header-y += taskstats.h -header-y += tcp.h -header-y += tcp_metrics.h -header-y += telephony.h -header-y += termios.h -header-y += thermal.h -header-y += time.h -header-y += timerfd.h -header-y += times.h -header-y += timex.h -header-y += tiocl.h -header-y += tipc_config.h -header-y += tipc_netlink.h -header-y += tipc.h -header-y += toshiba.h -header-y += tty_flags.h -header-y += tty.h -header-y += types.h -header-y += udf_fs_i.h -header-y += udp.h -header-y += uhid.h -header-y += uinput.h -header-y += uio.h -header-y += uleds.h -header-y += ultrasound.h -header-y += un.h -header-y += unistd.h -header-y += unix_diag.h -header-y += usbdevice_fs.h -header-y += usbip.h -header-y += userio.h -header-y += utime.h -header-y += utsname.h -header-y += uuid.h -header-y += uvcvideo.h -header-y += v4l2-common.h -header-y += v4l2-controls.h -header-y += v4l2-dv-timings.h -header-y += v4l2-mediabus.h -header-y += v4l2-subdev.h -header-y += veth.h -header-y += vfio.h -header-y += vhost.h -header-y += videodev2.h -header-y += virtio_9p.h -header-y += virtio_balloon.h -header-y += virtio_blk.h -header-y += virtio_config.h -header-y += virtio_console.h -header-y += virtio_gpu.h -header-y += virtio_ids.h -header-y += virtio_input.h -header-y += virtio_mmio.h -header-y += virtio_net.h -header-y += virtio_pci.h -header-y += virtio_ring.h -header-y += virtio_rng.h -header-y += virtio_scsi.h -header-y += virtio_types.h -header-y += virtio_vsock.h -header-y += virtio_crypto.h -header-y += vm_sockets.h -header-y += vsockmon.h -header-y += vt.h -header-y += vtpm_proxy.h -header-y += wait.h -header-y += wanrouter.h -header-y += watchdog.h -header-y += wimax.h -header-y += wireless.h -header-y += x25.h -header-y += xattr.h -header-y += xfrm.h -header-y += xilinx-v4l2-controls.h -header-y += zorro.h -header-y += zorro_ids.h -header-y += userfaultfd.h diff --git a/include/uapi/linux/android/Kbuild b/include/uapi/linux/android/Kbuild deleted file mode 100644 index ca011eec252a..000000000000 --- a/include/uapi/linux/android/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# UAPI Header export list -header-y += binder.h diff --git a/include/uapi/linux/byteorder/Kbuild b/include/uapi/linux/byteorder/Kbuild deleted file mode 100644 index 619225b9ff2e..000000000000 --- a/include/uapi/linux/byteorder/Kbuild +++ /dev/null @@ -1,3 +0,0 @@ -# UAPI Header export list -header-y += big_endian.h -header-y += little_endian.h diff --git a/include/uapi/linux/caif/Kbuild b/include/uapi/linux/caif/Kbuild deleted file mode 100644 index 43396612d3a3..000000000000 --- a/include/uapi/linux/caif/Kbuild +++ /dev/null @@ -1,3 +0,0 @@ -# UAPI Header export list -header-y += caif_socket.h -header-y += if_caif.h diff --git 
a/include/uapi/linux/can/Kbuild b/include/uapi/linux/can/Kbuild deleted file mode 100644 index 21c91bf25a29..000000000000 --- a/include/uapi/linux/can/Kbuild +++ /dev/null @@ -1,6 +0,0 @@ -# UAPI Header export list -header-y += bcm.h -header-y += error.h -header-y += gw.h -header-y += netlink.h -header-y += raw.h diff --git a/include/uapi/linux/dvb/Kbuild b/include/uapi/linux/dvb/Kbuild deleted file mode 100644 index d40942cfc627..000000000000 --- a/include/uapi/linux/dvb/Kbuild +++ /dev/null @@ -1,9 +0,0 @@ -# UAPI Header export list -header-y += audio.h -header-y += ca.h -header-y += dmx.h -header-y += frontend.h -header-y += net.h -header-y += osd.h -header-y += version.h -header-y += video.h diff --git a/include/uapi/linux/hdlc/Kbuild b/include/uapi/linux/hdlc/Kbuild deleted file mode 100644 index 8c1d2cb75e33..000000000000 --- a/include/uapi/linux/hdlc/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# UAPI Header export list -header-y += ioctl.h diff --git a/include/uapi/linux/hsi/Kbuild b/include/uapi/linux/hsi/Kbuild deleted file mode 100644 index a16a00544258..000000000000 --- a/include/uapi/linux/hsi/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# UAPI Header export list -header-y += hsi_char.h cs-protocol.h diff --git a/include/uapi/linux/iio/Kbuild b/include/uapi/linux/iio/Kbuild deleted file mode 100644 index 86f76d84c44f..000000000000 --- a/include/uapi/linux/iio/Kbuild +++ /dev/null @@ -1,3 +0,0 @@ -# UAPI Header export list -header-y += events.h -header-y += types.h diff --git a/include/uapi/linux/isdn/Kbuild b/include/uapi/linux/isdn/Kbuild deleted file mode 100644 index 89e52850bf29..000000000000 --- a/include/uapi/linux/isdn/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# UAPI Header export list -header-y += capicmd.h diff --git a/include/uapi/linux/mmc/Kbuild b/include/uapi/linux/mmc/Kbuild deleted file mode 100644 index 8c1d2cb75e33..000000000000 --- a/include/uapi/linux/mmc/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# UAPI Header export list -header-y += ioctl.h diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild deleted file mode 100644 index 03f194aeadc5..000000000000 --- a/include/uapi/linux/netfilter/Kbuild +++ /dev/null @@ -1,89 +0,0 @@ -# UAPI Header export list -header-y += ipset/ -header-y += nf_conntrack_common.h -header-y += nf_conntrack_ftp.h -header-y += nf_conntrack_sctp.h -header-y += nf_conntrack_tcp.h -header-y += nf_conntrack_tuple_common.h -header-y += nf_log.h -header-y += nf_tables.h -header-y += nf_tables_compat.h -header-y += nf_nat.h -header-y += nfnetlink.h -header-y += nfnetlink_acct.h -header-y += nfnetlink_compat.h -header-y += nfnetlink_conntrack.h -header-y += nfnetlink_cthelper.h -header-y += nfnetlink_cttimeout.h -header-y += nfnetlink_log.h -header-y += nfnetlink_queue.h -header-y += x_tables.h -header-y += xt_AUDIT.h -header-y += xt_CHECKSUM.h -header-y += xt_CLASSIFY.h -header-y += xt_CONNMARK.h -header-y += xt_CONNSECMARK.h -header-y += xt_CT.h -header-y += xt_DSCP.h -header-y += xt_HMARK.h -header-y += xt_IDLETIMER.h -header-y += xt_LED.h -header-y += xt_LOG.h -header-y += xt_MARK.h -header-y += xt_NFLOG.h -header-y += xt_NFQUEUE.h -header-y += xt_RATEEST.h -header-y += xt_SECMARK.h -header-y += xt_SYNPROXY.h -header-y += xt_TCPMSS.h -header-y += xt_TCPOPTSTRIP.h -header-y += xt_TEE.h -header-y += xt_TPROXY.h -header-y += xt_addrtype.h -header-y += xt_bpf.h -header-y += xt_cgroup.h -header-y += xt_cluster.h -header-y += xt_comment.h -header-y += xt_connbytes.h -header-y += xt_connlabel.h -header-y += xt_connlimit.h -header-y += 
xt_connmark.h -header-y += xt_conntrack.h -header-y += xt_cpu.h -header-y += xt_dccp.h -header-y += xt_devgroup.h -header-y += xt_dscp.h -header-y += xt_ecn.h -header-y += xt_esp.h -header-y += xt_hashlimit.h -header-y += xt_helper.h -header-y += xt_ipcomp.h -header-y += xt_iprange.h -header-y += xt_ipvs.h -header-y += xt_l2tp.h -header-y += xt_length.h -header-y += xt_limit.h -header-y += xt_mac.h -header-y += xt_mark.h -header-y += xt_multiport.h -header-y += xt_nfacct.h -header-y += xt_osf.h -header-y += xt_owner.h -header-y += xt_physdev.h -header-y += xt_pkttype.h -header-y += xt_policy.h -header-y += xt_quota.h -header-y += xt_rateest.h -header-y += xt_realm.h -header-y += xt_recent.h -header-y += xt_rpfilter.h -header-y += xt_sctp.h -header-y += xt_set.h -header-y += xt_socket.h -header-y += xt_state.h -header-y += xt_statistic.h -header-y += xt_string.h -header-y += xt_tcpmss.h -header-y += xt_tcpudp.h -header-y += xt_time.h -header-y += xt_u32.h diff --git a/include/uapi/linux/netfilter/ipset/Kbuild b/include/uapi/linux/netfilter/ipset/Kbuild deleted file mode 100644 index d2680423d9ab..000000000000 --- a/include/uapi/linux/netfilter/ipset/Kbuild +++ /dev/null @@ -1,5 +0,0 @@ -# UAPI Header export list -header-y += ip_set.h -header-y += ip_set_bitmap.h -header-y += ip_set_hash.h -header-y += ip_set_list.h diff --git a/include/uapi/linux/netfilter_arp/Kbuild b/include/uapi/linux/netfilter_arp/Kbuild deleted file mode 100644 index 62d5637cc0ac..000000000000 --- a/include/uapi/linux/netfilter_arp/Kbuild +++ /dev/null @@ -1,3 +0,0 @@ -# UAPI Header export list -header-y += arp_tables.h -header-y += arpt_mangle.h diff --git a/include/uapi/linux/netfilter_bridge/Kbuild b/include/uapi/linux/netfilter_bridge/Kbuild deleted file mode 100644 index 0fbad8ef96de..000000000000 --- a/include/uapi/linux/netfilter_bridge/Kbuild +++ /dev/null @@ -1,18 +0,0 @@ -# UAPI Header export list -header-y += ebt_802_3.h -header-y += ebt_among.h -header-y += ebt_arp.h -header-y += ebt_arpreply.h -header-y += ebt_ip.h -header-y += ebt_ip6.h -header-y += ebt_limit.h -header-y += ebt_log.h -header-y += ebt_mark_m.h -header-y += ebt_mark_t.h -header-y += ebt_nat.h -header-y += ebt_nflog.h -header-y += ebt_pkttype.h -header-y += ebt_redirect.h -header-y += ebt_stp.h -header-y += ebt_vlan.h -header-y += ebtables.h diff --git a/include/uapi/linux/netfilter_ipv4/Kbuild b/include/uapi/linux/netfilter_ipv4/Kbuild deleted file mode 100644 index ecb291df390e..000000000000 --- a/include/uapi/linux/netfilter_ipv4/Kbuild +++ /dev/null @@ -1,10 +0,0 @@ -# UAPI Header export list -header-y += ip_tables.h -header-y += ipt_CLUSTERIP.h -header-y += ipt_ECN.h -header-y += ipt_LOG.h -header-y += ipt_REJECT.h -header-y += ipt_TTL.h -header-y += ipt_ah.h -header-y += ipt_ecn.h -header-y += ipt_ttl.h diff --git a/include/uapi/linux/netfilter_ipv6/Kbuild b/include/uapi/linux/netfilter_ipv6/Kbuild deleted file mode 100644 index 75a668ca2353..000000000000 --- a/include/uapi/linux/netfilter_ipv6/Kbuild +++ /dev/null @@ -1,13 +0,0 @@ -# UAPI Header export list -header-y += ip6_tables.h -header-y += ip6t_HL.h -header-y += ip6t_LOG.h -header-y += ip6t_NPT.h -header-y += ip6t_REJECT.h -header-y += ip6t_ah.h -header-y += ip6t_frag.h -header-y += ip6t_hl.h -header-y += ip6t_ipv6header.h -header-y += ip6t_mh.h -header-y += ip6t_opts.h -header-y += ip6t_rt.h diff --git a/include/uapi/linux/nfsd/Kbuild b/include/uapi/linux/nfsd/Kbuild deleted file mode 100644 index c11bc404053c..000000000000 --- a/include/uapi/linux/nfsd/Kbuild +++ /dev/null @@ 
-1,6 +0,0 @@ -# UAPI Header export list -header-y += cld.h -header-y += debug.h -header-y += export.h -header-y += nfsfh.h -header-y += stats.h diff --git a/include/uapi/linux/raid/Kbuild b/include/uapi/linux/raid/Kbuild deleted file mode 100644 index e2c3d25405d7..000000000000 --- a/include/uapi/linux/raid/Kbuild +++ /dev/null @@ -1,3 +0,0 @@ -# UAPI Header export list -header-y += md_p.h -header-y += md_u.h diff --git a/include/uapi/linux/spi/Kbuild b/include/uapi/linux/spi/Kbuild deleted file mode 100644 index 0cc747eff165..000000000000 --- a/include/uapi/linux/spi/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# UAPI Header export list -header-y += spidev.h diff --git a/include/uapi/linux/sunrpc/Kbuild b/include/uapi/linux/sunrpc/Kbuild deleted file mode 100644 index 8e02e47c20fb..000000000000 --- a/include/uapi/linux/sunrpc/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# UAPI Header export list -header-y += debug.h diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild deleted file mode 100644 index ba62ddf0e58a..000000000000 --- a/include/uapi/linux/tc_act/Kbuild +++ /dev/null @@ -1,16 +0,0 @@ -# UAPI Header export list -header-y += tc_csum.h -header-y += tc_defact.h -header-y += tc_gact.h -header-y += tc_ipt.h -header-y += tc_mirred.h -header-y += tc_sample.h -header-y += tc_nat.h -header-y += tc_pedit.h -header-y += tc_skbedit.h -header-y += tc_vlan.h -header-y += tc_bpf.h -header-y += tc_connmark.h -header-y += tc_ife.h -header-y += tc_tunnel_key.h -header-y += tc_skbmod.h diff --git a/include/uapi/linux/tc_ematch/Kbuild b/include/uapi/linux/tc_ematch/Kbuild deleted file mode 100644 index 53fca3925535..000000000000 --- a/include/uapi/linux/tc_ematch/Kbuild +++ /dev/null @@ -1,5 +0,0 @@ -# UAPI Header export list -header-y += tc_em_cmp.h -header-y += tc_em_meta.h -header-y += tc_em_nbyte.h -header-y += tc_em_text.h diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild deleted file mode 100644 index 4cc4d6e7e523..000000000000 --- a/include/uapi/linux/usb/Kbuild +++ /dev/null @@ -1,12 +0,0 @@ -# UAPI Header export list -header-y += audio.h -header-y += cdc.h -header-y += cdc-wdm.h -header-y += ch11.h -header-y += ch9.h -header-y += functionfs.h -header-y += g_printer.h -header-y += gadgetfs.h -header-y += midi.h -header-y += tmc.h -header-y += video.h diff --git a/include/uapi/linux/wimax/Kbuild b/include/uapi/linux/wimax/Kbuild deleted file mode 100644 index 1c97be49971f..000000000000 --- a/include/uapi/linux/wimax/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# UAPI Header export list -header-y += i2400m.h diff --git a/include/uapi/misc/Kbuild b/include/uapi/misc/Kbuild deleted file mode 100644 index e96cae7d58c9..000000000000 --- a/include/uapi/misc/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# misc Header export list -header-y += cxl.h diff --git a/include/uapi/mtd/Kbuild b/include/uapi/mtd/Kbuild deleted file mode 100644 index 5a691e10cd0e..000000000000 --- a/include/uapi/mtd/Kbuild +++ /dev/null @@ -1,6 +0,0 @@ -# UAPI Header export list -header-y += inftl-user.h -header-y += mtd-abi.h -header-y += mtd-user.h -header-y += nftl-user.h -header-y += ubi-user.h diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild deleted file mode 100644 index 1e0af1ff75c3..000000000000 --- a/include/uapi/rdma/Kbuild +++ /dev/null @@ -1,20 +0,0 @@ -# UAPI Header export list -header-y += ib_user_cm.h -header-y += rdma_user_ioctl.h -header-y += ib_user_mad.h -header-y += ib_user_sa.h -header-y += ib_user_verbs.h -header-y += rdma_netlink.h -header-y += rdma_user_cm.h 
-header-y += hfi/ -header-y += rdma_user_rxe.h -header-y += cxgb3-abi.h -header-y += cxgb4-abi.h -header-y += mlx4-abi.h -header-y += mlx5-abi.h -header-y += mthca-abi.h -header-y += nes-abi.h -header-y += ocrdma-abi.h -header-y += hns-abi.h -header-y += vmw_pvrdma-abi.h -header-y += qedr-abi.h diff --git a/include/uapi/rdma/hfi/Kbuild b/include/uapi/rdma/hfi/Kbuild deleted file mode 100644 index b65b0b3a5f63..000000000000 --- a/include/uapi/rdma/hfi/Kbuild +++ /dev/null @@ -1,3 +0,0 @@ -# UAPI Header export list -header-y += hfi1_user.h -header-y += hfi1_ioctl.h diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild deleted file mode 100644 index d791e0ad509d..000000000000 --- a/include/uapi/scsi/Kbuild +++ /dev/null @@ -1,6 +0,0 @@ -# UAPI Header export list -header-y += fc/ -header-y += scsi_bsg_fc.h -header-y += scsi_netlink.h -header-y += scsi_netlink_fc.h -header-y += cxlflash_ioctl.h diff --git a/include/uapi/scsi/fc/Kbuild b/include/uapi/scsi/fc/Kbuild deleted file mode 100644 index 5ead9fac265c..000000000000 --- a/include/uapi/scsi/fc/Kbuild +++ /dev/null @@ -1,5 +0,0 @@ -# UAPI Header export list -header-y += fc_els.h -header-y += fc_fs.h -header-y += fc_gs.h -header-y += fc_ns.h diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild deleted file mode 100644 index 9578d8bdbf31..000000000000 --- a/include/uapi/sound/Kbuild +++ /dev/null @@ -1,16 +0,0 @@ -# UAPI Header export list -header-y += asequencer.h -header-y += asoc.h -header-y += asound.h -header-y += asound_fm.h -header-y += compress_offload.h -header-y += compress_params.h -header-y += emu10k1.h -header-y += firewire.h -header-y += hdsp.h -header-y += hdspm.h -header-y += sb16_csp.h -header-y += sfnt_info.h -header-y += tlv.h -header-y += usb_stream.h -header-y += snd_sst_tokens.h diff --git a/include/uapi/video/Kbuild b/include/uapi/video/Kbuild deleted file mode 100644 index ac7203bb32cc..000000000000 --- a/include/uapi/video/Kbuild +++ /dev/null @@ -1,4 +0,0 @@ -# UAPI Header export list -header-y += edid.h -header-y += sisfb.h -header-y += uvesafb.h diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild deleted file mode 100644 index 5c459628e8c7..000000000000 --- a/include/uapi/xen/Kbuild +++ /dev/null @@ -1,5 +0,0 @@ -# UAPI Header export list -header-y += evtchn.h -header-y += gntalloc.h -header-y += gntdev.h -header-y += privcmd.h diff --git a/include/video/Kbuild b/include/video/Kbuild deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst index ca5d439c9abf..20be1fbc19cc 100644 --- a/scripts/Makefile.headersinst +++ b/scripts/Makefile.headersinst @@ -1,17 +1,19 @@ # ========================================================================== # Installing headers # -# header-y - list files to be installed. They are preprocessed -# to remove __KERNEL__ section of the file -# genhdr-y - Same as header-y but in a generated/ directory +# All headers under include/uapi, include/generated/uapi, +# arch//include/uapi/asm and arch//include/generated/uapi/asm are +# exported. +# They are preprocessed to remove __KERNEL__ section of the file. 
# # ========================================================================== # generated header directory gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj))) +# Kbuild file is optional kbuild-file := $(srctree)/$(obj)/Kbuild -include $(kbuild-file) +-include $(kbuild-file) # called may set destination dir (when installing to asm/) _dst := $(if $(dst),$(dst),$(obj)) @@ -25,9 +27,15 @@ include scripts/Kbuild.include installdir := $(INSTALL_HDR_PATH)/$(subst uapi/,,$(_dst)) -header-y := $(sort $(header-y)) -subdirs := $(patsubst %/,%,$(filter %/, $(header-y))) -header-y := $(filter-out %/, $(header-y)) +srcdir := $(srctree)/$(obj) +gendir := $(objtree)/$(gen) +subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) +subdirs += $(subdir-y) +header-files := $(notdir $(wildcard $(srcdir)/*.h)) +header-files += $(notdir $(wildcard $(srcdir)/*.agh)) +header-files := $(filter-out $(no-export-headers), $(header-files)) +genhdr-files := $(notdir $(wildcard $(gendir)/*.h)) +genhdr-files := $(filter-out $(header-files), $(genhdr-files)) # files used to track state of install/check install-file := $(installdir)/.install @@ -35,25 +43,20 @@ check-file := $(installdir)/.check # generic-y list all files an architecture uses from asm-generic # Use this to build a list of headers which require a wrapper -wrapper-files := $(filter $(header-y), $(generic-y)) - -srcdir := $(srctree)/$(obj) -gendir := $(objtree)/$(gen) +generic-files := $(notdir $(wildcard $(srctree)/include/uapi/asm-generic/*.h)) +wrapper-files := $(filter $(generic-files), $(generic-y)) +wrapper-files := $(filter-out $(header-files), $(wrapper-files)) # all headers files for this dir -header-y := $(filter-out $(generic-y), $(header-y)) -all-files := $(header-y) $(genhdr-y) $(wrapper-files) +all-files := $(header-files) $(genhdr-files) $(wrapper-files) output-files := $(addprefix $(installdir)/, $(all-files)) -# Check that all expected files exist -$(foreach hdr, $(header-y), \ - $(if $(wildcard $(srcdir)/$(hdr)),, \ - $(error Missing UAPI file $(srcdir)/$(hdr)) \ - )) -$(foreach hdr, $(genhdr-y), \ - $(if $(wildcard $(gendir)/$(hdr)),, \ - $(error Missing generated UAPI file $(gendir)/$(hdr)) \ - )) +ifneq ($(mandatory-y),) +missing := $(filter-out $(all-files),$(mandatory-y)) +ifneq ($(missing),) +$(error Some mandatory headers ($(missing)) are missing in $(obj)) +endif +endif # Work out what needs to be removed oldheaders := $(patsubst $(installdir)/%,%,$(wildcard $(installdir)/*.h)) @@ -67,8 +70,8 @@ printdir = $(patsubst $(INSTALL_HDR_PATH)/%/,%,$(dir $@)) quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\ file$(if $(word 2, $(all-files)),s)) cmd_install = \ - $(CONFIG_SHELL) $< $(installdir) $(srcdir) $(header-y); \ - $(CONFIG_SHELL) $< $(installdir) $(gendir) $(genhdr-y); \ + $(CONFIG_SHELL) $< $(installdir) $(srcdir) $(header-files); \ + $(CONFIG_SHELL) $< $(installdir) $(gendir) $(genhdr-files); \ for F in $(wrapper-files); do \ echo "\#include " > $(installdir)/$$F; \ done; \ @@ -95,8 +98,8 @@ __headersinst: $(subdirs) $(install-file) targets += $(install-file) $(install-file): scripts/headers_install.sh \ - $(addprefix $(srcdir)/,$(header-y)) \ - $(addprefix $(gendir)/,$(genhdr-y)) FORCE + $(addprefix $(srcdir)/,$(header-files)) \ + $(addprefix $(gendir)/,$(genhdr-files)) FORCE $(if $(unwanted),$(call cmd,remove),) $(if $(wildcard $(dir $@)),,$(shell mkdir -p $(dir $@))) $(call if_changed,install) -- cgit v1.2.3 From 3ae3d67ba705c754a3c91ac009f9ce73a0e7286a Mon Sep 17 00:00:00 2001 From: 
Vishal Verma Date: Wed, 10 May 2017 15:01:30 -0600 Subject: libnvdimm: add an atomic vs process context flag to rw_bytes nsio_rw_bytes can clear media errors, but this cannot be done while we are in an atomic context due to locking within ACPI. From the BTT, ->rw_bytes may be called either from atomic or process context depending on whether the calls happen during initialization or during IO. During init, we want to ensure error clearing happens, and the flag marking process context allows nsio_rw_bytes to do that. When called during IO, we're in atomic context, and error clearing can be skipped. Cc: Dan Williams Signed-off-by: Vishal Verma Signed-off-by: Dan Williams --- drivers/nvdimm/blk.c | 3 ++- drivers/nvdimm/btt.c | 67 +++++++++++++++++++++++++---------------------- drivers/nvdimm/btt_devs.c | 2 +- drivers/nvdimm/claim.c | 6 +++-- drivers/nvdimm/nd.h | 1 + drivers/nvdimm/pfn_devs.c | 4 +-- include/linux/nd.h | 12 +++++---- 7 files changed, 53 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 9faaa9694d87..822198a75e96 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -218,7 +218,8 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) } static int nsblk_rw_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *iobuf, size_t n, int rw) + resource_size_t offset, void *iobuf, size_t n, int rw, + unsigned long flags) { struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev); struct nd_blk_region *ndbr = to_ndbr(nsblk); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 368795aad5c9..aa977cd4869d 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -32,25 +32,25 @@ enum log_ent_request { }; static int arena_read_bytes(struct arena_info *arena, resource_size_t offset, - void *buf, size_t n) + void *buf, size_t n, unsigned long flags) { struct nd_btt *nd_btt = arena->nd_btt; struct nd_namespace_common *ndns = nd_btt->ndns; /* arena offsets are 4K from the base of the device */ offset += SZ_4K; - return nvdimm_read_bytes(ndns, offset, buf, n); + return nvdimm_read_bytes(ndns, offset, buf, n, flags); } static int arena_write_bytes(struct arena_info *arena, resource_size_t offset, - void *buf, size_t n) + void *buf, size_t n, unsigned long flags) { struct nd_btt *nd_btt = arena->nd_btt; struct nd_namespace_common *ndns = nd_btt->ndns; /* arena offsets are 4K from the base of the device */ offset += SZ_4K; - return nvdimm_write_bytes(ndns, offset, buf, n); + return nvdimm_write_bytes(ndns, offset, buf, n, flags); } static int btt_info_write(struct arena_info *arena, struct btt_sb *super) @@ -58,19 +58,19 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super) int ret; ret = arena_write_bytes(arena, arena->info2off, super, - sizeof(struct btt_sb)); + sizeof(struct btt_sb), 0); if (ret) return ret; return arena_write_bytes(arena, arena->infooff, super, - sizeof(struct btt_sb)); + sizeof(struct btt_sb), 0); } static int btt_info_read(struct arena_info *arena, struct btt_sb *super) { WARN_ON(!super); return arena_read_bytes(arena, arena->infooff, super, - sizeof(struct btt_sb)); + sizeof(struct btt_sb), 0); } /* @@ -79,16 +79,17 @@ static int btt_info_read(struct arena_info *arena, struct btt_sb *super) * mapping is in little-endian * mapping contains 'E' and 'Z' flags as desired */ -static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping) +static int __btt_map_write(struct arena_info *arena, u32 lba, 
__le32 mapping, + unsigned long flags) { u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); WARN_ON(lba >= arena->external_nlba); - return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE); + return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags); } static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping, - u32 z_flag, u32 e_flag) + u32 z_flag, u32 e_flag, unsigned long rwb_flags) { u32 ze; __le32 mapping_le; @@ -127,11 +128,11 @@ static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping, } mapping_le = cpu_to_le32(mapping); - return __btt_map_write(arena, lba, mapping_le); + return __btt_map_write(arena, lba, mapping_le, rwb_flags); } static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, - int *trim, int *error) + int *trim, int *error, unsigned long rwb_flags) { int ret; __le32 in; @@ -140,7 +141,7 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, WARN_ON(lba >= arena->external_nlba); - ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE); + ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags); if (ret) return ret; @@ -189,7 +190,7 @@ static int btt_log_read_pair(struct arena_info *arena, u32 lane, WARN_ON(!ent); return arena_read_bytes(arena, arena->logoff + (2 * lane * LOG_ENT_SIZE), ent, - 2 * LOG_ENT_SIZE); + 2 * LOG_ENT_SIZE, 0); } static struct dentry *debugfs_root; @@ -335,7 +336,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane, * btt_flog_write is the wrapper for updating the freelist elements */ static int __btt_log_write(struct arena_info *arena, u32 lane, - u32 sub, struct log_entry *ent) + u32 sub, struct log_entry *ent, unsigned long flags) { int ret; /* @@ -350,13 +351,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane, void *src = ent; /* split the 16B write into atomic, durable halves */ - ret = arena_write_bytes(arena, ns_off, src, log_half); + ret = arena_write_bytes(arena, ns_off, src, log_half, flags); if (ret) return ret; ns_off += log_half; src += log_half; - return arena_write_bytes(arena, ns_off, src, log_half); + return arena_write_bytes(arena, ns_off, src, log_half, flags); } static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, @@ -364,7 +365,7 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, { int ret; - ret = __btt_log_write(arena, lane, sub, ent); + ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC); if (ret) return ret; @@ -397,7 +398,7 @@ static int btt_map_init(struct arena_info *arena) size_t size = min(mapsize, chunk_size); ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf, - size); + size, 0); if (ret) goto free; @@ -428,10 +429,10 @@ static int btt_log_init(struct arena_info *arena) log.old_map = cpu_to_le32(arena->external_nlba + i); log.new_map = cpu_to_le32(arena->external_nlba + i); log.seq = cpu_to_le32(LOG_SEQ_INIT); - ret = __btt_log_write(arena, i, 0, &log); + ret = __btt_log_write(arena, i, 0, &log, 0); if (ret) return ret; - ret = __btt_log_write(arena, i, 1, &zerolog); + ret = __btt_log_write(arena, i, 1, &zerolog, 0); if (ret) return ret; } @@ -470,7 +471,7 @@ static int btt_freelist_init(struct arena_info *arena) /* Check if map recovery is needed */ ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry, - NULL, NULL); + NULL, NULL, 0); if (ret) return ret; if ((le32_to_cpu(log_new.new_map) != map_entry) && @@ -480,7 +481,7 @@ static int btt_freelist_init(struct arena_info *arena) * to complete the map write. 
So fix up the map. */ ret = btt_map_write(arena, le32_to_cpu(log_new.lba), - le32_to_cpu(log_new.new_map), 0, 0); + le32_to_cpu(log_new.new_map), 0, 0, 0); if (ret) return ret; } @@ -875,7 +876,7 @@ static int btt_data_read(struct arena_info *arena, struct page *page, u64 nsoff = to_namespace_offset(arena, lba); void *mem = kmap_atomic(page); - ret = arena_read_bytes(arena, nsoff, mem + off, len); + ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC); kunmap_atomic(mem); return ret; @@ -888,7 +889,7 @@ static int btt_data_write(struct arena_info *arena, u32 lba, u64 nsoff = to_namespace_offset(arena, lba); void *mem = kmap_atomic(page); - ret = arena_write_bytes(arena, nsoff, mem + off, len); + ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC); kunmap_atomic(mem); return ret; @@ -931,10 +932,12 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip, mem = kmap_atomic(bv.bv_page); if (rw) ret = arena_write_bytes(arena, meta_nsoff, - mem + bv.bv_offset, cur_len); + mem + bv.bv_offset, cur_len, + NVDIMM_IO_ATOMIC); else ret = arena_read_bytes(arena, meta_nsoff, - mem + bv.bv_offset, cur_len); + mem + bv.bv_offset, cur_len, + NVDIMM_IO_ATOMIC); kunmap_atomic(mem); if (ret) @@ -976,7 +979,8 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, cur_len = min(btt->sector_size, len); - ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag); + ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag, + NVDIMM_IO_ATOMIC); if (ret) goto out_lane; @@ -1006,7 +1010,7 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, barrier(); ret = btt_map_read(arena, premap, &new_map, &t_flag, - &e_flag); + &e_flag, NVDIMM_IO_ATOMIC); if (ret) goto out_rtt; @@ -1093,7 +1097,8 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, } lock_map(arena, premap); - ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL); + ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL, + NVDIMM_IO_ATOMIC); if (ret) goto out_map; if (old_postmap >= arena->internal_nlba) { @@ -1110,7 +1115,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, if (ret) goto out_map; - ret = btt_map_write(arena, premap, new_postmap, 0, 0); + ret = btt_map_write(arena, premap, new_postmap, 0, 0, 0); if (ret) goto out_map; diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 4b76af2b8715..ae00dc0d9791 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -273,7 +273,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt, if (!btt_sb || !ndns || !nd_btt) return -ENODEV; - if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb))) + if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0)) return -ENXIO; if (nvdimm_namespace_capacity(ndns) < SZ_16M) diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 93d128da1c92..7ceb5fa4f2a1 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -228,7 +228,8 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb) EXPORT_SYMBOL(nd_sb_checksum); static int nsio_rw_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *buf, size_t size, int rw) + resource_size_t offset, void *buf, size_t size, int rw, + unsigned long flags) { struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512); @@ -259,7 +260,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, * work around this 
collision. */ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) - && (!ndns->claim || !is_nd_btt(ndns->claim))) { + && !(flags & NVDIMM_IO_ATOMIC) + && !ndns->claim) { long cleared; cleared = nvdimm_clear_poison(&ndns->dev, diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 77d032192bf7..03852d738eec 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -31,6 +31,7 @@ enum { ND_MAX_LANES = 256, SECTOR_SHIFT = 9, INT_LBASIZE_ALIGNMENT = 64, + NVDIMM_IO_ATOMIC = 1, }; struct nd_poison { diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 335c8175410b..a6c403600d19 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -357,7 +357,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) if (!is_nd_pmem(nd_pfn->dev.parent)) return -ENODEV; - if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb))) + if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0)) return -ENXIO; if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0) @@ -662,7 +662,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb); pfn_sb->checksum = cpu_to_le64(checksum); - return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)); + return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0); } /* diff --git a/include/linux/nd.h b/include/linux/nd.h index fa66aeed441a..194b8e002ea7 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -48,7 +48,7 @@ struct nd_namespace_common { struct device dev; struct device *claim; int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset, - void *buf, size_t size, int rw); + void *buf, size_t size, int rw, unsigned long flags); }; static inline struct nd_namespace_common *to_ndns(struct device *dev) @@ -134,9 +134,10 @@ static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device * * @buf is up-to-date upon return from this routine. */ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *buf, size_t size) + resource_size_t offset, void *buf, size_t size, + unsigned long flags) { - return ndns->rw_bytes(ndns, offset, buf, size, READ); + return ndns->rw_bytes(ndns, offset, buf, size, READ, flags); } /** @@ -152,9 +153,10 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns, * to media is handled internal to the @ndns driver, if at all. */ static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *buf, size_t size) + resource_size_t offset, void *buf, size_t size, + unsigned long flags) { - return ndns->rw_bytes(ndns, offset, buf, size, WRITE); + return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags); } #define MODULE_ALIAS_ND_DEVICE(type) \ -- cgit v1.2.3 From 572e0ca9b909339fbe017aaff1a225efb6db3b61 Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Fri, 12 May 2017 15:46:29 -0700 Subject: time: delete current_fs_time() All uses of the current_fs_time() function have been replaced by other time interfaces. And, its use cases can be fulfilled by current_time() or ktime_get_* variants. 
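For illustration only (not part of this patch): a minimal sketch of what the conversion looks like at a filesystem call site, assuming a hypothetical `inode` whose mtime is being updated. current_time() takes the inode and truncates to the superblock's s_time_gran internally, so the explicit superblock argument disappears.

	/* old style, removed by this patch: truncate via the superblock */
	inode->i_mtime = current_fs_time(inode->i_sb);

	/* replacement: current_time() consults inode->i_sb->s_time_gran itself */
	inode->i_mtime = current_time(inode);
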
Link: http://lkml.kernel.org/r/1491613030-11599-13-git-send-email-deepa.kernel@gmail.com Signed-off-by: Deepa Dinamani Reviewed-by: Arnd Bergmann Cc: John Stultz Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fs.h | 1 - kernel/time/time.c | 14 -------------- 2 files changed, 15 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 0ad325ed71e8..803e5a9b2654 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1431,7 +1431,6 @@ static inline void i_gid_write(struct inode *inode, gid_t gid) inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); } -extern struct timespec current_fs_time(struct super_block *sb); extern struct timespec current_time(struct inode *inode); /* diff --git a/kernel/time/time.c b/kernel/time/time.c index 6574bba44b55..49c73c6ed648 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -230,20 +230,6 @@ SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p) return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret; } -/** - * current_fs_time - Return FS time - * @sb: Superblock. - * - * Return the current time truncated to the time granularity supported by - * the fs. - */ -struct timespec current_fs_time(struct super_block *sb) -{ - struct timespec now = current_kernel_time(); - return timespec_trunc(now, sb->s_time_gran); -} -EXPORT_SYMBOL(current_fs_time); - /* * Convert jiffies to milliseconds and back. * -- cgit v1.2.3 From 8594a21cf7a8318baedbedc3fcd2967a17ddeec0 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Fri, 12 May 2017 15:46:41 -0700 Subject: mm, vmalloc: fix vmalloc users tracking properly Commit 1f5307b1e094 ("mm, vmalloc: properly track vmalloc users") has pulled asm/pgtable.h include dependency to linux/vmalloc.h and that turned out to be a bad idea for some architectures. E.g. m68k fails with In file included from arch/m68k/include/asm/pgtable_mm.h:145:0, from arch/m68k/include/asm/pgtable.h:4, from include/linux/vmalloc.h:9, from arch/m68k/kernel/module.c:9: arch/m68k/include/asm/mcf_pgtable.h: In function 'nocache_page': >> arch/m68k/include/asm/mcf_pgtable.h:339:43: error: 'init_mm' undeclared (first use in this function) #define pgd_offset_k(address) pgd_offset(&init_mm, address) as spotted by kernel build bot. nios2 fails for other reason In file included from include/asm-generic/io.h:767:0, from arch/nios2/include/asm/io.h:61, from include/linux/io.h:25, from arch/nios2/include/asm/pgtable.h:18, from include/linux/mm.h:70, from include/linux/pid_namespace.h:6, from include/linux/ptrace.h:9, from arch/nios2/include/uapi/asm/elf.h:23, from arch/nios2/include/asm/elf.h:22, from include/linux/elf.h:4, from include/linux/module.h:15, from init/main.c:16: include/linux/vmalloc.h: In function '__vmalloc_node_flags': include/linux/vmalloc.h:99:40: error: 'PAGE_KERNEL' undeclared (first use in this function); did you mean 'GFP_KERNEL'? which is due to the newly added #include , which on nios2 includes and thus and which again includes . Tweaking that around just turns out a bigger headache than necessary. This patch reverts 1f5307b1e094 and reimplements the original fix in a different way. __vmalloc_node_flags can stay static inline which will cover vmalloc* functions. We only have one external user (kvmalloc_node) and we can export __vmalloc_node_flags_caller and provide the caller directly. This is much simpler and it doesn't really need any games with header files. 
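As a rough usage sketch (hypothetical built-in caller, not from this series): code that wants its vmalloc areas attributed to itself rather than to an out-of-line wrapper passes its own return address to the new _caller variant, mirroring what kvmalloc_node() does in the hunk below.

	/* hypothetical helper: passing __builtin_return_address(0) attributes the
	 * allocation to whoever called my_subsys_alloc(), which is what the removed
	 * inline achieved implicitly */
	void *my_subsys_alloc(size_t size)
	{
		return __vmalloc_node_flags_caller(size, NUMA_NO_NODE, GFP_KERNEL,
						   __builtin_return_address(0));
	}
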
[akpm@linux-foundation.org: coding-style fixes] [mhocko@kernel.org: revert old comment] Link: http://lkml.kernel.org/r/20170509211054.GB16325@dhcp22.suse.cz Fixes: 1f5307b1e094 ("mm, vmalloc: properly track vmalloc users") Link: http://lkml.kernel.org/r/20170509153702.GR6481@dhcp22.suse.cz Signed-off-by: Michal Hocko Cc: Tobias Klauser Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmalloc.h | 21 ++++++--------------- mm/util.c | 3 ++- mm/vmalloc.c | 19 ++++++++++++++++++- 3 files changed, 26 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 0328ce003992..2d92dd002abd 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -6,7 +6,6 @@ #include #include #include /* pgprot_t */ -#include /* PAGE_KERNEL */ #include struct vm_area_struct; /* vma defining user mapping in mm_types.h */ @@ -83,22 +82,14 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align, const void *caller); #ifndef CONFIG_MMU extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags); -#else -extern void *__vmalloc_node(unsigned long size, unsigned long align, - gfp_t gfp_mask, pgprot_t prot, - int node, const void *caller); - -/* - * We really want to have this inlined due to caller tracking. This - * function is used by the highlevel vmalloc apis and so we want to track - * their callers and inlining will achieve that. - */ -static inline void *__vmalloc_node_flags(unsigned long size, - int node, gfp_t flags) +static inline void *__vmalloc_node_flags_caller(unsigned long size, int node, + gfp_t flags, void *caller) { - return __vmalloc_node(size, 1, flags, PAGE_KERNEL, - node, __builtin_return_address(0)); + return __vmalloc_node_flags(size, node, flags); } +#else +extern void *__vmalloc_node_flags_caller(unsigned long size, + int node, gfp_t flags, void *caller); #endif extern void vfree(const void *addr); diff --git a/mm/util.c b/mm/util.c index 718154debc87..464df3489903 100644 --- a/mm/util.c +++ b/mm/util.c @@ -382,7 +382,8 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) if (ret || size <= PAGE_SIZE) return ret; - return __vmalloc_node_flags(size, node, flags); + return __vmalloc_node_flags_caller(size, node, flags, + __builtin_return_address(0)); } EXPORT_SYMBOL(kvmalloc_node); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 194c22eccb9d..34a1c3e46ed7 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1649,6 +1649,9 @@ void *vmap(struct page **pages, unsigned int count, } EXPORT_SYMBOL(vmap); +static void *__vmalloc_node(unsigned long size, unsigned long align, + gfp_t gfp_mask, pgprot_t prot, + int node, const void *caller); static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot, int node) { @@ -1791,7 +1794,7 @@ fail: * with mm people. 
* */ -void *__vmalloc_node(unsigned long size, unsigned long align, +static void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, pgprot_t prot, int node, const void *caller) { @@ -1806,6 +1809,20 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) } EXPORT_SYMBOL(__vmalloc); +static inline void *__vmalloc_node_flags(unsigned long size, + int node, gfp_t flags) +{ + return __vmalloc_node(size, 1, flags, PAGE_KERNEL, + node, __builtin_return_address(0)); +} + + +void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, + void *caller) +{ + return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller); +} + /** * vmalloc - allocate virtually contiguous memory * @size: allocation size -- cgit v1.2.3 From 4636e70bb0a8b871998b6841a2e4b205cf2bc863 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Fri, 12 May 2017 15:46:47 -0700 Subject: dax: prevent invalidation of mapped DAX entries Patch series "mm,dax: Fix data corruption due to mmap inconsistency", v4. This series fixes data corruption that can happen for DAX mounts when page faults race with write(2) and as a result page tables get out of sync with block mappings in the filesystem and thus data seen through mmap is different from data seen through read(2). The series passes testing with t_mmap_stale test program from Ross and also other mmap related tests on DAX filesystem. This patch (of 4): dax_invalidate_mapping_entry() currently removes DAX exceptional entries only if they are clean and unlocked. This is done via: invalidate_mapping_pages() invalidate_exceptional_entry() dax_invalidate_mapping_entry() However, for page cache pages removed in invalidate_mapping_pages() there is an additional criteria which is that the page must not be mapped. This is noted in the comments above invalidate_mapping_pages() and is checked in invalidate_inode_page(). For DAX entries this means that we can can end up in a situation where a DAX exceptional entry, either a huge zero page or a regular DAX entry, could end up mapped but without an associated radix tree entry. This is inconsistent with the rest of the DAX code and with what happens in the page cache case. We aren't able to unmap the DAX exceptional entry because according to its comments invalidate_mapping_pages() isn't allowed to block, and unmap_mapping_range() takes a write lock on the mapping->i_mmap_rwsem. Since we essentially never have unmapped DAX entries to evict from the radix tree, just remove dax_invalidate_mapping_entry(). Fixes: c6dcf52c23d2 ("mm: Invalidate DAX radix tree entries only if appropriate") Link: http://lkml.kernel.org/r/20170510085419.27601-2-jack@suse.cz Signed-off-by: Ross Zwisler Signed-off-by: Jan Kara Reported-by: Jan Kara Cc: Dan Williams Cc: [4.10+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 29 ----------------------------- include/linux/dax.h | 1 - mm/truncate.c | 9 +++------ 3 files changed, 3 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/fs/dax.c b/fs/dax.c index 66d79067eedf..38deebb8c86e 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -460,35 +460,6 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) return ret; } -/* - * Invalidate exceptional DAX entry if easily possible. This handles DAX - * entries for invalidate_inode_pages() so we evict the entry only if we can - * do so without blocking. 
- */ -int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index) -{ - int ret = 0; - void *entry, **slot; - struct radix_tree_root *page_tree = &mapping->page_tree; - - spin_lock_irq(&mapping->tree_lock); - entry = __radix_tree_lookup(page_tree, index, NULL, &slot); - if (!entry || !radix_tree_exceptional_entry(entry) || - slot_locked(mapping, slot)) - goto out; - if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) || - radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)) - goto out; - radix_tree_delete(page_tree, index); - mapping->nrexceptional--; - ret = 1; -out: - spin_unlock_irq(&mapping->tree_lock); - if (ret) - dax_wake_mapping_entry_waiter(mapping, index, entry, true); - return ret; -} - /* * Invalidate exceptional DAX entry if it is clean. */ diff --git a/include/linux/dax.h b/include/linux/dax.h index d3158e74a59e..d1236d16ef00 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -63,7 +63,6 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, const struct iomap_ops *ops); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); -int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, diff --git a/mm/truncate.c b/mm/truncate.c index 83a059e8cd1d..706cff171a15 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -67,17 +67,14 @@ static void truncate_exceptional_entry(struct address_space *mapping, /* * Invalidate exceptional entry if easily possible. This handles exceptional - * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and - * clean entries. + * entries for invalidate_inode_pages(). */ static int invalidate_exceptional_entry(struct address_space *mapping, pgoff_t index, void *entry) { - /* Handled by shmem itself */ - if (shmem_mapping(mapping)) + /* Handled by shmem itself, or for DAX we do nothing. */ + if (shmem_mapping(mapping) || dax_mapping(mapping)) return 1; - if (dax_mapping(mapping)) - return dax_invalidate_mapping_entry(mapping, index); clear_shadow_entry(mapping, index, entry); return 1; } -- cgit v1.2.3
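
A short usage note, sketched under assumptions rather than taken from the patch: after this change, invalidate_mapping_pages() simply leaves DAX exceptional entries in place, while the clean-entry helper dax_invalidate_mapping_entry_sync() (still declared in the dax.h hunk above) remains available to callers that need to drop clean entries themselves, e.g.:

	/* hypothetical call site: the entry is removed only if it is clean,
	 * i.e. not tagged dirty or towrite in the radix tree */
	if (dax_mapping(mapping))
		ret = dax_invalidate_mapping_entry_sync(mapping, index);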