author    | Mark Brown <broonie@linaro.org> | 2013-08-09 17:26:31 +0100
committer | Mark Brown <broonie@linaro.org> | 2013-08-09 17:26:31 +0100
commit    | c7abc19ca6d48392f91ca04ebe1e9813b3ace435 (patch)
tree      | c23540ea1425e9891b39cdff550e90caaba38163 /include
parent    | d60990d597bfa2816dfe28a5c5c6787610b423e6 (diff)
parent    | c095ba7224d8edc71dcef0d655911399a8bd4a3f (diff)
download  | linux-c7abc19ca6d48392f91ca04ebe1e9813b3ace435.tar.bz2
Merge tag 'v3.11-rc4' into spi-bitbang
Linux 3.11-rc4
Diffstat (limited to 'include')
48 files changed, 640 insertions, 290 deletions
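Several hunks below (include/linux/sysfs.h and include/linux/device.h) introduce the __ATTR_RW()/__ATTR_RO() shorthands, the BUS_/CLASS_/DEVICE_/DRIVER_ATTR_RW/RO wrappers, ATTRIBUTE_GROUPS(), and device_create_with_groups(). As a rough sketch of how a driver might use the new helpers (the foo/delay names and the backing variable are hypothetical, not taken from this commit):

```c
/*
 * Hypothetical usage of the new sysfs helpers (names are made up).
 * DEVICE_ATTR_RW(delay) wires dev_attr_delay to delay_show()/delay_store()
 * with S_IRUGO | S_IWUSR; ATTRIBUTE_GROUPS(foo) builds foo_group and the
 * NULL-terminated foo_groups[] array.
 */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static int foo_delay;	/* example state exposed through sysfs */

static ssize_t delay_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", foo_delay);
}

static ssize_t delay_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	int ret = kstrtoint(buf, 0, &foo_delay);

	return ret ? ret : count;
}

static DEVICE_ATTR_RW(delay);

static struct attribute *foo_attrs[] = {
	&dev_attr_delay.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);	/* defines foo_group and foo_groups[] */
```

The resulting foo_groups[] can then be handed to device_create_with_groups() or assigned to a class's new dev_groups pointer instead of the dev_attrs array that this merge marks as superseded.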
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 1b09300810e6..22d497ee6ef9 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -62,6 +62,7 @@ extern u32 acpi_current_gpe_count; extern struct acpi_table_fadt acpi_gbl_FADT; extern u8 acpi_gbl_system_awake_and_running; extern u8 acpi_gbl_reduced_hardware; /* ACPI 5.0 */ +extern u8 acpi_gbl_osi_data; /* Runtime configuration of debug print levels */ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index a64adcc29ae5..22b03c9286e9 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -1144,4 +1144,19 @@ struct acpi_memory_list { #endif }; +/* Definitions for _OSI support */ + +#define ACPI_OSI_WIN_2000 0x01 +#define ACPI_OSI_WIN_XP 0x02 +#define ACPI_OSI_WIN_XP_SP1 0x03 +#define ACPI_OSI_WINSRV_2003 0x04 +#define ACPI_OSI_WIN_XP_SP2 0x05 +#define ACPI_OSI_WINSRV_2003_SP1 0x06 +#define ACPI_OSI_WIN_VISTA 0x07 +#define ACPI_OSI_WINSRV_2008 0x08 +#define ACPI_OSI_WIN_VISTA_SP1 0x09 +#define ACPI_OSI_WIN_VISTA_SP2 0x0A +#define ACPI_OSI_WIN_7 0x0B +#define ACPI_OSI_WIN_8 0x0C + #endif /* __ACTYPES_H__ */ diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h index f5e1168c7647..d639049a613d 100644 --- a/include/drm/drm_fixed.h +++ b/include/drm/drm_fixed.h @@ -84,12 +84,12 @@ static inline int drm_fixp2int(int64_t a) return ((s64)a) >> DRM_FIXED_POINT; } -static inline s64 drm_fixp_msbset(int64_t a) +static inline unsigned drm_fixp_msbset(int64_t a) { unsigned shift, sign = (a >> 63) & 1; for (shift = 62; shift > 0; --shift) - if ((a >> shift) != sign) + if (((a >> shift) & 1) != sign) return shift; return 0; @@ -100,9 +100,9 @@ static inline s64 drm_fixp_mul(s64 a, s64 b) unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b); s64 result; - if (shift > 63) { - shift = shift - 63; - a >>= shift >> 1; + if (shift > 61) { + shift = shift - 61; + a >>= (shift >> 1) + (shift & 1); b >>= shift >> 1; } else shift = 0; @@ -120,7 +120,7 @@ static inline s64 drm_fixp_mul(s64 a, s64 b) static inline s64 drm_fixp_div(s64 a, s64 b) { - unsigned shift = 63 - drm_fixp_msbset(a); + unsigned shift = 62 - drm_fixp_msbset(a); s64 result; a <<= shift; @@ -154,7 +154,7 @@ static inline s64 drm_fixp_exp(s64 x) } if (x < 0) - sum = drm_fixp_div(1, sum); + sum = drm_fixp_div(DRM_FIXED_ONE, sum); return sum; } diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index 15e997fa78f2..4aa2b48cd151 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -158,6 +158,8 @@ #define VF610_CLK_GPU_SEL 145 #define VF610_CLK_GPU_EN 146 #define VF610_CLK_GPU2D 147 -#define VF610_CLK_END 148 +#define VF610_CLK_ENET0 148 +#define VF610_CLK_ENET1 149 +#define VF610_CLK_END 150 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h index 469e0325e6f4..2fbc804e1a45 100644 --- a/include/dt-bindings/pinctrl/am33xx.h +++ b/include/dt-bindings/pinctrl/am33xx.h @@ -5,7 +5,7 @@ #ifndef _DT_BINDINGS_PINCTRL_AM33XX_H #define _DT_BINDINGS_PINCTRL_AM33XX_H -#include <include/dt-bindings/pinctrl/omap.h> +#include <dt-bindings/pinctrl/omap.h> /* am33xx specific mux bit defines */ #undef PULL_ENA diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 297462b9f41a..e9ac882868c0 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -542,8 +542,7 @@ int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); bool cgroup_is_descendant(struct 
cgroup *cgrp, struct cgroup *ancestor); int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); -int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id, - char *buf, size_t buflen); +int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); int cgroup_task_count(const struct cgroup *cgrp); diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 6e7ec64b69ab..b613ffd402d1 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -1,86 +1,55 @@ -/* Add subsystem definitions of the form SUBSYS(<name>) in this - * file. Surround each one by a line of comment markers so that - * patches don't collide +/* + * List of cgroup subsystems. + * + * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. */ - -/* */ - -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CPUSETS) SUBSYS(cpuset) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG) SUBSYS(debug) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED) SUBSYS(cpu_cgroup) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT) SUBSYS(cpuacct) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_MEMCG) SUBSYS(mem_cgroup) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE) SUBSYS(devices) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER) SUBSYS(freezer) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_NET_CLS_CGROUP) SUBSYS(net_cls) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP) SUBSYS(blkio) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF) SUBSYS(perf) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_NETPRIO_CGROUP) SUBSYS(net_prio) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB) SUBSYS(hugetlb) #endif - -/* */ - -#ifdef CONFIG_CGROUP_BCACHE -SUBSYS(bcache) -#endif - -/* */ +/* + * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. + */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 944f283f01c4..ab0eade73039 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -114,7 +114,7 @@ enum { /* Need to know about CPUs going up/down? 
*/ #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) #define cpu_notifier(fn, pri) { \ - static struct notifier_block fn##_nb __cpuinitdata = \ + static struct notifier_block fn##_nb = \ { .notifier_call = fn, .priority = pri }; \ register_cpu_notifier(&fn##_nb); \ } diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index b3cb71f0d3b0..a9c96d865ee7 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h @@ -3,10 +3,6 @@ #include <linux/types.h> -#define CRC_T10DIF_DIGEST_SIZE 2 -#define CRC_T10DIF_BLOCK_SIZE 1 - -__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len); __u16 crc_t10dif(unsigned char const *, size_t); #endif diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 3092df3614ae..b90337c9d468 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -324,7 +324,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) return ret; } -static inline unsigned d_count(struct dentry *dentry) +static inline unsigned d_count(const struct dentry *dentry) { return dentry->d_count; } diff --git a/include/linux/device.h b/include/linux/device.h index bcf8c0d4cd98..22b546a58591 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -47,7 +47,11 @@ struct bus_attribute { }; #define BUS_ATTR(_name, _mode, _show, _store) \ -struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) + struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define BUS_ATTR_RW(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) +#define BUS_ATTR_RO(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) extern int __must_check bus_create_file(struct bus_type *, struct bus_attribute *); @@ -261,9 +265,12 @@ struct driver_attribute { size_t count); }; -#define DRIVER_ATTR(_name, _mode, _show, _store) \ -struct driver_attribute driver_attr_##_name = \ - __ATTR(_name, _mode, _show, _store) +#define DRIVER_ATTR(_name, _mode, _show, _store) \ + struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DRIVER_ATTR_RW(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) +#define DRIVER_ATTR_RO(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RO(_name) extern int __must_check driver_create_file(struct device_driver *driver, const struct driver_attribute *attr); @@ -313,6 +320,7 @@ int subsys_virtual_register(struct bus_type *subsys, * @name: Name of the class. * @owner: The module owner. * @class_attrs: Default attributes of this class. + * @dev_groups: Default attributes of the devices that belong to the class. * @dev_attrs: Default attributes of the devices belong to the class. * @dev_bin_attrs: Default binary attributes of the devices belong to the class. * @dev_kobj: The kobject that represents this class and links it into the hierarchy. 
@@ -342,7 +350,8 @@ struct class { struct module *owner; struct class_attribute *class_attrs; - struct device_attribute *dev_attrs; + struct device_attribute *dev_attrs; /* use dev_groups instead */ + const struct attribute_group **dev_groups; struct bin_attribute *dev_bin_attrs; struct kobject *dev_kobj; @@ -414,8 +423,12 @@ struct class_attribute { const struct class_attribute *attr); }; -#define CLASS_ATTR(_name, _mode, _show, _store) \ -struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define CLASS_ATTR(_name, _mode, _show, _store) \ + struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define CLASS_ATTR_RW(_name) \ + struct class_attribute class_attr_##_name = __ATTR_RW(_name) +#define CLASS_ATTR_RO(_name) \ + struct class_attribute class_attr_##_name = __ATTR_RO(_name) extern int __must_check class_create_file(struct class *class, const struct class_attribute *attr); @@ -423,7 +436,6 @@ extern void class_remove_file(struct class *class, const struct class_attribute *attr); /* Simple class attribute that is just a static string */ - struct class_attribute_string { struct class_attribute attr; char *str; @@ -512,6 +524,10 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, #define DEVICE_ATTR(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DEVICE_ATTR_RW(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RW(_name) +#define DEVICE_ATTR_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO(_name) #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ struct dev_ext_attribute dev_attr_##_name = \ { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } @@ -924,6 +940,11 @@ extern __printf(5, 6) struct device *device_create(struct class *cls, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...); +extern __printf(6, 7) +struct device *device_create_with_groups(struct class *cls, + struct device *parent, dev_t devt, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); extern void device_destroy(struct class *cls, dev_t devt); /* diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 1b4d4ee1168f..de7d74ab3de6 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -177,7 +177,11 @@ enum drbd_ret_code { ERR_NEED_APV_100 = 163, ERR_NEED_ALLOW_TWO_PRI = 164, ERR_MD_UNCLEAN = 165, - + ERR_MD_LAYOUT_CONNECTED = 166, + ERR_MD_LAYOUT_TOO_BIG = 167, + ERR_MD_LAYOUT_TOO_SMALL = 168, + ERR_MD_LAYOUT_NO_FIT = 169, + ERR_IMPLICIT_SHRINK = 170, /* insert new ones above this line */ AFTER_LAST_ERR_CODE }; diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index d0d8fac8a6e4..e8c44572b8cb 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h @@ -181,6 +181,8 @@ GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms, __u64_field(1, DRBD_GENLA_F_MANDATORY, resize_size) __flg_field(2, DRBD_GENLA_F_MANDATORY, resize_force) __flg_field(3, DRBD_GENLA_F_MANDATORY, no_resync) + __u32_field_def(4, 0 /* OPTIONAL */, al_stripes, DRBD_AL_STRIPES_DEF) + __u32_field_def(5, 0 /* OPTIONAL */, al_stripe_size, DRBD_AL_STRIPE_SIZE_DEF) ) GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info, diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 1fedf2b17cc8..17e50bb00521 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h @@ -215,4 +215,13 @@ #define DRBD_ALWAYS_ASBP_DEF 0 #define DRBD_USE_RLE_DEF 1 
+#define DRBD_AL_STRIPES_MIN 1 +#define DRBD_AL_STRIPES_MAX 1024 +#define DRBD_AL_STRIPES_DEF 1 +#define DRBD_AL_STRIPES_SCALE '1' + +#define DRBD_AL_STRIPE_SIZE_MIN 4 +#define DRBD_AL_STRIPE_SIZE_MAX 16777216 +#define DRBD_AL_STRIPE_SIZE_DEF 32 +#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */ #endif diff --git a/include/linux/edac.h b/include/linux/edac.h index 0b763276f619..5c6d7fbaf89e 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -622,7 +622,7 @@ struct edac_raw_error_desc { */ struct mem_ctl_info { struct device dev; - struct bus_type bus; + struct bus_type *bus; struct list_head link; /* for global list of mem_ctl_info structs */ @@ -742,4 +742,9 @@ struct mem_ctl_info { #endif }; +/* + * Maximum number of memory controllers in the coherent fabric. + */ +#define EDAC_MAX_MCS 16 + #endif diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 3b0e820375ab..5d7782e42b8f 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -436,6 +436,7 @@ struct fw_iso_context { int type; int channel; int speed; + bool drop_overflow_headers; size_t header_size; union { fw_iso_callback_t sc; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index cdcbafa9b39a..715c343f7c00 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -79,9 +79,8 @@ static inline int is_vlan_dev(struct net_device *dev) } #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) -#define vlan_tx_nonzero_tag_present(__skb) \ - (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK)) #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) +#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 8d171f427632..3d35b7023591 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -211,8 +211,8 @@ struct iio_chan_spec { static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, enum iio_chan_info_enum type) { - return (chan->info_mask_separate & type) | - (chan->info_mask_shared_by_type & type); + return (chan->info_mask_separate & BIT(type)) | + (chan->info_mask_shared_by_type & BIT(type)); } #define IIO_ST(si, rb, sb, sh) \ diff --git a/include/linux/list.h b/include/linux/list.h index b83e5657365a..f4d8a2f12a33 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -381,17 +381,6 @@ static inline void list_splice_tail_init(struct list_head *list, for (pos = (head)->next; pos != (head); pos = pos->next) /** - * __list_for_each - iterate over a list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - * - * This variant doesn't differ from list_for_each() any more. - * We don't do prefetching in either case. - */ -#define __list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); pos = pos->next) - -/** * list_for_each_prev - iterate over a list backwards * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. 
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index dab34a1deb2c..b6bdcd66c07d 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -103,15 +103,15 @@ #define IMX6Q_GPR1_EXC_MON_MASK BIT(22) #define IMX6Q_GPR1_EXC_MON_OKAY 0x0 #define IMX6Q_GPR1_EXC_MON_SLVE BIT(22) -#define IMX6Q_GPR1_MIPI_IPU2_SEL_MASK BIT(21) -#define IMX6Q_GPR1_MIPI_IPU2_SEL_GASKET 0x0 -#define IMX6Q_GPR1_MIPI_IPU2_SEL_IOMUX BIT(21) -#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(20) -#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0 -#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(20) -#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(19) +#define IMX6Q_GPR1_ENET_CLK_SEL_MASK BIT(21) +#define IMX6Q_GPR1_ENET_CLK_SEL_PAD 0 +#define IMX6Q_GPR1_ENET_CLK_SEL_ANATOP BIT(21) +#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(20) #define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET 0x0 -#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(19) +#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(20) +#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(19) +#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0 +#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(19) #define IMX6Q_GPR1_PCIE_TEST_PD BIT(18) #define IMX6Q_GPR1_IPU_VPU_MUX_MASK BIT(17) #define IMX6Q_GPR1_IPU_VPU_MUX_IPU1 0x0 @@ -279,41 +279,88 @@ #define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29) #define IMX6Q_GPR13_CAN1_STOP_REQ BIT(28) #define IMX6Q_GPR13_ENET_STOP_REQ BIT(27) -#define IMX6Q_GPR13_SATA_PHY_8_MASK (0x7 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_0_5_DB (0x0 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_1_0_DB (0x1 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_1_5_DB (0x2 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_2_0_DB (0x3 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_2_5_DB (0x4 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_3_0_DB (0x5 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_3_5_DB (0x6 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_4_0_DB (0x7 << 24) -#define IMX6Q_GPR13_SATA_PHY_7_MASK (0x1f << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA1I (0x10 << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA1M (0x10 << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA1X (0x1a << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA2I (0x12 << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA2M (0x12 << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA2X (0x1a << 19) -#define IMX6Q_GPR13_SATA_PHY_6_MASK (0x7 << 16) -#define IMX6Q_GPR13_SATA_SPEED_MASK BIT(15) -#define IMX6Q_GPR13_SATA_SPEED_1P5G 0x0 -#define IMX6Q_GPR13_SATA_SPEED_3P0G BIT(15) -#define IMX6Q_GPR13_SATA_PHY_5 BIT(14) -#define IMX6Q_GPR13_SATA_PHY_4_MASK (0x7 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_16_16 (0x0 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_14_16 (0x1 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_12_16 (0x2 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_10_16 (0x3 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_9_16 (0x4 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_8_16 (0x5 << 11) -#define IMX6Q_GPR13_SATA_PHY_3_MASK (0xf << 7) -#define IMX6Q_GPR13_SATA_PHY_3_OFF 0x7 -#define IMX6Q_GPR13_SATA_PHY_2_MASK (0x1f << 2) -#define IMX6Q_GPR13_SATA_PHY_2_OFF 0x2 -#define IMX6Q_GPR13_SATA_PHY_1_MASK (0x3 << 0) -#define IMX6Q_GPR13_SATA_PHY_1_FAST (0x0 << 0) -#define IMX6Q_GPR13_SATA_PHY_1_MED (0x1 << 0) -#define IMX6Q_GPR13_SATA_PHY_1_SLOW (0x2 << 0) - +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK (0x7 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB (0x0 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB (0x1 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB (0x2 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB (0x3 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB (0x4 << 24) +#define 
IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB (0x5 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB (0x6 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB (0x7 << 24) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK (0x1f << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1I (0x10 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1M (0x10 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1X (0x1a << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2I (0x12 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M (0x12 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2X (0x1a << 19) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK (0x7 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_1F (0x0 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_2F (0x1 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_4F (0x2 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F (0x3 << 16) +#define IMX6Q_GPR13_SATA_SPD_MODE_MASK BIT(15) +#define IMX6Q_GPR13_SATA_SPD_MODE_1P5G 0x0 +#define IMX6Q_GPR13_SATA_SPD_MODE_3P0G BIT(15) +#define IMX6Q_GPR13_SATA_MPLL_SS_EN BIT(14) +#define IMX6Q_GPR13_SATA_TX_ATTEN_MASK (0x7 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_16_16 (0x0 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_14_16 (0x1 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_12_16 (0x2 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_10_16 (0x3 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_9_16 (0x4 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_8_16 (0x5 << 11) +#define IMX6Q_GPR13_SATA_TX_BOOST_MASK (0xf << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB (0x0 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB (0x1 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB (0x2 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB (0x3 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB (0x4 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB (0x5 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB (0x6 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB (0x7 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB (0x8 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB (0x9 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB (0xa << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB (0xb << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB (0xc << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB (0xd << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB (0xe << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB (0xf << 7) +#define IMX6Q_GPR13_SATA_TX_LVL_MASK (0x1f << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_937_V (0x00 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_947_V (0x01 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_957_V (0x02 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_966_V (0x03 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_976_V (0x04 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_986_V (0x05 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_996_V (0x06 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_005_V (0x07 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_015_V (0x08 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_025_V (0x09 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_035_V (0x0a << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_045_V (0x0b << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_054_V (0x0c << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_064_V (0x0d << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_074_V (0x0e << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_084_V (0x0f << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_094_V (0x10 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_104_V (0x11 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_113_V (0x12 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_123_V (0x13 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_133_V (0x14 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_143_V (0x15 << 2) +#define 
IMX6Q_GPR13_SATA_TX_LVL_1_152_V (0x16 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_162_V (0x17 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_172_V (0x18 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_182_V (0x19 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_191_V (0x1a << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_201_V (0x1b << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_211_V (0x1c << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_221_V (0x1d << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_230_V (0x1e << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2) +#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1) +#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0) #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8de8d8f22384..737685e9e852 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -690,6 +690,26 @@ struct mlx5_query_cq_mbox_out { __be64 pas[0]; }; +struct mlx5_enable_hca_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_enable_hca_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_disable_hca_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_disable_hca_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + struct mlx5_eq_context { u8 status; u8 ec_oi; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f22e4419839b..2aa258b0ced1 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -101,6 +101,8 @@ enum { MLX5_CMD_OP_QUERY_ADAPTER = 0x101, MLX5_CMD_OP_INIT_HCA = 0x102, MLX5_CMD_OP_TEARDOWN_HCA = 0x103, + MLX5_CMD_OP_ENABLE_HCA = 0x104, + MLX5_CMD_OP_DISABLE_HCA = 0x105, MLX5_CMD_OP_QUERY_PAGES = 0x107, MLX5_CMD_OP_MANAGE_PAGES = 0x108, MLX5_CMD_OP_SET_HCA_CAP = 0x109, @@ -690,7 +692,7 @@ int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s16 npages); -int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev); +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b62d4af6c667..45e921401b06 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -361,7 +361,8 @@ struct ssb_device_id { __u16 vendor; __u16 coreid; __u8 revision; -}; + __u8 __pad; +} __attribute__((packed, aligned(2))); #define SSB_DEVICE(_vendor, _coreid, _revision) \ { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } #define SSB_DEVTABLE_END \ @@ -377,7 +378,7 @@ struct bcma_device_id { __u16 id; __u8 rev; __u8 class; -}; +} __attribute__((packed,aligned(2))); #define BCMA_CORE(_manuf, _id, _rev, _class) \ { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } #define BCMA_CORETABLE_END \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0741a1e919a5..9a4156845e93 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -973,7 +973,7 @@ struct net_device_ops { gfp_t gfp); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif -#ifdef CONFIG_NET_LL_RX_POLL +#ifdef CONFIG_NET_RX_BUSY_POLL int (*ndo_busy_poll)(struct napi_struct *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8873f82c7baa..c43f6eabad5b 100644 --- 
a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -826,7 +826,7 @@ static inline void perf_restore_debug_store(void) { } */ #define perf_cpu_notifier(fn) \ do { \ - static struct notifier_block fn##_nb __cpuinitdata = \ + static struct notifier_block fn##_nb = \ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ unsigned long cpu = smp_processor_id(); \ unsigned long flags; \ diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h index 9eb515bb799d..1706b3597ce0 100644 --- a/include/linux/platform_data/mmc-pxamci.h +++ b/include/linux/platform_data/mmc-pxamci.h @@ -12,7 +12,7 @@ struct pxamci_platform_data { unsigned long detect_delay_ms; /* delay in millisecond before detecting cards after interrupt */ int (*init)(struct device *, irq_handler_t , void *); int (*get_ro)(struct device *); - void (*setpower)(struct device *, unsigned int); + int (*setpower)(struct device *, unsigned int); void (*exit)(struct device *, void *); int gpio_card_detect; /* gpio detecting card insertion */ int gpio_card_ro; /* gpio detecting read only toggle */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 569781faa504..a0f70808d7f4 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -36,8 +36,8 @@ * @last_time: Monotonic clock when the wakeup source's was touched last time. * @prevent_sleep_time: Total time this source has been preventing autosleep. * @event_count: Number of signaled wakeup events. - * @active_count: Number of times the wakeup sorce was activated. - * @relax_count: Number of times the wakeup sorce was deactivated. + * @active_count: Number of times the wakeup source was activated. + * @relax_count: Number of times the wakeup source was deactivated. * @expire_count: Number of times the wakeup source's timeout has expired. * @wakeup_count: Number of times the wakeup source might abort suspend. * @active: Status of the wakeup source. 
diff --git a/include/linux/sched.h b/include/linux/sched.h index 50d04b92ceda..d722490da030 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1628,6 +1628,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ /* * Only the _current_ task can read/write to tsk->flags, but other diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index 382cf710ca9a..5b1c9848124c 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h @@ -124,6 +124,10 @@ void shdma_chan_remove(struct shdma_chan *schan); int shdma_init(struct device *dev, struct shdma_dev *sdev, int chan_num); void shdma_cleanup(struct shdma_dev *sdev); +#if IS_ENABLED(CONFIG_SH_DMAE_BASE) bool shdma_chan_filter(struct dma_chan *chan, void *arg); +#else +#define shdma_chan_filter NULL +#endif #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5afefa01a13c..3b71a4e83642 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -501,7 +501,7 @@ struct sk_buff { /* 7/9 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); -#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL +#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL union { unsigned int napi_id; dma_cookie_t dma_cookie; diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index e2cee22f578a..9e8a9b555ad6 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -17,10 +17,12 @@ #include <linux/list.h> #include <linux/lockdep.h> #include <linux/kobject_ns.h> +#include <linux/stat.h> #include <linux/atomic.h> struct kobject; struct module; +struct bin_attribute; enum kobj_ns_type; struct attribute { @@ -59,26 +61,28 @@ struct attribute_group { umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; + struct bin_attribute **bin_attrs; }; - - /** * Use these macros to make defining attributes easier. See include/linux/device.h * for examples.. 
*/ -#define __ATTR(_name,_mode,_show,_store) { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .show = _show, \ - .store = _store, \ +#define __ATTR(_name,_mode,_show,_store) { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ } -#define __ATTR_RO(_name) { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .show = _name##_show, \ +#define __ATTR_RO(_name) { \ + .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ + .show = _name##_show, \ } +#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ + _name##_show, _name##_store) + #define __ATTR_NULL { .attr = { .name = NULL } } #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -92,6 +96,18 @@ struct attribute_group { #define __ATTR_IGNORE_LOCKDEP __ATTR #endif +#define __ATTRIBUTE_GROUPS(_name) \ +static const struct attribute_group *_name##_groups[] = { \ + &_name##_group, \ + NULL, \ +} + +#define ATTRIBUTE_GROUPS(_name) \ +static const struct attribute_group _name##_group = { \ + .attrs = _name##_attrs, \ +}; \ +__ATTRIBUTE_GROUPS(_name) + #define attr_name(_attr) (_attr).attr.name struct file; @@ -121,6 +137,36 @@ struct bin_attribute { */ #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) +/* macros to create static binary attributes easier */ +#define __BIN_ATTR(_name, _mode, _read, _write, _size) { \ + .attr = { .name = __stringify(_name), .mode = _mode }, \ + .read = _read, \ + .write = _write, \ + .size = _size, \ +} + +#define __BIN_ATTR_RO(_name, _size) { \ + .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ + .read = _name##_read, \ + .size = _size, \ +} + +#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \ + (S_IWUSR | S_IRUGO), _name##_read, \ + _name##_write) + +#define __BIN_ATTR_NULL __ATTR_NULL + +#define BIN_ATTR(_name, _mode, _read, _write, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \ + _write, _size) + +#define BIN_ATTR_RO(_name, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size) + +#define BIN_ATTR_RW(_name, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) + struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *,char *); ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t); diff --git a/include/linux/tick.h b/include/linux/tick.h index 9180f4b85e6d..62bd8b72873c 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -174,10 +174,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) { } #endif -# ifdef CONFIG_CPU_IDLE_GOV_MENU -extern void menu_hrtimer_cancel(void); -# else -static inline void menu_hrtimer_cancel(void) {} -# endif /* CONFIG_CPU_IDLE_GOV_MENU */ - #endif diff --git a/include/linux/usb.h b/include/linux/usb.h index a232b7ece1f6..0eec2689b955 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -367,17 +367,6 @@ struct usb_bus { /* ----------------------------------------------------------------------- */ -/* This is arbitrary. - * From USB 2.0 spec Table 11-13, offset 7, a hub can - * have up to 255 ports. The most yet reported is 10. - * - * Current Wireless USB host hardware (Intel i1480 for example) allows - * up to 22 devices to connect. Upcoming hardware might raise that - * limit. Because the arrays need to add a bit for hub status data, we - * do 31, so plus one evens out to four bytes. 
- */ -#define USB_MAXCHILDREN (31) - struct usb_tt; enum usb_device_removable { diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 76be077340ea..7dc17e2456de 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -12,7 +12,7 @@ struct vmpressure { unsigned long scanned; unsigned long reclaimed; /* The lock is used to keep the scanned/reclaimed above in sync. */ - struct mutex sr_lock; + struct spinlock sr_lock; /* The list of vmpressure_event structs. */ struct list_head events; @@ -30,6 +30,7 @@ extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); extern void vmpressure_init(struct vmpressure *vmpr); +extern void vmpressure_cleanup(struct vmpressure *vmpr); extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr); extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css); diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index a14339c2985f..f18b91966d3d 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -27,7 +27,7 @@ #include <linux/netdevice.h> #include <net/ip.h> -#ifdef CONFIG_NET_LL_RX_POLL +#ifdef CONFIG_NET_RX_BUSY_POLL struct napi_struct; extern unsigned int sysctl_net_busy_read __read_mostly; @@ -146,7 +146,7 @@ static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb) sk->sk_napi_id = skb->napi_id; } -#else /* CONFIG_NET_LL_RX_POLL */ +#else /* CONFIG_NET_RX_BUSY_POLL */ static inline unsigned long net_busy_loop_on(void) { return 0; @@ -181,5 +181,10 @@ static inline bool busy_loop_timeout(unsigned long end_time) return true; } -#endif /* CONFIG_NET_LL_RX_POLL */ +static inline bool sk_busy_loop(struct sock *sk, int nonblock) +{ + return false; +} + +#endif /* CONFIG_NET_RX_BUSY_POLL */ #endif /* _LINUX_NET_BUSY_POLL_H */ diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 2a601e7da1bf..48ec25a7fcb6 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -300,7 +300,7 @@ extern void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); extern void fib6_run_gc(unsigned long expires, - struct net *net); + struct net *net, bool force); extern void fib6_gc_cleanup(void); diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 949d77528f2f..6fea32340ae8 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -119,7 +119,7 @@ extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may * also need a pad of 2. 
*/ -static int ndisc_addr_option_pad(unsigned short type) +static inline int ndisc_addr_option_pad(unsigned short type) { switch (type) { case ARPHRD_INFINIBAND: return 2; diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 0af851c3b038..b64b7bce4b94 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -59,7 +59,7 @@ struct nfc_hci_ops { struct nfc_target *target); int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event, struct sk_buff *skb); - int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name); + int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name); int (*discover_se)(struct nfc_hci_dev *dev); int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx); int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx); diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 0e353f1658bb..5f286b726bb6 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -68,7 +68,7 @@ struct nfc_ops { void *cb_context); int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb); int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target); - int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name); + int (*fw_download)(struct nfc_dev *dev, const char *firmware_name); /* Secure Element API */ int (*discover_se)(struct nfc_dev *dev); @@ -127,7 +127,7 @@ struct nfc_dev { int targets_generation; struct device dev; bool dev_up; - bool fw_upload_in_progress; + bool fw_download_in_progress; u8 rf_mode; bool polling; struct nfc_target *active_target; diff --git a/include/net/sock.h b/include/net/sock.h index 95a5a2c6925a..31d5cfbb51ec 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -327,7 +327,7 @@ struct sock { #ifdef CONFIG_RPS __u32 sk_rxhash; #endif -#ifdef CONFIG_NET_LL_RX_POLL +#ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sk_napi_id; unsigned int sk_ll_usec; #endif diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index 3cc5a0b278c3..5ebda976ea93 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h @@ -9,9 +9,7 @@ struct search; DECLARE_EVENT_CLASS(bcache_request, - TP_PROTO(struct search *s, struct bio *bio), - TP_ARGS(s, bio), TP_STRUCT__entry( @@ -22,7 +20,6 @@ DECLARE_EVENT_CLASS(bcache_request, __field(dev_t, orig_sector ) __field(unsigned int, nr_sector ) __array(char, rwbs, 6 ) - __array(char, comm, TASK_COMM_LEN ) ), TP_fast_assign( @@ -33,36 +30,66 @@ DECLARE_EVENT_CLASS(bcache_request, __entry->orig_sector = bio->bi_sector - 16; __entry->nr_sector = bio->bi_size >> 9; blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), - TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d @ %llu)", + TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm, - __entry->orig_major, __entry->orig_minor, + __entry->rwbs, (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->orig_major, __entry->orig_minor, (unsigned long long)__entry->orig_sector) ); -DEFINE_EVENT(bcache_request, bcache_request_start, +DECLARE_EVENT_CLASS(bkey, + TP_PROTO(struct bkey *k), + TP_ARGS(k), - TP_PROTO(struct search *s, struct bio *bio), + TP_STRUCT__entry( + __field(u32, size ) + __field(u32, inode ) + __field(u64, offset ) + __field(bool, dirty ) + ), - TP_ARGS(s, bio) + TP_fast_assign( + __entry->inode = KEY_INODE(k); + __entry->offset = KEY_OFFSET(k); + __entry->size = KEY_SIZE(k); + 
__entry->dirty = KEY_DIRTY(k); + ), + + TP_printk("%u:%llu len %u dirty %u", __entry->inode, + __entry->offset, __entry->size, __entry->dirty) ); -DEFINE_EVENT(bcache_request, bcache_request_end, +DECLARE_EVENT_CLASS(btree_node, + TP_PROTO(struct btree *b), + TP_ARGS(b), + + TP_STRUCT__entry( + __field(size_t, bucket ) + ), + TP_fast_assign( + __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); + ), + + TP_printk("bucket %zu", __entry->bucket) +); + +/* request.c */ + +DEFINE_EVENT(bcache_request, bcache_request_start, TP_PROTO(struct search *s, struct bio *bio), + TP_ARGS(s, bio) +); +DEFINE_EVENT(bcache_request, bcache_request_end, + TP_PROTO(struct search *s, struct bio *bio), TP_ARGS(s, bio) ); DECLARE_EVENT_CLASS(bcache_bio, - TP_PROTO(struct bio *bio), - TP_ARGS(bio), TP_STRUCT__entry( @@ -70,7 +97,6 @@ DECLARE_EVENT_CLASS(bcache_bio, __field(sector_t, sector ) __field(unsigned int, nr_sector ) __array(char, rwbs, 6 ) - __array(char, comm, TASK_COMM_LEN ) ), TP_fast_assign( @@ -78,191 +104,328 @@ DECLARE_EVENT_CLASS(bcache_bio, __entry->sector = bio->bi_sector; __entry->nr_sector = bio->bi_size >> 9; blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), - TP_printk("%d,%d %s %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + TP_printk("%d,%d %s %llu + %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, __entry->nr_sector) ); - -DEFINE_EVENT(bcache_bio, bcache_passthrough, - +DEFINE_EVENT(bcache_bio, bcache_bypass_sequential, TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); +DEFINE_EVENT(bcache_bio, bcache_bypass_congested, + TP_PROTO(struct bio *bio), TP_ARGS(bio) ); -DEFINE_EVENT(bcache_bio, bcache_cache_hit, +TRACE_EVENT(bcache_read, + TP_PROTO(struct bio *bio, bool hit, bool bypass), + TP_ARGS(bio, hit, bypass), - TP_PROTO(struct bio *bio), + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(sector_t, sector ) + __field(unsigned int, nr_sector ) + __array(char, rwbs, 6 ) + __field(bool, cache_hit ) + __field(bool, bypass ) + ), - TP_ARGS(bio) + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_sector; + __entry->nr_sector = bio->bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + __entry->cache_hit = hit; + __entry->bypass = bypass; + ), + + TP_printk("%d,%d %s %llu + %u hit %u bypass %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->rwbs, (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->cache_hit, __entry->bypass) ); -DEFINE_EVENT(bcache_bio, bcache_cache_miss, +TRACE_EVENT(bcache_write, + TP_PROTO(struct bio *bio, bool writeback, bool bypass), + TP_ARGS(bio, writeback, bypass), - TP_PROTO(struct bio *bio), + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(sector_t, sector ) + __field(unsigned int, nr_sector ) + __array(char, rwbs, 6 ) + __field(bool, writeback ) + __field(bool, bypass ) + ), - TP_ARGS(bio) + TP_fast_assign( + __entry->dev = bio->bi_bdev->bd_dev; + __entry->sector = bio->bi_sector; + __entry->nr_sector = bio->bi_size >> 9; + blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); + __entry->writeback = writeback; + __entry->bypass = bypass; + ), + + TP_printk("%d,%d %s %llu + %u hit %u bypass %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->rwbs, (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->writeback, __entry->bypass) ); 
DEFINE_EVENT(bcache_bio, bcache_read_retry, - TP_PROTO(struct bio *bio), - TP_ARGS(bio) ); -DEFINE_EVENT(bcache_bio, bcache_writethrough, +DEFINE_EVENT(bkey, bcache_cache_insert, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); - TP_PROTO(struct bio *bio), +/* Journal */ - TP_ARGS(bio) -); +DECLARE_EVENT_CLASS(cache_set, + TP_PROTO(struct cache_set *c), + TP_ARGS(c), -DEFINE_EVENT(bcache_bio, bcache_writeback, + TP_STRUCT__entry( + __array(char, uuid, 16 ) + ), - TP_PROTO(struct bio *bio), + TP_fast_assign( + memcpy(__entry->uuid, c->sb.set_uuid, 16); + ), - TP_ARGS(bio) + TP_printk("%pU", __entry->uuid) ); -DEFINE_EVENT(bcache_bio, bcache_write_skip, - - TP_PROTO(struct bio *bio), +DEFINE_EVENT(bkey, bcache_journal_replay_key, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); - TP_ARGS(bio) +DEFINE_EVENT(cache_set, bcache_journal_full, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) ); -DEFINE_EVENT(bcache_bio, bcache_btree_read, +DEFINE_EVENT(cache_set, bcache_journal_entry_full, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); +DEFINE_EVENT(bcache_bio, bcache_journal_write, TP_PROTO(struct bio *bio), - TP_ARGS(bio) ); -DEFINE_EVENT(bcache_bio, bcache_btree_write, +/* Btree */ - TP_PROTO(struct bio *bio), +DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); - TP_ARGS(bio) +DEFINE_EVENT(btree_node, bcache_btree_read, + TP_PROTO(struct btree *b), + TP_ARGS(b) ); -DEFINE_EVENT(bcache_bio, bcache_write_dirty, +TRACE_EVENT(bcache_btree_write, + TP_PROTO(struct btree *b), + TP_ARGS(b), - TP_PROTO(struct bio *bio), + TP_STRUCT__entry( + __field(size_t, bucket ) + __field(unsigned, block ) + __field(unsigned, keys ) + ), - TP_ARGS(bio) + TP_fast_assign( + __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); + __entry->block = b->written; + __entry->keys = b->sets[b->nsets].data->keys; + ), + + TP_printk("bucket %zu", __entry->bucket) ); -DEFINE_EVENT(bcache_bio, bcache_read_dirty, +DEFINE_EVENT(btree_node, bcache_btree_node_alloc, + TP_PROTO(struct btree *b), + TP_ARGS(b) +); - TP_PROTO(struct bio *bio), +DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail, + TP_PROTO(struct btree *b), + TP_ARGS(b) +); - TP_ARGS(bio) +DEFINE_EVENT(btree_node, bcache_btree_node_free, + TP_PROTO(struct btree *b), + TP_ARGS(b) ); -DEFINE_EVENT(bcache_bio, bcache_write_moving, +TRACE_EVENT(bcache_btree_gc_coalesce, + TP_PROTO(unsigned nodes), + TP_ARGS(nodes), - TP_PROTO(struct bio *bio), + TP_STRUCT__entry( + __field(unsigned, nodes ) + ), - TP_ARGS(bio) + TP_fast_assign( + __entry->nodes = nodes; + ), + + TP_printk("coalesced %u nodes", __entry->nodes) ); -DEFINE_EVENT(bcache_bio, bcache_read_moving, +DEFINE_EVENT(cache_set, bcache_gc_start, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); - TP_PROTO(struct bio *bio), +DEFINE_EVENT(cache_set, bcache_gc_end, + TP_PROTO(struct cache_set *c), + TP_ARGS(c) +); - TP_ARGS(bio) +DEFINE_EVENT(bkey, bcache_gc_copy, + TP_PROTO(struct bkey *k), + TP_ARGS(k) ); -DEFINE_EVENT(bcache_bio, bcache_journal_write, +DEFINE_EVENT(bkey, bcache_gc_copy_collision, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); - TP_PROTO(struct bio *bio), +TRACE_EVENT(bcache_btree_insert_key, + TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status), + TP_ARGS(b, k, op, status), - TP_ARGS(bio) -); + TP_STRUCT__entry( + __field(u64, btree_node ) + __field(u32, btree_level ) + __field(u32, inode ) + __field(u64, offset ) + __field(u32, size ) + __field(u8, dirty ) + __field(u8, op ) + __field(u8, status ) + ), 
-DECLARE_EVENT_CLASS(bcache_cache_bio, + TP_fast_assign( + __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0); + __entry->btree_level = b->level; + __entry->inode = KEY_INODE(k); + __entry->offset = KEY_OFFSET(k); + __entry->size = KEY_SIZE(k); + __entry->dirty = KEY_DIRTY(k); + __entry->op = op; + __entry->status = status; + ), - TP_PROTO(struct bio *bio, - sector_t orig_sector, - struct block_device* orig_bdev), + TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u", + __entry->status, __entry->op, + __entry->btree_node, __entry->btree_level, + __entry->inode, __entry->offset, + __entry->size, __entry->dirty) +); - TP_ARGS(bio, orig_sector, orig_bdev), +DECLARE_EVENT_CLASS(btree_split, + TP_PROTO(struct btree *b, unsigned keys), + TP_ARGS(b, keys), TP_STRUCT__entry( - __field(dev_t, dev ) - __field(dev_t, orig_dev ) - __field(sector_t, sector ) - __field(sector_t, orig_sector ) - __field(unsigned int, nr_sector ) - __array(char, rwbs, 6 ) - __array(char, comm, TASK_COMM_LEN ) + __field(size_t, bucket ) + __field(unsigned, keys ) ), TP_fast_assign( - __entry->dev = bio->bi_bdev->bd_dev; - __entry->orig_dev = orig_bdev->bd_dev; - __entry->sector = bio->bi_sector; - __entry->orig_sector = orig_sector; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); + __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); + __entry->keys = keys; ), - TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d %llu)", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm, - MAJOR(__entry->orig_dev), MINOR(__entry->orig_dev), - (unsigned long long)__entry->orig_sector) + TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys) ); -DEFINE_EVENT(bcache_cache_bio, bcache_cache_insert, - - TP_PROTO(struct bio *bio, - sector_t orig_sector, - struct block_device *orig_bdev), +DEFINE_EVENT(btree_split, bcache_btree_node_split, + TP_PROTO(struct btree *b, unsigned keys), + TP_ARGS(b, keys) +); - TP_ARGS(bio, orig_sector, orig_bdev) +DEFINE_EVENT(btree_split, bcache_btree_node_compact, + TP_PROTO(struct btree *b, unsigned keys), + TP_ARGS(b, keys) ); -DECLARE_EVENT_CLASS(bcache_gc, +DEFINE_EVENT(btree_node, bcache_btree_set_root, + TP_PROTO(struct btree *b), + TP_ARGS(b) +); - TP_PROTO(uint8_t *uuid), +/* Allocator */ - TP_ARGS(uuid), +TRACE_EVENT(bcache_alloc_invalidate, + TP_PROTO(struct cache *ca), + TP_ARGS(ca), TP_STRUCT__entry( - __field(uint8_t *, uuid) + __field(unsigned, free ) + __field(unsigned, free_inc ) + __field(unsigned, free_inc_size ) + __field(unsigned, unused ) ), TP_fast_assign( - __entry->uuid = uuid; + __entry->free = fifo_used(&ca->free); + __entry->free_inc = fifo_used(&ca->free_inc); + __entry->free_inc_size = ca->free_inc.size; + __entry->unused = fifo_used(&ca->unused); ), - TP_printk("%pU", __entry->uuid) + TP_printk("free %u free_inc %u/%u unused %u", __entry->free, + __entry->free_inc, __entry->free_inc_size, __entry->unused) ); +TRACE_EVENT(bcache_alloc_fail, + TP_PROTO(struct cache *ca), + TP_ARGS(ca), -DEFINE_EVENT(bcache_gc, bcache_gc_start, + TP_STRUCT__entry( + __field(unsigned, free ) + __field(unsigned, free_inc ) + __field(unsigned, unused ) + __field(unsigned, blocked ) + ), - TP_PROTO(uint8_t *uuid), + TP_fast_assign( + __entry->free = fifo_used(&ca->free); + __entry->free_inc = fifo_used(&ca->free_inc); + __entry->unused = fifo_used(&ca->unused); + __entry->blocked = 
atomic_read(&ca->set->prio_blocked); + ), - TP_ARGS(uuid) + TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free, + __entry->free_inc, __entry->unused, __entry->blocked) ); -DEFINE_EVENT(bcache_gc, bcache_gc_end, +/* Background writeback */ - TP_PROTO(uint8_t *uuid), +DEFINE_EVENT(bkey, bcache_writeback, + TP_PROTO(struct bkey *k), + TP_ARGS(k) +); - TP_ARGS(uuid) +DEFINE_EVENT(bkey, bcache_writeback_collision, + TP_PROTO(struct bkey *k), + TP_ARGS(k) ); #endif /* _TRACE_BCACHE_H */ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index d615f78cc6b6..41a6643e2136 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -670,10 +670,6 @@ perf_trace_##call(void *__data, proto) \ sizeof(u64)); \ __entry_size -= sizeof(u32); \ \ - if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ - "profile buffer not large enough")) \ - return; \ - \ entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ __entry_size, event_call->event.type, &__regs, &rctx); \ if (!entry) \ diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h index 05ac354e124d..95e46c8e05f9 100644 --- a/include/uapi/asm-generic/fcntl.h +++ b/include/uapi/asm-generic/fcntl.h @@ -89,8 +89,8 @@ #endif /* a horrid kludge trying to make sure that this will fail on old kernels */ -#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY | O_RDWR) -#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT | O_ACCMODE) +#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY) +#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT) #ifndef O_NDELAY #define O_NDELAY O_NONBLOCK diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h index d50036953497..1db453e4b550 100644 --- a/include/uapi/linux/firewire-cdev.h +++ b/include/uapi/linux/firewire-cdev.h @@ -215,8 +215,8 @@ struct fw_cdev_event_request2 { * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets * without the interrupt bit set that the kernel's internal buffer for @header - * is about to overflow. (In the last case, kernels with ABI version < 5 drop - * header data up to the next interrupt packet.) + * is about to overflow. (In the last case, ABI versions < 5 drop header data + * up to the next interrupt packet.) * * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT): * diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index caed0f324d5f..8137dd8d2adf 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -69,8 +69,8 @@ * starting a poll from a device which has a secure element enabled means * we want to do SE based card emulation. * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element. - * @NFC_CMD_FW_UPLOAD: Request to Load/flash firmware, or event to inform that - * some firmware was loaded + * @NFC_CMD_FW_DOWNLOAD: Request to Load/flash firmware, or event to inform + * that some firmware was loaded */ enum nfc_commands { NFC_CMD_UNSPEC, @@ -94,7 +94,7 @@ enum nfc_commands { NFC_CMD_DISABLE_SE, NFC_CMD_LLC_SDREQ, NFC_EVENT_LLC_SDRES, - NFC_CMD_FW_UPLOAD, + NFC_CMD_FW_DOWNLOAD, NFC_EVENT_SE_ADDED, NFC_EVENT_SE_REMOVED, /* private: internal use only */ diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h index 7692dc69ccf7..331499d597fa 100644 --- a/include/uapi/linux/usb/ch11.h +++ b/include/uapi/linux/usb/ch11.h @@ -11,6 +11,17 @@ #include <linux/types.h> /* __u8 etc */ +/* This is arbitrary. 
+ * From USB 2.0 spec Table 11-13, offset 7, a hub can + * have up to 255 ports. The most yet reported is 10. + * + * Current Wireless USB host hardware (Intel i1480 for example) allows + * up to 22 devices to connect. Upcoming hardware might raise that + * limit. Because the arrays need to add a bit for hub status data, we + * use 31, so plus one evens out to four bytes. + */ +#define USB_MAXCHILDREN 31 + /* * Hub request types */ diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index ffd4652de91c..65e12099ef89 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -103,12 +103,46 @@ typedef uint64_t blkif_sector_t; #define BLKIF_OP_DISCARD 5 /* + * Recognized if "feature-max-indirect-segments" in present in the backend + * xenbus info. The "feature-max-indirect-segments" node contains the maximum + * number of segments allowed by the backend per request. If the node is + * present, the frontend might use blkif_request_indirect structs in order to + * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The + * maximum number of indirect segments is fixed by the backend, but the + * frontend can issue requests with any number of indirect segments as long as + * it's less than the number provided by the backend. The indirect_grefs field + * in blkif_request_indirect should be filled by the frontend with the + * grant references of the pages that are holding the indirect segments. + * This pages are filled with an array of blkif_request_segment_aligned + * that hold the information about the segments. The number of indirect + * pages to use is determined by the maximum number of segments + * a indirect request contains. Every indirect page can contain a maximum + * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), + * so to calculate the number of indirect pages to use we have to do + * ceil(indirect_segments/512). + * + * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* + * create the "feature-max-indirect-segments" node! + */ +#define BLKIF_OP_INDIRECT 6 + +/* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 +#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 + +struct blkif_request_segment_aligned { + grant_ref_t gref; /* reference to I/O buffer frame */ + /* @first_sect: first sector in frame to transfer (inclusive). */ + /* @last_sect: last sector in frame to transfer (inclusive). 
*/ + uint8_t first_sect, last_sect; + uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ +} __attribute__((__packed__)); + struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ @@ -147,12 +181,31 @@ struct blkif_request_other { uint64_t id; /* private guest value, echoed in resp */ } __attribute__((__packed__)); +struct blkif_request_indirect { + uint8_t indirect_op; + uint16_t nr_segments; +#ifdef CONFIG_X86_64 + uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ +#endif + uint64_t id; + blkif_sector_t sector_number; + blkif_vdev_t handle; + uint16_t _pad2; + grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; +#ifdef CONFIG_X86_64 + uint32_t _pad3; /* make it 64 byte aligned */ +#else + uint64_t _pad3; /* make it 64 byte aligned */ +#endif +} __attribute__((__packed__)); + struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; struct blkif_request_other other; + struct blkif_request_indirect indirect; } u; } __attribute__((__packed__)); diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index 75271b9a8f61..7d28aff605c7 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -188,6 +188,11 @@ struct __name##_back_ring { \ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) +/* Ill-behaved frontend determination: Can there be this many requests? */ +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) + + #define RING_PUSH_REQUESTS(_r) do { \ wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ |
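The include/xen/interface/io/blkif.h hunk above notes that a frontend needs ceil(indirect_segments / 512) indirect pages per request, since each 4 KiB page holds PAGE_SIZE / sizeof(struct blkif_request_segment_aligned) = 512 descriptors. A minimal sketch of that bookkeeping, assuming 4 KiB pages; the helper name is made up and not part of the patch:

```c
#include <linux/kernel.h>
#include <xen/interface/io/blkif.h>

/* Segment descriptors that fit in one indirect page: 4096 / 8 = 512. */
#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE / sizeof(struct blkif_request_segment_aligned))

/* ceil(nr_segments / 512), as described in the blkif.h comment. */
static inline unsigned int blkif_indirect_pages(unsigned int nr_segments)
{
	return DIV_ROUND_UP(nr_segments, SEGS_PER_INDIRECT_FRAME);
}

/*
 * Examples: 11 segments -> 1 indirect page, 600 segments -> 2 pages;
 * BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST (8) therefore caps one request
 * at 8 * 512 = 4096 segments.
 */
```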