author     David S. Miller <davem@davemloft.net>	2013-09-05 14:58:52 -0400
committer  David S. Miller <davem@davemloft.net>	2013-09-05 14:58:52 -0400
commit     06c54055bebf919249aa1eb68312887c3cfe77b4 (patch)
tree       223a49c09e5d26516ed0161b8a52d08454ae028e /drivers
parent     1a5bbfc3d6b700178b75743a2ba1fd2e58a8f36f (diff)
parent     e2e5c4c07caf810d7849658dca42f598b3938e21 (diff)
download   linux-06c54055bebf919249aa1eb68312887c3cfe77b4.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
	net/bridge/br_multicast.c
	net/ipv6/sit.c
The conflicts were minor:
1) The sit.c changes overlap with the change to the ip_tunnel_xmit()
   signature.
2) br_multicast.c had an overlap between computing max_delay using
   msecs_to_jiffies() and turning the MLDV2_MRC() macro into an inline
   function with a lowercase name (see the br_multicast.c sketch below).
3) stmmac had two overlapping changes: one conditionally allocated and
   hooked up a dma_cfg based upon the presence of the pbl OF property,
   and the other handled store-and-forward DMA mode.  The latter should
   not go into the new of_find_property() basic block (see the stmmac
   sketch below).
Signed-off-by: David S. Miller <davem@davemloft.net>
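
As an illustration of conflict (2), a minimal, hedged sketch of the merged
computation in br_multicast.c: one side changed how max_delay is derived
(converting through msecs_to_jiffies()), the other turned the MLDV2_MRC()
macro into the lowercase inline helper mldv2_mrc(). The resolved line ends
up roughly as below; the surrounding MLDv2 query-parsing code is not
reproduced and details may differ.

	/* In br_ip6_multicast_query(): the inline mldv2_mrc() from the
	 * rename feeds the msecs_to_jiffies() conversion from the other
	 * change; mld2q points at the parsed MLDv2 query header. */
	max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);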
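
For conflict (3), a hedged sketch of the intended shape in
stmmac_platform.c: the dma_cfg allocation stays inside the basic block
guarded by of_find_property() on the pbl property, while the
store-and-forward mode handling stays outside it. The property and field
names used here ("snps,pbl", "snps,force_sf_dma_mode", plat->dma_cfg)
follow the stmmac platform code of that period, but this illustrates
placement only, not the exact resolved hunk.

	/* dma_cfg is only allocated and filled in when the pbl property
	 * is present in the device tree node. */
	if (of_find_property(np, "snps,pbl", NULL)) {
		struct stmmac_dma_cfg *dma_cfg;

		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
				       GFP_KERNEL);
		if (!dma_cfg)
			return -ENOMEM;
		plat->dma_cfg = dma_cfg;
		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	}
	/* The store-and-forward DMA mode handling must stay outside the
	 * of_find_property() block above. */
	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");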
Diffstat (limited to 'drivers')
64 files changed, 625 insertions, 328 deletions
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index e1284b8dc6ee..3270d3c8ba4e 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) device->cap._DDC = 1; } - if (acpi_video_init_brightness(device)) - return; - if (acpi_video_backlight_support()) { struct backlight_properties props; struct pci_dev *pdev; @@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) static int count = 0; char *name; + result = acpi_video_init_brightness(device); + if (result) + return; name = kasprintf(GFP_KERNEL, "acpi_video%d", count); if (!name) return; @@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) if (result) printk(KERN_ERR PREFIX "Create sysfs link\n"); - } else { - /* Remove the brightness object. */ - kfree(device->brightness->levels); - kfree(device->brightness); - device->brightness = NULL; } } diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 1c41722bb7e2..20fd337a5731 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info) /* Disable sending Early R_OK. * With "cached read" HDD testing and multiple ports busy on a SATA - * host controller, 3726 PMP will very rarely drop a deferred + * host controller, 3x26 PMP will very rarely drop a deferred * R_OK that was intended for the host. Symptom will be all * 5 drives under test will timeout, get reset, and recover. */ - if (vendor == 0x1095 && devid == 0x3726) { + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { u32 reg; err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, ®); if (err_mask) { rc = -EIO; - reason = "failed to read Sil3726 Private Register"; + reason = "failed to read Sil3x26 Private Register"; goto fail; } reg &= ~0x1; err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); if (err_mask) { rc = -EIO; - reason = "failed to write Sil3726 Private Register"; + reason = "failed to write Sil3x26 Private Register"; goto fail; } } @@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap) u16 devid = sata_pmp_gscr_devid(gscr); struct ata_link *link; - if (vendor == 0x1095 && devid == 0x3726) { - /* sil3726 quirks */ + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { + /* sil3x26 quirks */ ata_for_each_link(link, ap, EDGE) { /* link reports offline after LPM */ link->flags |= ATA_LFLAG_NO_LPM; diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 19720a0a4a65..851bd3f43ac6 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, { struct sata_fsl_host_priv *host_priv = host->private_data; void __iomem *hcr_base = host_priv->hcr_base; + unsigned long flags; if (count > ICC_MAX_INT_COUNT_THRESHOLD) count = ICC_MAX_INT_COUNT_THRESHOLD; @@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, (count > ICC_MIN_INT_COUNT_THRESHOLD)) ticks = ICC_SAFE_INT_TICKS; - spin_lock(&host->lock); + spin_lock_irqsave(&host->lock, flags); iowrite32((count << 24 | ticks), hcr_base + ICC); intr_coalescing_count = count; intr_coalescing_ticks = ticks; - spin_unlock(&host->lock); + spin_unlock_irqrestore(&host->lock, flags); DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", intr_coalescing_count, intr_coalescing_ticks); diff --git a/drivers/ata/sata_highbank.c 
b/drivers/ata/sata_highbank.c index d047d92a456f..e9a4f46d962e 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -86,11 +86,11 @@ struct ecx_plat_data { #define SGPIO_SIGNALS 3 #define ECX_ACTIVITY_BITS 0x300000 -#define ECX_ACTIVITY_SHIFT 2 +#define ECX_ACTIVITY_SHIFT 0 #define ECX_LOCATE_BITS 0x80000 #define ECX_LOCATE_SHIFT 1 #define ECX_FAULT_BITS 0x400000 -#define ECX_FAULT_SHIFT 0 +#define ECX_FAULT_SHIFT 2 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, u32 shift) { diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 2b7813ec6d02..ec386ee9cb22 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev, container_of(dev, struct memory_block, dev); for (i = 0; i < sections_per_block; i++) { + if (!present_section_nr(mem->start_section_nr + i)) + continue; pfn = section_nr_to_pfn(mem->start_section_nr + i); ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); } diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 5c1435c4e210..0fccc99881fd 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -332,7 +332,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) } if (!rbnode->blklen) { - rbnode->blklen = sizeof(*rbnode); + rbnode->blklen = 1; rbnode->base_reg = reg; } diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 19e36603b23b..3bc8414533c9 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo, &status)) goto log_fail; - while (status == SDVO_CMD_STATUS_PENDING && retry--) { + while ((status == SDVO_CMD_STATUS_PENDING || + status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) { udelay(15); if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, SDVO_I2C_CMD_STATUS, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6f514297c483..342f1f336168 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -752,6 +752,8 @@ will not assert AGPBUSY# and will only be delivered when out of C3. 
*/ #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ +#define INSTPM_TLB_INVALIDATE (1<<9) +#define INSTPM_SYNC_FLUSH (1<<5) #define ACTHD 0x020c8 #define FW_BLC 0x020d8 #define FW_BLC2 0x020dc @@ -4438,7 +4440,7 @@ #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) -#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22) +#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22) /* legacy values */ #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 664118d8c1d6..079ef0129e74 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); POSTING_READ(mmio); + + /* Flush the TLB for this page */ + if (INTEL_INFO(dev)->gen >= 6) { + u32 reg = RING_INSTPM(ring->mmio_base); + I915_WRITE(reg, + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | + INSTPM_SYNC_FLUSH)); + if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, + 1000)) + DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", + ring->name); + } } static int diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c index d8291724dbd4..7a4e0891c5f8 100644 --- a/drivers/gpu/drm/nouveau/core/core/mm.c +++ b/drivers/gpu/drm/nouveau/core/core/mm.c @@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, u32 splitoff; u32 s, e; + BUG_ON(!type); + list_for_each_entry(this, &mm->free, fl_entry) { e = this->offset + this->length; s = this->offset; @@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, struct nouveau_mm_node *prev, *this, *next; u32 mask = align - 1; + BUG_ON(!type); + list_for_each_entry_reverse(this, &mm->free, fl_entry) { u32 e = this->offset + this->length; u32 s = this->offset; diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index d5502267c30f..9d2cd2006250 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h @@ -20,8 +20,8 @@ nouveau_mc(void *obj) return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; } -#define nouveau_mc_create(p,e,o,d) \ - nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_mc_create(p,e,o,m,d) \ + nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d) #define nouveau_mc_destroy(p) ({ \ struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ }) @@ -33,7 +33,8 @@ nouveau_mc(void *obj) }) int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, int, void **); + struct nouveau_oclass *, const struct nouveau_mc_intr *, + int, void **); void _nouveau_mc_dtor(struct nouveau_object *); int _nouveau_mc_init(struct nouveau_object *); int _nouveau_mc_fini(struct nouveau_object *, bool); diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c index 19e3a9a63a02..ab7ef0ac9e34 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c @@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, return ret; switch (pfb914 & 0x00000003) { - case 0x00000000: pfb->ram->type = 
NV_MEM_TYPE_DDR1; break; - case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break; - case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break; + case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break; + case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break; + case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break; case 0x00000003: break; } - pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; - pfb->ram->tags = nv_rd32(pfb, 0x100320); + ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; + ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; + ram->tags = nv_rd32(pfb, 0x100320); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c index 7192aa6e5577..63a6aab86028 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c @@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; - pfb->ram->type = NV_MEM_TYPE_STOLEN; + ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; + ram->type = NV_MEM_TYPE_STOLEN; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c index bcca883018f4..cce65cc56514 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c @@ -30,8 +30,9 @@ struct nvc0_ltcg_priv { struct nouveau_ltcg base; u32 part_nr; u32 subp_nr; - struct nouveau_mm tags; u32 num_tags; + u32 tag_base; + struct nouveau_mm tags; struct nouveau_mm_node *tag_ram; }; @@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) u32 tag_size, tag_margin, tag_align; int ret; - nv_wr32(priv, 0x17e8d8, priv->part_nr); - if (nv_device(pfb)->card_type >= NV_E0) - nv_wr32(priv, 0x17e000, priv->part_nr); - /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ priv->num_tags = (pfb->ram->size >> 17) / 4; if (priv->num_tags > (1 << 17)) @@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) tag_size += tag_align; tag_size = (tag_size + 0xfff) >> 12; /* round up */ - ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, + ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1, &priv->tag_ram); if (ret) { priv->num_tags = 0; @@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) tag_base += tag_align - 1; ret = do_div(tag_base, tag_align); - nv_wr32(priv, 0x17e8d4, tag_base); + priv->tag_base = tag_base; } ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); @@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, } priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; - nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ - ret = nvc0_ltcg_init_tag_ram(pfb, priv); if (ret) return ret; @@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object) nouveau_ltcg_destroy(ltcg); } +static int +nvc0_ltcg_init(struct nouveau_object *object) +{ + struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; + struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; + int ret; + + ret = nouveau_ltcg_init(ltcg); + if (ret) + return ret; + + nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ + nv_wr32(priv, 0x17e8d8, priv->part_nr); + if (nv_device(ltcg)->card_type >= NV_E0) + 
nv_wr32(priv, 0x17e000, priv->part_nr); + nv_wr32(priv, 0x17e8d4, priv->tag_base); + return 0; +} + struct nouveau_oclass nvc0_ltcg_oclass = { .handle = NV_SUBDEV(LTCG, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_ltcg_ctor, .dtor = nvc0_ltcg_dtor, - .init = _nouveau_ltcg_init, + .init = nvc0_ltcg_init, .fini = _nouveau_ltcg_fini, }, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 1c0330b8c9a4..ec9cd6f10f91 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c @@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object) int nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, int length, void **pobject) + struct nouveau_oclass *oclass, + const struct nouveau_mc_intr *intr_map, + int length, void **pobject) { struct nouveau_device *device = nv_device(parent); struct nouveau_mc *pmc; @@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + pmc->intr_map = intr_map; + ret = request_irq(device->pdev->irq, nouveau_mc_intr, IRQF_SHARED, "nouveau", pmc); if (ret < 0) diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c index 8c769715227b..64aa4edb0d9d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c @@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv04_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nv04_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 51919371810f..d9891782bf28 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c @@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv44_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nv04_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c index f25fc5fc7dd1..2b1afe225db8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c @@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv50_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nv50_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c index e82fd21b5041..0d57b4d3e001 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c @@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, 
engine, oclass, nv98_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nv98_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index c5da3babbc62..104175c5a2dd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c @@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nvc0_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nvc0_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 0782bd2f1e04..6a13ffb53bdb 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) regp->ramdac_a34 = 0x1; } +static int +nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) +{ + struct nv04_display *disp = nv04_display(crtc->dev); + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ret; + + ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); + if (ret == 0) { + if (disp->image[nv_crtc->index]) + nouveau_bo_unpin(disp->image[nv_crtc->index]); + nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]); + } + + return ret; +} + /** * Sets up registers for the given mode/adjusted_mode pair. * @@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_device *dev = crtc->dev; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_drm *drm = nouveau_drm(dev); + int ret; NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); drm_mode_debug_printmodeline(adjusted_mode); + ret = nv_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; + /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); @@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc) static void nv_crtc_destroy(struct drm_crtc *crtc) { + struct nv04_display *disp = nv04_display(crtc->dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); if (!nv_crtc) @@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); + if (disp->image[nv_crtc->index]) + nouveau_bo_unpin(disp->image[nv_crtc->index]); + nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); nouveau_bo_unpin(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); @@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc) } static void +nv_crtc_disable(struct drm_crtc *crtc) +{ + struct nv04_display *disp = nv04_display(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + if (disp->image[nv_crtc->index]) + nouveau_bo_unpin(disp->image[nv_crtc->index]); + nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); +} + +static void nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size) { @@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, struct drm_framebuffer *drm_fb; struct nouveau_framebuffer *fb; int arb_burst, arb_lwm; - int ret; NV_DEBUG(drm, "index %d\n", nv_crtc->index); @@ -801,10 +838,8 @@ 
nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, return 0; } - /* If atomic, we want to switch to the fb we were passed, so - * now we update pointers to do that. (We don't pin; just - * assume we're already pinned and update the base address.) + * now we update pointers to do that. */ if (atomic) { drm_fb = passed_fb; @@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, } else { drm_fb = crtc->fb; fb = nouveau_framebuffer(crtc->fb); - /* If not atomic, we can go ahead and pin, and unpin the - * old fb we were passed. - */ - ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; - - if (passed_fb) { - struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); - nouveau_bo_unpin(ofb->nvbo); - } } nv_crtc->fb.offset = fb->nvbo->bo.offset; @@ -877,6 +901,9 @@ static int nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { + int ret = nv_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); } @@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { .mode_set_base = nv04_crtc_mode_set_base, .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, .load_lut = nv_crtc_gamma_load, + .disable = nv_crtc_disable, }; int diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index a0a031dad13f..9928187f0a7d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h @@ -81,6 +81,7 @@ struct nv04_display { uint32_t saved_vga_font[4][16384]; uint32_t dac_users[4]; struct nouveau_object *core; + struct nouveau_bo *image[2]; }; static inline struct nv04_display * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 907d20ef6d4d..a03e75deacaf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, ret = nv50_display_flip_next(crtc, fb, chan, 0); if (ret) goto fail_unreserve; + } else { + struct nv04_display *dispnv04 = nv04_display(dev); + nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]); } ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index 3af5bcd0b203..625f80d53dc2 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c @@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll, if (clk < pll->vco1.max_freq) pll->vco2.max_freq = 0; - pclk->pll_calc(pclk, pll, clk, &coef); + ret = pclk->pll_calc(pclk, pll, clk, &coef); if (ret == 0) return -ERANGE; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 3751730764a5..1a0bf07fe54b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -29,7 +29,9 @@ #include <drm/drmP.h> #include <drm/ttm/ttm_bo_driver.h> -#define VMW_PPN_SIZE sizeof(unsigned long) +#define VMW_PPN_SIZE (sizeof(unsigned long)) +/* A future safe maximum remap size. 
*/ +#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE) static int vmw_gmr2_bind(struct vmw_private *dev_priv, struct page *pages[], @@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, { SVGAFifoCmdDefineGMR2 define_cmd; SVGAFifoCmdRemapGMR2 remap_cmd; - uint32_t define_size = sizeof(define_cmd) + 4; - uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4; uint32_t *cmd; uint32_t *cmd_orig; + uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd); + uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0); + uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num; + uint32_t remap_pos = 0; + uint32_t cmd_size = define_size + remap_size; uint32_t i; - cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size); + cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size); if (unlikely(cmd == NULL)) return -ENOMEM; define_cmd.gmrId = gmr_id; define_cmd.numPages = num_pages; + *cmd++ = SVGA_CMD_DEFINE_GMR2; + memcpy(cmd, &define_cmd, sizeof(define_cmd)); + cmd += sizeof(define_cmd) / sizeof(*cmd); + + /* + * Need to split the command if there are too many + * pages that goes into the gmr. + */ + remap_cmd.gmrId = gmr_id; remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; - remap_cmd.offsetPages = 0; - remap_cmd.numPages = num_pages; - *cmd++ = SVGA_CMD_DEFINE_GMR2; - memcpy(cmd, &define_cmd, sizeof(define_cmd)); - cmd += sizeof(define_cmd) / sizeof(uint32); + while (num_pages > 0) { + unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP); + + remap_cmd.offsetPages = remap_pos; + remap_cmd.numPages = nr; - *cmd++ = SVGA_CMD_REMAP_GMR2; - memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); - cmd += sizeof(remap_cmd) / sizeof(uint32); + *cmd++ = SVGA_CMD_REMAP_GMR2; + memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); + cmd += sizeof(remap_cmd) / sizeof(*cmd); - for (i = 0; i < num_pages; ++i) { - if (VMW_PPN_SIZE <= 4) - *cmd = page_to_pfn(*pages++); - else - *((uint64_t *)cmd) = page_to_pfn(*pages++); + for (i = 0; i < nr; ++i) { + if (VMW_PPN_SIZE <= 4) + *cmd = page_to_pfn(*pages++); + else + *((uint64_t *)cmd) = page_to_pfn(*pages++); - cmd += VMW_PPN_SIZE / sizeof(*cmd); + cmd += VMW_PPN_SIZE / sizeof(*cmd); + } + + num_pages -= nr; + remap_pos += nr; } - vmw_fifo_commit(dev_priv, define_size + remap_size); + BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd)); + + vmw_fifo_commit(dev_priv, cmd_size); return 0; } diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c index 5f4749e60b04..c1cd5698b8ae 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c @@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: - ret = adjd_s311_read_data(indio_dev, chan->address, val); + ret = adjd_s311_read_data(indio_dev, + ADJD_S311_DATA_REG(chan->address), val); if (ret < 0) return ret; return IIO_VAL_INT; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index fa061d46527f..75e3b102ce45 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -167,6 +167,7 @@ static const struct xpad_device { { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1689, 0xfd01, "Razer Onza 
Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 57b2637e153a..8551dcaf24db 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse) */ static int elantech_packet_check_v3(struct psmouse *psmouse) { + struct elantech_data *etd = psmouse->private; const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; unsigned char *packet = psmouse->packet; @@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse) if (!memcmp(packet, debounce_packet, sizeof(debounce_packet))) return PACKET_DEBOUNCE; - if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) - return PACKET_V3_HEAD; + /* + * If the hardware flag 'crc_enabled' is set the packets have + * different signatures. + */ + if (etd->crc_enabled) { + if ((packet[3] & 0x09) == 0x08) + return PACKET_V3_HEAD; + + if ((packet[3] & 0x09) == 0x09) + return PACKET_V3_TAIL; + } else { + if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) + return PACKET_V3_HEAD; - if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) - return PACKET_V3_TAIL; + if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) + return PACKET_V3_TAIL; + } return PACKET_UNKNOWN; } static int elantech_packet_check_v4(struct psmouse *psmouse) { + struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; unsigned char packet_type = packet[3] & 0x03; + bool sanity_check; + + /* + * Sanity check based on the constant bits of a packet. + * The constant bits change depending on the value of + * the hardware flag 'crc_enabled' but are the same for + * every packet, regardless of the type. + */ + if (etd->crc_enabled) + sanity_check = ((packet[3] & 0x08) == 0x00); + else + sanity_check = ((packet[0] & 0x0c) == 0x04 && + (packet[3] & 0x1c) == 0x10); + + if (!sanity_check) + return PACKET_UNKNOWN; switch (packet_type) { case 0: @@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd) etd->reports_pressure = true; } + /* + * The signatures of v3 and v4 packets change depending on the + * value of this hardware flag. + */ + etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); + return 0; } diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index 46db3be45ac9..036a04abaef7 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -129,6 +129,7 @@ struct elantech_data { bool paritycheck; bool jumpy_cursor; bool reports_pressure; + bool crc_enabled; unsigned char hw_version; unsigned int fw_version; unsigned int single_finger_reports; diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index 94c17c28d268..1e691a3a79cb 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig @@ -22,7 +22,8 @@ config SERIO_I8042 tristate "i8042 PC Keyboard controller" if EXPERT || !X86 default y depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ - (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 + (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \ + !ARC help i8042 is the chip over which the standard AT keyboard and PS/2 mouse are connected to the computer. 
If you use these devices, diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 384fbcd0cee0..f3e91f0b57ae 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA = { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; -static struct wacom_features wacom_features_0xDB = +static const struct wacom_features wacom_features_0xDB = { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; @@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF = { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16 }; +static const struct wacom_features wacom_features_0x300 = + { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023, + 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x301 = + { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023, + 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x6004 = { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x100) }, { USB_DEVICE_WACOM(0x101) }, { USB_DEVICE_WACOM(0x10D) }, + { USB_DEVICE_WACOM(0x300) }, + { USB_DEVICE_WACOM(0x301) }, { USB_DEVICE_WACOM(0x304) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x47) }, diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c index 69ea44ebcf61..4851afae38dc 100644 --- a/drivers/irqchip/irq-sirfsoc.c +++ b/drivers/irqchip/irq-sirfsoc.c @@ -23,7 +23,7 @@ #define SIRFSOC_INT_RISC_LEVEL1 0x0024 #define SIRFSOC_INIT_IRQ_ID 0x0038 -#define SIRFSOC_NUM_IRQS 128 +#define SIRFSOC_NUM_IRQS 64 static struct irq_domain *sirfsoc_irqdomain; @@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) { struct irq_chip_generic *gc; struct irq_chip_type *ct; + int ret; + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; - gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq); - ct = gc->chip_types; + ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc", + handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); + gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start); + gc->reg_base = base; + ct = gc->chip_types; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->regs.mask = SIRFSOC_INT_RISC_MASK0; - - irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0); } static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) @@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p if (!base) panic("unable to map intc cpu registers\n"); - /* using legacy because irqchip_generic does not work with linear */ - sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0, - &irq_domain_simple_ops, base); + sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS, + &irq_generic_chip_ops, base); sirfsoc_alloc_gc(base, 0, 32); sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); diff --git a/drivers/isdn/mISDN/dsp_core.c 
b/drivers/isdn/mISDN/dsp_core.c index 22b720ec80cb..77025f5cb57d 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb) u8 *data; int len; - if (skb->len < sizeof(int)) + if (skb->len < sizeof(int)) { printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); + return -EINVAL; + } cont = *((int *)skb->data); len = skb->len - sizeof(int); data = skb->data + sizeof(int); diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index a5f91e1e8fe3..becef25fa194 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -148,7 +148,7 @@ config PCMCIA_PCNET config NE_H8300 tristate "NE2000 compatible support for H8/300" - depends on H8300 + depends on H8300H_AKI3068NET || H8300H_H8MAX ---help--- Say Y here if you want to use the NE2000 compatible controller on the Renesas H8/300 processor. diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8d726f6e1c52..2361bf236ce3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; int old_max_eth_txqs, new_max_eth_txqs; int old_txdata_index = 0, new_txdata_index = 0; + struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; /* Copy the NAPI object as it has been already initialized */ from_fp->napi = to_fp->napi; @@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) memcpy(to_fp, from_fp, sizeof(*to_fp)); to_fp->index = to; + /* Retain the tpa_info of the original `to' version as we don't want + * 2 FPs to contain the same tpa_info pointer. 
+ */ + to_fp->tpa_info = old_tpa_info; + /* move sp_objs contents as well, as their indices match fp ones */ memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); @@ -2959,8 +2965,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) if (IS_PF(bp)) { if (CNIC_LOADED(bp)) bnx2x_free_mem_cnic(bp); - bnx2x_free_mem(bp); } + bnx2x_free_mem(bp); + bp->state = BNX2X_STATE_CLOSED; bp->cnic_loaded = false; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 9d64b988ab34..664568420c9b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params, struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; if (vars->line_speed == SPEED_AUTO_NEG && (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp))) + CHIP_IS_E2(bp))) { bnx2x_set_parallel_detection(phy, params); if (params->phy[INT_PHY].config_init) params->phy[INT_PHY].config_init(phy, params, vars); + } } /* Init external phy*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c69990d2170e..285f2a59a3a5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp) { int i; - BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, - sizeof(struct host_sp_status_block)); - BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, bp->fw_stats_data_sz + bp->fw_stats_req_sz); + if (IS_VF(bp)) + return; + + BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, sizeof(struct bnx2x_slowpath)); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 73731eb68f2a..b26eb83069b6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -545,23 +545,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp, return 0; } -static int -bnx2x_vfop_config_vlan0(struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *vlan_mac, - bool add) -{ - int rc; - - vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD : - BNX2X_VLAN_MAC_DEL; - vlan_mac->user_req.u.vlan.vlan = 0; - - rc = bnx2x_config_vlan_mac(bp, vlan_mac); - if (rc == -EEXIST) - rc = 0; - return rc; -} - static int bnx2x_vfop_config_list(struct bnx2x *bp, struct bnx2x_vfop_filters *filters, struct bnx2x_vlan_mac_ramrod_params *vlan_mac) @@ -666,30 +649,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) case BNX2X_VFOP_VLAN_CONFIG_LIST: /* next state */ - vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; - - /* remove vlan0 - could be no-op */ - vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); - if (vfop->rc) - goto op_err; + vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - /* Do vlan list config. 
if this operation fails we try to - * restore vlan0 to keep the queue is working order - */ + /* do list config */ vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); if (!vfop->rc) { set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); } - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ - - case BNX2X_VFOP_VLAN_CONFIG_LIST_0: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - if (list_empty(&obj->head)) - /* add vlan0 */ - vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); default: @@ -2833,6 +2800,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) return 0; } +struct set_vf_state_cookie { + struct bnx2x_virtf *vf; + u8 state; +}; + +void bnx2x_set_vf_state(void *cookie) +{ + struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; + + p->vf->state = p->state; +} + /* VFOP close (teardown the queues, delete mcasts and close HW) */ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) { @@ -2883,7 +2862,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) op_err: BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); op_done: - vf->state = VF_ACQUIRED; + + /* need to make sure there are no outstanding stats ramrods which may + * cause the device to access the VF's stats buffer which it will free + * as soon as we return from the close flow. + */ + { + struct set_vf_state_cookie cookie; + + cookie.vf = vf; + cookie.state = VF_ACQUIRED; + bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); + } + DP(BNX2X_MSG_IOV, "set state to acquired\n"); bnx2x_vfop_end(bp, vf, vfop); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index d63d1327b051..86436c77af03 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) /* should be called under stats_sema */ static void __bnx2x_stats_start(struct bnx2x *bp) { - /* vfs travel through here as part of the statistics FSM, but no action - * is required - */ - if (IS_VF(bp)) - return; - - if (bp->port.pmf) - bnx2x_port_stats_init(bp); + if (IS_PF(bp)) { + if (bp->port.pmf) + bnx2x_port_stats_init(bp); - else if (bp->func_stx) - bnx2x_func_stats_init(bp); + else if (bp->func_stx) + bnx2x_func_stats_init(bp); - bnx2x_hw_stats_post(bp); - bnx2x_storm_stats_post(bp); + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); + } bp->stats_started = true; } @@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, estats->mac_discard); } } + +void bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie){ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + bnx2x_stats_comp(bp); + func_to_exec(cookie); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 853824d258e8..f35845006cdd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -539,6 +539,9 @@ struct bnx2x; void bnx2x_memset_stats(struct bnx2x *bp); void bnx2x_stats_init(struct bnx2x *bp); void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); +void 
bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie); /** * bnx2x_save_statistics - save statistics when unloading. diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 2e55ee29cf13..5701f3d1a169 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp) return false; } +static bool tg3_phy_led_bug(struct tg3 *tp) +{ + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5719: + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + !tp->pci_fn) + return true; + return false; + } + + return false; +} + static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; @@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) } return; } else if (do_low_power) { - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_FORCE_LED_OFF); + if (!tg3_phy_led_bug(tp)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 7cb148c495c9..78d6d6b970e1 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -353,11 +353,9 @@ struct xgmac_extra_stats { /* Receive errors */ unsigned long rx_watchdog; unsigned long rx_da_filter_fail; - unsigned long rx_sa_filter_fail; unsigned long rx_payload_error; unsigned long rx_ip_header_error; /* Tx/Rx IRQ errors */ - unsigned long tx_undeflow; unsigned long tx_process_stopped; unsigned long rx_buf_unav; unsigned long rx_process_stopped; @@ -393,6 +391,7 @@ struct xgmac_priv { char rx_pause; char tx_pause; int wolopts; + struct work_struct tx_timeout_work; }; /* XGMAC Configuration Settings */ @@ -409,6 +408,9 @@ struct xgmac_priv { #define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) #define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) +#define tx_dma_ring_space(p) \ + dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ) + /* XGMAC Descriptor Access Helpers */ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) { @@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) static inline int desc_get_buf_len(struct xgmac_dma_desc *p) { - u32 len = cpu_to_le32(p->flags); + u32 len = le32_to_cpu(p->buf_size); return (len & DESC_BUFFER1_SZ_MASK) + ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); } @@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) p->flags = cpu_to_le32(tmpflags); } +static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p) +{ + u32 tmpflags = le32_to_cpu(p->flags); + tmpflags &= TXDESC_END_RING; + p->flags = cpu_to_le32(tmpflags); +} + static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) { return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; } +static inline int desc_get_tx_fs(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG; +} + static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) { return le32_to_cpu(p->buf1_addr); @@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, { u32 data; - data = (addr[5] << 8) | addr[4] | (num ? 
XGMAC_ADDR_AE : 0); - writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); - data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - writel(data, ioaddr + XGMAC_ADDR_LOW(num)); + if (addr) { + data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); + writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + XGMAC_ADDR_LOW(num)); + } else { + writel(0, ioaddr + XGMAC_ADDR_HIGH(num)); + writel(0, ioaddr + XGMAC_ADDR_LOW(num)); + } } static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, @@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv) if (unlikely(skb == NULL)) break; - priv->rx_skbuff[entry] = skb; paddr = dma_map_single(priv->device, skb->data, - bufsz, DMA_FROM_DEVICE); + priv->dma_buf_sz - NET_IP_ALIGN, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, paddr)) { + dev_kfree_skb_any(skb); + break; + } + priv->rx_skbuff[entry] = skb; desc_set_buf_addr(p, paddr, priv->dma_buf_sz); } @@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) return; for (i = 0; i < DMA_RX_RING_SZ; i++) { - if (priv->rx_skbuff[i] == NULL) + struct sk_buff *skb = priv->rx_skbuff[i]; + if (skb == NULL) continue; p = priv->dma_rx + i; dma_unmap_single(priv->device, desc_get_buf_addr(p), - priv->dma_buf_sz, DMA_FROM_DEVICE); - dev_kfree_skb_any(priv->rx_skbuff[i]); + priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); priv->rx_skbuff[i] = NULL; } } static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) { - int i, f; + int i; struct xgmac_dma_desc *p; if (!priv->tx_skbuff) @@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) continue; p = priv->dma_tx + i; - dma_unmap_single(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - - for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { - p = priv->dma_tx + i++; + if (desc_get_tx_fs(p)) + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + else dma_unmap_page(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); - } - dev_kfree_skb_any(priv->tx_skbuff[i]); + if (desc_get_tx_ls(p)) + dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; } } @@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) */ static void xgmac_tx_complete(struct xgmac_priv *priv) { - int i; - while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { unsigned int entry = priv->tx_tail; struct sk_buff *skb = priv->tx_skbuff[entry]; @@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv) if (desc_get_owner(p)) break; - /* Verify tx error by looking at the last segment */ - if (desc_get_tx_ls(p)) - desc_get_tx_status(priv, p); - netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", priv->tx_head, priv->tx_tail); - dma_unmap_single(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - - priv->tx_skbuff[entry] = NULL; - priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); - - if (!skb) { - continue; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - entry = priv->tx_tail = dma_ring_incr(priv->tx_tail, - DMA_TX_RING_SZ); - p = priv->dma_tx + priv->tx_tail; - + if (desc_get_tx_fs(p)) + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + else dma_unmap_page(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); + + /* Check tx error 
on the last segment */ + if (desc_get_tx_ls(p)) { + desc_get_tx_status(priv, p); + dev_kfree_skb(skb); } - dev_kfree_skb(skb); + priv->tx_skbuff[entry] = NULL; + priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); } - if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > - MAX_SKB_FRAGS) + /* Ensure tx_tail is visible to xgmac_xmit */ + smp_mb(); + if (unlikely(netif_queue_stopped(priv->dev) && + (tx_dma_ring_space(priv) > MAX_SKB_FRAGS))) netif_wake_queue(priv->dev); } -/** - * xgmac_tx_err: - * @priv: pointer to the private device structure - * Description: it cleans the descriptors and restarts the transmission - * in case of errors. - */ -static void xgmac_tx_err(struct xgmac_priv *priv) +static void xgmac_tx_timeout_work(struct work_struct *work) { - u32 reg, value, inten; + u32 reg, value; + struct xgmac_priv *priv = + container_of(work, struct xgmac_priv, tx_timeout_work); - netif_stop_queue(priv->dev); + napi_disable(&priv->napi); - inten = readl(priv->base + XGMAC_DMA_INTR_ENA); writel(0, priv->base + XGMAC_DMA_INTR_ENA); + netif_tx_lock(priv->dev); + reg = readl(priv->base + XGMAC_DMA_CONTROL); writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); do { @@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv) writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, priv->base + XGMAC_DMA_STATUS); - writel(inten, priv->base + XGMAC_DMA_INTR_ENA); + netif_tx_unlock(priv->dev); netif_wake_queue(priv->dev); + + napi_enable(&priv->napi); + + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } static int xgmac_hw_init(struct net_device *dev) @@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev) DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; writel(value, ioaddr + XGMAC_DMA_BUS_MODE); - /* Enable interrupts */ - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + writel(0, ioaddr + XGMAC_DMA_INTR_ENA); /* Mask power mgt interrupt */ writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); @@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev) napi_enable(&priv->napi); netif_start_queue(dev); + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + return 0; } @@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, paddr)) { dev_kfree_skb(skb); - return -EIO; + return NETDEV_TX_OK; } priv->tx_skbuff[entry] = skb; desc_set_buf_addr_and_size(desc, paddr, len); @@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) paddr = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, paddr)) { - dev_kfree_skb(skb); - return -EIO; - } + if (dma_mapping_error(priv->device, paddr)) + goto dma_err; entry = dma_ring_incr(entry, DMA_TX_RING_SZ); desc = priv->dma_tx + entry; - priv->tx_skbuff[entry] = NULL; + priv->tx_skbuff[entry] = skb; desc_set_buf_addr_and_size(desc, paddr, len); if (i < (nfrags - 1)) @@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); + writel(1, priv->base + XGMAC_DMA_TX_POLL); + priv->tx_head 
= dma_ring_incr(entry, DMA_TX_RING_SZ); - writel(1, priv->base + XGMAC_DMA_TX_POLL); - if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < - MAX_SKB_FRAGS) + /* Ensure tx_head update is visible to tx completion */ + smp_mb(); + if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) { netif_stop_queue(dev); + /* Ensure netif_stop_queue is visible to tx completion */ + smp_mb(); + if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS) + netif_start_queue(dev); + } + return NETDEV_TX_OK; +dma_err: + entry = priv->tx_head; + for ( ; i > 0; i--) { + entry = dma_ring_incr(entry, DMA_TX_RING_SZ); + desc = priv->dma_tx + entry; + priv->tx_skbuff[entry] = NULL; + dma_unmap_page(priv->device, desc_get_buf_addr(desc), + desc_get_buf_len(desc), DMA_TO_DEVICE); + desc_clear_tx_owner(desc); + } + desc = first; + dma_unmap_single(priv->device, desc_get_buf_addr(desc), + desc_get_buf_len(desc), DMA_TO_DEVICE); + dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) skb_put(skb, frame_len); dma_unmap_single(priv->device, desc_get_buf_addr(p), - frame_len, DMA_FROM_DEVICE); + priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, priv->dev); skb->ip_summed = ip_checksum; @@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget) static void xgmac_tx_timeout(struct net_device *dev) { struct xgmac_priv *priv = netdev_priv(dev); - - /* Clear Tx resources and restart transmitting again */ - xgmac_tx_err(priv); + schedule_work(&priv->tx_timeout_work); } /** @@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev) if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { use_hash = true; value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; + } else { + use_hash = false; } netdev_for_each_mc_addr(ha, dev) { if (use_hash) { @@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev) } out: + for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) + xgmac_set_mac_addr(ioaddr, NULL, reg); for (i = 0; i < XGMAC_NUM_HASH; i++) writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); @@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) static irqreturn_t xgmac_interrupt(int irq, void *dev_id) { u32 intr_status; - bool tx_err = false; struct net_device *dev = (struct net_device *)dev_id; struct xgmac_priv *priv = netdev_priv(dev); struct xgmac_extra_stats *x = &priv->xstats; @@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id) if (intr_status & DMA_STATUS_TPS) { netdev_err(priv->dev, "transmit process stopped\n"); x->tx_process_stopped++; - tx_err = true; + schedule_work(&priv->tx_timeout_work); } if (intr_status & DMA_STATUS_FBI) { netdev_err(priv->dev, "fatal bus error\n"); x->fatal_bus_error++; - tx_err = true; } - - if (tx_err) - xgmac_tx_err(priv); } /* TX/RX NORMAL interrupts */ @@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = { XGMAC_STAT(rx_payload_error), XGMAC_STAT(rx_ip_header_error), XGMAC_STAT(rx_da_filter_fail), - XGMAC_STAT(rx_sa_filter_fail), XGMAC_STAT(fatal_bus_error), XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), @@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev) ndev->netdev_ops = &xgmac_netdev_ops; SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); priv->device = &pdev->dev; priv->dev = ndev; @@ 
-1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev) if (device_can_wakeup(priv->device)) priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ - ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; + ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index e104db7fcf27..3224d28cdad4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4476,6 +4476,10 @@ static int be_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); + status = be_fw_wait_ready(adapter); + if (status) + return status; + /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index ae236009f1a8..0120217a16dd 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -296,6 +296,9 @@ struct fec_enet_private { /* The ring entries to be free()ed */ struct bufdesc *dirty_tx; + unsigned short tx_ring_size; + unsigned short rx_ring_size; + struct platform_device *pdev; int opened; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 0cd5e4b8b545..f9aacf5d8523 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -238,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); static int mii_cnt; -static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex) +static inline +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) { - struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; - if (is_ex) - return (struct bufdesc *)(ex + 1); + struct bufdesc *new_bd = bdp + 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= fep->tx_bd_base) { + base = fep->tx_bd_base; + ring_size = fep->tx_ring_size; + ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + } else { + base = fep->rx_bd_base; + ring_size = fep->rx_ring_size; + ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? + ex_base : ex_new_bd); else - return bdp + 1; + return (new_bd >= (base + ring_size)) ? + base : new_bd; } -static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex) +static inline +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) { - struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; - if (is_ex) - return (struct bufdesc *)(ex - 1); + struct bufdesc *new_bd = bdp - 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= fep->tx_bd_base) { + base = fep->tx_bd_base; + ring_size = fep->tx_ring_size; + ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + } else { + base = fep->rx_bd_base; + ring_size = fep->rx_ring_size; + ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd < ex_base) ? + (ex_new_bd + ring_size) : ex_new_bd); else - return bdp - 1; + return (new_bd < base) ? 
(new_bd + ring_size) : new_bd; } static void *swap_buffer(void *bufaddr, int len) @@ -379,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } } - bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp_pre = fec_enet_get_prevdesc(bdp, fep); if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { fep->delay_work.trig_tx = true; @@ -388,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } /* If this was the last BD in the ring, start at the beginning again. */ - if (status & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); fep->cur_tx = bdp; @@ -416,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev) /* Initialize the receive buffer descriptors. */ bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { /* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr) bdp->cbd_sc = BD_ENET_RX_EMPTY; else bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; fep->cur_rx = fep->rx_bd_base; @@ -435,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev) /* ...and the same for transmit */ bdp = fep->tx_bd_base; fep->cur_tx = bdp; - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < fep->tx_ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; @@ -444,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev) fep->tx_skbuff[i] = NULL; } bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; fep->dirty_tx = bdp; } @@ -509,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex) writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); if (fep->bufdesc_ex) writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) - * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); + * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); else writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) - * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); + * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); for (i = 0; i <= TX_RING_MOD_MASK; i++) { @@ -726,10 +758,7 @@ fec_enet_tx(struct net_device *ndev) bdp = fep->dirty_tx; /* get next bdp of dirty_tx */ - if (bdp->cbd_sc & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { @@ -799,10 +828,7 @@ fec_enet_tx(struct net_device *ndev) fep->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - if (status & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); /* Since we have freed up a buffer, the ring is no longer full */ @@ -970,8 +996,7 @@ fec_enet_rx(struct net_device *ndev, int budget) htons(ETH_P_8021Q), vlan_tag); - if (!skb_defer_rx_timestamp(skb)) - napi_gro_receive(&fep->napi, skb); + napi_gro_receive(&fep->napi, skb); } bdp->cbd_bufaddr = 
dma_map_single(&fep->pdev->dev, data, @@ -993,10 +1018,8 @@ rx_processing_done: } /* Update BD pointer to next entry */ - if (status & BD_ENET_RX_WRAP) - bdp = fep->rx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); + /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. @@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) struct bufdesc *bdp; bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { skb = fep->rx_skbuff[i]; if (bdp->cbd_bufaddr) @@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev) FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (skb) dev_kfree_skb(skb); - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) + for (i = 0; i < fep->tx_ring_size; i++) kfree(fep->tx_bounce[i]); } @@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) struct bufdesc *bdp; bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) { fec_enet_free_buffers(ndev); @@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_RX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < fep->tx_ring_size; i++) { fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); bdp->cbd_sc = 0; @@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_TX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; return 0; @@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev) /* Get the Ethernet address */ fec_get_mac(ndev); + /* init the tx & rx ring size */ + fep->tx_ring_size = TX_RING_SIZE; + fep->rx_ring_size = RX_RING_SIZE; + /* Set receive and transmit descriptor base. 
*/ fep->rx_bd_base = cbd_base; if (fep->bufdesc_ex) fep->tx_bd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE); + (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); else - fep->tx_bd_base = cbd_base + RX_RING_SIZE; + fep->tx_bd_base = cbd_base + fep->rx_ring_size; /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 7fbe6abf6054..23de82a9da82 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev, jwrite32(jme, JME_APMC, apmc); } - NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) + NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) spin_lock_init(&jme->phy_lock); spin_lock_init(&jme->macaddr_lock); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 2777c70c603b..e35bac7cfdf1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -138,7 +138,9 @@ #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVNETA_GMAC_AN_SPEED_EN BIT(7) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) #define MVNETA_MIB_COUNTERS_BASE 0x3080 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 @@ -948,6 +950,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Assign port SDMA configuration */ mvreg_write(pp, MVNETA_SDMA_CONFIG, val); + /* Disable PHY polling in hardware, since we're using the + * kernel phylib to do this. + */ + val = mvreg_read(pp, MVNETA_UNIT_CONTROL); + val &= ~MVNETA_PHY_POLLING_ENABLE; + mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2340,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev) val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX); + MVNETA_GMAC_CONFIG_FULL_DUPLEX | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN); if (phydev->duplex) val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; @@ -2473,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev) return 0; } +static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mvneta_port *pp = netdev_priv(dev); + int ret; + + if (!pp->phy_dev) + return -ENOTSUPP; + + ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); + if (!ret) + mvneta_adjust_link(dev); + + return ret; +} + /* Ethtool methods */ /* Get settings (phy address, speed) for ethtools */ @@ -2591,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_change_mtu = mvneta_change_mtu, .ndo_tx_timeout = mvneta_tx_timeout, .ndo_get_stats64 = mvneta_get_stats64, + .ndo_do_ioctl = mvneta_ioctl, }; const struct ethtool_ops mvneta_eth_tool_ops = { diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 3fe09ab2d7c9..32675e16021e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -1171,7 +1171,6 @@ typedef struct { #define NETXEN_DB_MAPSIZE_BYTES 0x1000 -#define NETXEN_NETDEV_WEIGHT 128 #define NETXEN_ADAPTER_UP_MAGIC 777 #define NETXEN_NIC_PEG_TUNE 0 diff --git 
a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1046e9461509..cbd75f97ffb3 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_add(netdev, &sds_ring->napi, - netxen_nic_poll, NETXEN_NETDEV_WEIGHT); + netxen_nic_poll, NAPI_POLL_WEIGHT); } return 0; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 474c8a86a2af..5cd831ebfa83 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1348,7 +1348,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) DMA_FROM_DEVICE); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); - netif_rx(skb); + netif_receive_skb(skb); ndev->stats.rx_packets++; ndev->stats.rx_bytes += pkt_len; } @@ -1906,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev) pm_runtime_get_sync(&mdp->pdev->dev); + napi_enable(&mdp->napi); + ret = request_irq(ndev->irq, sh_eth_interrupt, mdp->cd->irq_flags, ndev->name, ndev); if (ret) { dev_err(&ndev->dev, "Can not assign IRQ number\n"); - return ret; + goto out_napi_off; } /* Descriptor set */ @@ -1928,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev) if (ret) goto out_free_irq; - napi_enable(&mdp->napi); - return ret; out_free_irq: free_irq(ndev->irq, ndev); +out_napi_off: + napi_disable(&mdp->napi); pm_runtime_put_sync(&mdp->pdev->dev); return ret; } @@ -2025,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - napi_disable(&mdp->napi); - netif_stop_queue(ndev); /* Disable interrupts by clearing the interrupt mask. */ @@ -2044,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev) free_irq(ndev->irq, ndev); + napi_disable(&mdp->napi); + /* Free all the skbuffs in the Rx queue. 
*/ sh_eth_ring_free(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 623ebc50fe6b..7a0072003f34 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -71,19 +71,22 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, plat->force_sf_dma_mode = 1; } - dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); - if (!dma_cfg) - return -ENOMEM; - - plat->dma_cfg = dma_cfg; - of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); - dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); - dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); + if (of_find_property(np, "snps,pbl", NULL)) { + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), + GFP_KERNEL); + if (!dma_cfg) + return -ENOMEM; + plat->dma_cfg = dma_cfg; + of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); + dma_cfg->fixed_burst = + of_property_read_bool(np, "snps,fixed-burst"); + dma_cfg->mixed_burst = + of_property_read_bool(np, "snps,mixed-burst"); + } plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); if (plat->force_thresh_dma_mode) { plat->force_sf_dma_mode = 0; pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); - } return 0; } diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index ad32af67e618..9c805e0c0cae 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev, { netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; /* NAPI */ - netif_napi_add(netdev, napi, - gelic_net_poll, GELIC_NET_NAPI_WEIGHT); + netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT); netdev->ethtool_ops = &gelic_ether_ethtool_ops; netdev->netdev_ops = &gelic_netdevice_ops; } diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index a93df6ac1909..309abb472aa2 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -37,7 +37,6 @@ #define GELIC_NET_RXBUF_ALIGN 128 #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ -#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index e90e1f46121e..64b4639f43b6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np) printk(KERN_WARNING "Setting MDIO clock divisor to " "default %d\n", DEFAULT_CLOCK_DIVISOR); clk_div = DEFAULT_CLOCK_DIVISOR; + of_node_put(np1); goto issue; } diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 872819851aef..25ba7eca9a13 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, }, + /* HP hs2434 Mobile Broadband Module needs 
ZLPs */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_zlp, + }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info, }, diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 1d4c8fe72752..c82fe65c4128 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) zfcp_erp_action_dismiss(&port->erp_action); - else - shost_for_each_device(sdev, port->adapter->scsi_host) + else { + spin_lock(port->adapter->scsi_host->host_lock); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) zfcp_erp_action_dismiss_lun(sdev); + spin_unlock(port->adapter->scsi_host->host_lock); + } } static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) @@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, { struct scsi_device *sdev; - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock(port->adapter->scsi_host->host_lock); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) _zfcp_erp_lun_reopen(sdev, clear, id, 0); + spin_unlock(port->adapter->scsi_host->host_lock); } static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) @@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) atomic_set_mask(common_mask, &port->status); read_unlock_irqrestore(&adapter->port_list_lock, flags); - shost_for_each_device(sdev, adapter->scsi_host) + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } /** @@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) } read_unlock_irqrestore(&adapter->port_list_lock, flags); - shost_for_each_device(sdev, adapter->scsi_host) { + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) { atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); if (clear_counter) atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } /** @@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) { struct scsi_device *sdev; u32 common_mask = mask & ZFCP_COMMON_FLAGS; + unsigned long flags; atomic_set_mask(mask, &port->status); if (!common_mask) return; - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } /** @@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) struct scsi_device *sdev; u32 common_mask = mask & ZFCP_COMMON_FLAGS; u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; + unsigned long flags; atomic_clear_mask(mask, &port->status); @@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) if 
(clear_counter) atomic_set(&port->erp_counter, 0); - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) { atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); if (clear_counter) atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } /** diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 665e3cfaaf85..de0598eaacd2 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) { - spin_lock_irq(&qdio->req_q_lock); if (atomic_read(&qdio->req_q_free) || !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return 1; - spin_unlock_irq(&qdio->req_q_lock); return 0; } @@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) { long ret; - spin_unlock_irq(&qdio->req_q_lock); - ret = wait_event_interruptible_timeout(qdio->req_q_wq, - zfcp_qdio_sbal_check(qdio), 5 * HZ); + ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq, + zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ); if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return -EIO; @@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); } - spin_lock_irq(&qdio->req_q_lock); return -EIO; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 3f01bbf0609f..890639274bcf 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ zfcp_sysfs_##_feat##_##_name##_show, NULL); +#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \ +static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ + struct device_attribute *at,\ + char *buf) \ +{ \ + return sprintf(buf, _format, _value); \ +} \ +static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ + zfcp_sysfs_##_feat##_##_name##_show, NULL); + #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ struct device_attribute *at,\ @@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); +ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0); +ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0); static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, struct device_attribute *attr, @@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = { &dev_attr_unit_in_recovery.attr, &dev_attr_unit_status.attr, &dev_attr_unit_access_denied.attr, + &dev_attr_unit_access_shared.attr, + &dev_attr_unit_access_readonly.attr, NULL }; static struct attribute_group zfcp_unit_attr_group = { diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 48b2918e0d65..92ff027746f2 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1353,7 +1353,6 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI select SCSI_FC_ATTRS - select GENERIC_CSUM select CRC_T10DIF help This lpfc driver supports 
the Emulex LightPulse diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index e25eba5713c1..b3b5125faa72 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) ret = comedi_device_postconfig(dev); if (ret < 0) { comedi_device_detach(dev); - module_put(dev->driver->module); + module_put(driv->module); } /* On success, the driver module count has been incremented. */ return ret; diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c index 3396eb9d57a3..ac2767100df5 100644 --- a/drivers/tty/hvc/hvsi_lib.c +++ b/drivers/tty/hvc/hvsi_lib.c @@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv) pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); - /* Try for up to 200s */ - for (timeout = 0; timeout < 20; timeout++) { + /* Try for up to 400ms */ + for (timeout = 0; timeout < 40; timeout++) { if (pv->established) goto established; if (!hvsi_get_packet(pv)) diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 08613e241894..279b04910f00 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -304,6 +304,13 @@ static int __init ohci_pci_init(void) pr_info("%s: " DRIVER_DESC "\n", hcd_name); ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); + +#ifdef CONFIG_PM + /* Entries for the PCI suspend/resume callbacks are special */ + ohci_pci_hc_driver.pci_suspend = ohci_suspend; + ohci_pci_hc_driver.pci_resume = ohci_resume; +#endif + return pci_register_driver(&ohci_pci_driver); } module_init(ohci_pci_init); diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h index ca266280895d..e1859b8ef567 100644 --- a/drivers/usb/phy/phy-fsl-usb.h +++ b/drivers/usb/phy/phy-fsl-usb.h @@ -15,7 +15,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include "otg_fsm.h" +#include "phy-fsm-usb.h" #include <linux/usb/otg.h> #include <linux/ioctl.h> diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c index c520b3548e7c..7f4596606e18 100644 --- a/drivers/usb/phy/phy-fsm-usb.c +++ b/drivers/usb/phy/phy-fsm-usb.c @@ -29,7 +29,7 @@ #include <linux/usb/gadget.h> #include <linux/usb/otg.h> -#include "phy-otg-fsm.h" +#include "phy-fsm-usb.h" /* Change USB protocol when there is a protocol change */ static int otg_set_protocol(struct otg_fsm *fsm, int protocol) |
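
The xgmac hunks above converge on the standard lockless TX stop/wake pattern: the transmit path publishes tx_head and stops the queue when fewer than MAX_SKB_FRAGS + 1 slots remain, the completion path advances tx_tail and wakes it, and paired smp_mb() calls keep the two sides from missing each other's updates. A minimal sketch of that pattern follows, under stated assumptions: the demo_ring structure and the ring_space()/demo_xmit()/demo_clean() helpers are illustrative stand-ins, not the driver's actual code.

/*
 * Sketch only (not from this commit): the lockless TX stop/wake pattern
 * the xgmac changes implement.  demo_ring, ring_space(), demo_xmit() and
 * demo_clean() are hypothetical names used for illustration.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RING_SZ		256			/* must be a power of two */
#define WAKE_THRESH	(MAX_SKB_FRAGS + 1)

struct demo_ring {
	unsigned int head;	/* written only by the xmit path */
	unsigned int tail;	/* written only by the completion path */
};

static unsigned int ring_space(const struct demo_ring *r)
{
	return (r->tail - r->head - 1) & (RING_SZ - 1);
}

/* xmit side: publish the new head, then re-check before going to sleep. */
static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev,
			     struct demo_ring *r)
{
	/* ... map skb and fill the descriptor at r->head ... */
	r->head = (r->head + 1) & (RING_SZ - 1);

	/* Make the head update visible before testing for a full ring. */
	smp_mb();
	if (unlikely(ring_space(r) <= WAKE_THRESH)) {
		netif_stop_queue(dev);
		/* Pairs with the barrier in demo_clean(); re-check so a
		 * completion that raced with us still wakes the queue. */
		smp_mb();
		if (ring_space(r) > WAKE_THRESH)
			netif_start_queue(dev);
	}
	return NETDEV_TX_OK;
}

/* completion side: retire descriptors, then wake the queue if room exists. */
static void demo_clean(struct net_device *dev, struct demo_ring *r)
{
	/* ... unmap and free skbs up to the hardware's current position ... */
	r->tail = r->head;	/* illustrative: assume everything completed */

	/* Ensure the tail update is visible before the stopped-queue test. */
	smp_mb();
	if (unlikely(netif_queue_stopped(dev) &&
		     ring_space(r) > WAKE_THRESH))
		netif_wake_queue(dev);
}

The re-check after netif_stop_queue() is what closes the race the original dma_ring_space() test left open: without it, a completion that lands between the space test and the stop can leave the queue stopped with a nearly empty ring until the watchdog fires.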