Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_root.c | 16
-rw-r--r--  drivers/amba/bus.c | 15
-rw-r--r--  drivers/block/sunvdc.c | 222
-rw-r--r--  drivers/char/ipmi/Kconfig | 14
-rw-r--r--  drivers/char/ipmi/Makefile | 2
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 560
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c | 310
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 497
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 1870
-rw-r--r--  drivers/dma-buf/fence.c | 2
-rw-r--r--  drivers/dma/Kconfig | 11
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/amba-pl08x.c | 2
-rw-r--r--  drivers/dma/at_xdmac.c | 1524
-rw-r--r--  drivers/dma/bcm2835-dma.c | 3
-rw-r--r--  drivers/dma/cppi41.c | 12
-rw-r--r--  drivers/dma/dma-jz4740.c | 4
-rw-r--r--  drivers/dma/dmaengine.c | 3
-rw-r--r--  drivers/dma/fsl-edma.c | 191
-rw-r--r--  drivers/dma/fsldma.c | 1
-rw-r--r--  drivers/dma/imx-sdma.c | 6
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 35
-rw-r--r--  drivers/dma/iop-adma.c | 1
-rw-r--r--  drivers/dma/k3dma.c | 4
-rw-r--r--  drivers/dma/mmp_pdma.c | 1
-rw-r--r--  drivers/dma/mmp_tdma.c | 1
-rw-r--r--  drivers/dma/mpc512x_dma.c | 13
-rw-r--r--  drivers/dma/nbpfaxi.c | 1
-rw-r--r--  drivers/dma/omap-dma.c | 2
-rw-r--r--  drivers/dma/pch_dma.c | 2
-rw-r--r--  drivers/dma/pl330.c | 106
-rw-r--r--  drivers/dma/qcom_bam_dma.c | 231
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 1
-rw-r--r--  drivers/dma/sa11x0-dma.c | 3
-rw-r--r--  drivers/dma/sh/rcar-audmapp.c | 3
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c | 3
-rw-r--r--  drivers/dma/sh/shdma-base.c | 4
-rw-r--r--  drivers/dma/sh/shdma-of.c | 1
-rw-r--r--  drivers/dma/sh/shdmac.c | 2
-rw-r--r--  drivers/dma/sh/sudmac.c | 3
-rw-r--r--  drivers/dma/sirf-dma.c | 5
-rw-r--r--  drivers/dma/ste_dma40.c | 1
-rw-r--r--  drivers/dma/sun6i-dma.c | 122
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 1
-rw-r--r--  drivers/dma/timb_dma.c | 1
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c | 13
-rw-r--r--  drivers/ide/ide-floppy.c | 4
-rw-r--r--  drivers/ide/ide.c | 3
-rw-r--r--  drivers/iommu/Kconfig | 25
-rw-r--r--  drivers/iommu/Makefile | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 8
-rw-r--r--  drivers/iommu/arm-smmu.c | 135
-rw-r--r--  drivers/iommu/dmar.c | 532
-rw-r--r--  drivers/iommu/intel-iommu.c | 307
-rw-r--r--  drivers/iommu/intel_irq_remapping.c | 249
-rw-r--r--  drivers/iommu/iommu.c | 20
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 2
-rw-r--r--  drivers/iommu/msm_iommu.c | 3
-rw-r--r--  drivers/iommu/msm_iommu_dev.c | 10
-rw-r--r--  drivers/iommu/omap-iommu-debug.c | 242
-rw-r--r--  drivers/iommu/omap-iommu.c | 312
-rw-r--r--  drivers/iommu/omap-iommu.h | 98
-rw-r--r--  drivers/iommu/omap-iommu2.c | 337
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 1038
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 23
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 456
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 36
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 2
-rw-r--r--  drivers/net/ethernet/davicom/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9
-rw-r--r--  drivers/net/ethernet/jme.c | 12
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 53
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 30
-rw-r--r--  drivers/net/phy/fixed.c | 1
-rw-r--r--  drivers/pcmcia/sa1100_generic.c | 1
-rw-r--r--  drivers/pcmcia/sa1111_generic.c | 7
-rw-r--r--  drivers/pcmcia/sa11xx_base.c | 14
-rw-r--r--  drivers/pcmcia/soc_common.c | 4
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 144
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 4
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 4
-rw-r--r--  drivers/video/fbdev/sa1100fb.c | 30
-rw-r--r--  drivers/video/fbdev/sa1100fb.h | 1
89 files changed, 7948 insertions, 2071 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index cd4de7e038ea..c6bcb8c719d8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-aspm.h>
+#include <linux/dmar.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root;
acpi_handle handle = device->handle;
int no_aspm = 0, clear_aspm = 0;
+ bool hotadd = system_state != SYSTEM_BOOTING;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
+ if (hotadd && dmar_device_add(handle)) {
+ result = -ENXIO;
+ goto end;
+ }
+
pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->segment, (unsigned int)root->secondary.start);
device->driver_data = NULL;
result = -ENODEV;
- goto end;
+ goto remove_dmar;
}
if (clear_aspm) {
@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
- if (system_state != SYSTEM_BOOTING) {
+ if (hotadd) {
pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_root_bus_resources(root->bus);
}
@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_unlock_rescan_remove();
return 1;
+remove_dmar:
+ if (hotadd)
+ dmar_device_remove(handle);
end:
kfree(root);
return result;
@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_remove_root_bus(root->bus);
+ dmar_device_remove(device->handle);
+
pci_unlock_rescan_remove();
kfree(root);
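[Editor's note: a minimal sketch of the unwind pattern the pci_root.c hunks above rely on — dmar_device_add() is undone only on the hot-add path, since boot-time enumeration never called it. Names like scan_bus() are illustrative, not the kernel code.]

static int root_add(struct acpi_device *device, bool hotadd)
{
	int result;

	if (hotadd && dmar_device_add(device->handle))
		return -ENXIO;

	result = scan_bus(device);	/* hypothetical helper */
	if (result)
		goto remove_dmar;

	return 0;

remove_dmar:
	/* only undo what this path actually did */
	if (hotadd)
		dmar_device_remove(device->handle);
	return result;
}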
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 973a3332a85f..80f4de729a86 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -95,8 +95,12 @@ static int amba_pm_runtime_suspend(struct device *dev)
struct amba_device *pcdev = to_amba_device(dev);
int ret = pm_generic_runtime_suspend(dev);
- if (ret == 0 && dev->driver)
- clk_disable_unprepare(pcdev->pclk);
+ if (ret == 0 && dev->driver) {
+ if (pm_runtime_is_irq_safe(dev))
+ clk_disable(pcdev->pclk);
+ else
+ clk_disable_unprepare(pcdev->pclk);
+ }
return ret;
}
@@ -107,7 +111,10 @@ static int amba_pm_runtime_resume(struct device *dev)
int ret;
if (dev->driver) {
- ret = clk_prepare_enable(pcdev->pclk);
+ if (pm_runtime_is_irq_safe(dev))
+ ret = clk_enable(pcdev->pclk);
+ else
+ ret = clk_prepare_enable(pcdev->pclk);
/* Failure is probably fatal to the system, but... */
if (ret)
return ret;
@@ -115,7 +122,7 @@ static int amba_pm_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
-#endif
+#endif /* CONFIG_PM */
static const struct dev_pm_ops amba_pm = {
.suspend = pm_generic_suspend,
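[Editor's note: the amba/bus.c change above follows the usual clk API contract — clk_prepare()/clk_unprepare() may sleep, clk_enable()/clk_disable() may not — so an IRQ-safe runtime-PM callback can only toggle the enable half. A sketch of that pattern, under that assumption; the drvdata storage is hypothetical.]

static int my_runtime_suspend(struct device *dev)
{
	struct clk *clk = dev_get_drvdata(dev);	/* hypothetical storage */

	if (pm_runtime_is_irq_safe(dev))
		clk_disable(clk);		/* atomic-safe half only */
	else
		clk_disable_unprepare(clk);	/* full path, may sleep */
	return 0;
}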
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 0ebadf93b6c5..4b911ed96ea3 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -23,8 +23,8 @@
#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "February 13, 2013"
+#define DRV_MODULE_VERSION "1.2"
+#define DRV_MODULE_RELDATE "November 24, 2014"
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
+static struct workqueue_struct *sunvdc_wq;
+
struct vdc_req_entry {
struct request *req;
};
@@ -60,6 +62,10 @@ struct vdc_port {
u64 max_xfer_size;
u32 vdisk_block_size;
+ u64 ldc_timeout;
+ struct timer_list ldc_reset_timer;
+ struct work_struct ldc_reset_work;
+
/* The server fills these in for us in the disk attribute
* ACK packet.
*/
@@ -71,6 +77,10 @@ struct vdc_port {
char disk_name[32];
};
+static void vdc_ldc_reset(struct vdc_port *port);
+static void vdc_ldc_reset_work(struct work_struct *work);
+static void vdc_ldc_reset_timer(unsigned long _arg);
+
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
return container_of(vio, struct vdc_port, vio);
@@ -150,6 +160,21 @@ static const struct block_device_operations vdc_fops = {
.ioctl = vdc_ioctl,
};
+static void vdc_blk_queue_start(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ /* restart blk queue when ring is half emptied. also called after
+ * handshake completes, so check for initial handshake before we've
+ * allocated a disk.
+ */
+ if (port->disk && blk_queue_stopped(port->disk->queue) &&
+ vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
+ blk_start_queue(port->disk->queue);
+ }
+
+}
+
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
if (vio->cmp &&
@@ -163,7 +188,11 @@ static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
static void vdc_handshake_complete(struct vio_driver_state *vio)
{
+ struct vdc_port *port = to_vdc_port(vio);
+
+ del_timer(&port->ldc_reset_timer);
vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
+ vdc_blk_queue_start(port);
}
static int vdc_handle_unknown(struct vdc_port *port, void *arg)
@@ -269,7 +298,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
desc->hdr.state = VIO_DESC_FREE;
- dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->cons = vio_dring_next(dr, index);
req = rqe->req;
if (req == NULL) {
@@ -281,10 +310,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
- /* restart blk queue when ring is half emptied */
- if (blk_queue_stopped(port->disk->queue) &&
- vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
- blk_start_queue(port->disk->queue);
+ vdc_blk_queue_start(port);
}
static int vdc_ack(struct vdc_port *port, void *msgbuf)
@@ -317,17 +343,20 @@ static void vdc_event(void *arg, int event)
spin_lock_irqsave(&vio->lock, flags);
- if (unlikely(event == LDC_EVENT_RESET ||
- event == LDC_EVENT_UP)) {
+ if (unlikely(event == LDC_EVENT_RESET)) {
vio_link_state_change(vio, event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
+ queue_work(sunvdc_wq, &port->ldc_reset_work);
+ goto out;
+ }
+
+ if (unlikely(event == LDC_EVENT_UP)) {
+ vio_link_state_change(vio, event);
+ goto out;
}
if (unlikely(event != LDC_EVENT_DATA_READY)) {
- printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
+ pr_warn(PFX "Unexpected LDC event %d\n", event);
+ goto out;
}
err = 0;
@@ -371,6 +400,7 @@ static void vdc_event(void *arg, int event)
}
if (err < 0)
vdc_finish(&port->vio, err, WAITING_FOR_ANY);
+out:
spin_unlock_irqrestore(&vio->lock, flags);
}
@@ -403,6 +433,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
delay = 128;
} while (err == -EAGAIN);
+ if (err == -ENOTCONN)
+ vdc_ldc_reset(port);
return err;
}
@@ -472,7 +504,7 @@ static int __send_request(struct request *req)
printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
} else {
port->req_id++;
- dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->prod = vio_dring_next(dr, dr->prod);
}
return err;
@@ -626,7 +658,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
err = __vdc_tx_trigger(port);
if (err >= 0) {
port->req_id++;
- dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ dr->prod = vio_dring_next(dr, dr->prod);
spin_unlock_irqrestore(&port->vio.lock, flags);
wait_for_completion(&comp.com);
@@ -690,12 +722,9 @@ static void vdc_free_tx_ring(struct vdc_port *port)
}
}
-static int probe_disk(struct vdc_port *port)
+static int vdc_port_up(struct vdc_port *port)
{
struct vio_completion comp;
- struct request_queue *q;
- struct gendisk *g;
- int err;
init_completion(&comp.com);
comp.err = 0;
@@ -703,10 +732,27 @@ static int probe_disk(struct vdc_port *port)
port->vio.cmp = &comp;
vio_port_up(&port->vio);
-
wait_for_completion(&comp.com);
- if (comp.err)
- return comp.err;
+ return comp.err;
+}
+
+static void vdc_port_down(struct vdc_port *port)
+{
+ ldc_disconnect(port->vio.lp);
+ ldc_unbind(port->vio.lp);
+ vdc_free_tx_ring(port);
+ vio_ldc_free(&port->vio);
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+ struct request_queue *q;
+ struct gendisk *g;
+ int err;
+
+ err = vdc_port_up(port);
+ if (err)
+ return err;
if (vdc_version_supported(port, 1, 1)) {
/* vdisk_size should be set during the handshake, if it wasn't
@@ -819,6 +865,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct mdesc_handle *hp;
struct vdc_port *port;
int err;
+ const u64 *ldc_timeout;
print_version();
@@ -848,6 +895,16 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
port->vdisk_size = -1;
+ /* Actual wall time may be double due to do_generic_file_read() doing
+ * a readahead I/O first, and once that fails it will try to read a
+ * single page.
+ */
+ ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
+ port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
+ setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
+ (unsigned long)port);
+ INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
+
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
vdc_versions, ARRAY_SIZE(vdc_versions),
&vdc_vio_ops, port->disk_name);
@@ -896,8 +953,21 @@ static int vdc_port_remove(struct vio_dev *vdev)
struct vdc_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->vio.lock, flags);
+ blk_stop_queue(port->disk->queue);
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+ flush_work(&port->ldc_reset_work);
+ del_timer_sync(&port->ldc_reset_timer);
del_timer_sync(&port->vio.timer);
+ del_gendisk(port->disk);
+ blk_cleanup_queue(port->disk->queue);
+ put_disk(port->disk);
+ port->disk = NULL;
+
vdc_free_tx_ring(port);
vio_ldc_free(&port->vio);
@@ -908,6 +978,102 @@ static int vdc_port_remove(struct vio_dev *vdev)
return 0;
}
+static void vdc_requeue_inflight(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ u32 idx;
+
+ for (idx = dr->cons; idx != dr->prod; idx = vio_dring_next(dr, idx)) {
+ struct vio_disk_desc *desc = vio_dring_entry(dr, idx);
+ struct vdc_req_entry *rqe = &port->rq_arr[idx];
+ struct request *req;
+
+ ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
+ desc->hdr.state = VIO_DESC_FREE;
+ dr->cons = vio_dring_next(dr, idx);
+
+ req = rqe->req;
+ if (req == NULL) {
+ vdc_end_special(port, desc);
+ continue;
+ }
+
+ rqe->req = NULL;
+ blk_requeue_request(port->disk->queue, req);
+ }
+}
+
+static void vdc_queue_drain(struct vdc_port *port)
+{
+ struct request *req;
+
+ while ((req = blk_fetch_request(port->disk->queue)) != NULL)
+ __blk_end_request_all(req, -EIO);
+}
+
+static void vdc_ldc_reset_timer(unsigned long _arg)
+{
+ struct vdc_port *port = (struct vdc_port *) _arg;
+ struct vio_driver_state *vio = &port->vio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vio->lock, flags);
+ if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
+ pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
+ port->disk_name, port->ldc_timeout);
+ vdc_queue_drain(port);
+ vdc_blk_queue_start(port);
+ }
+ spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset_work(struct work_struct *work)
+{
+ struct vdc_port *port;
+ struct vio_driver_state *vio;
+ unsigned long flags;
+
+ port = container_of(work, struct vdc_port, ldc_reset_work);
+ vio = &port->vio;
+
+ spin_lock_irqsave(&vio->lock, flags);
+ vdc_ldc_reset(port);
+ spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static void vdc_ldc_reset(struct vdc_port *port)
+{
+ int err;
+
+ assert_spin_locked(&port->vio.lock);
+
+ pr_warn(PFX "%s ldc link reset\n", port->disk_name);
+ blk_stop_queue(port->disk->queue);
+ vdc_requeue_inflight(port);
+ vdc_port_down(port);
+
+ err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
+ if (err) {
+ pr_err(PFX "%s vio_ldc_alloc:%d\n", port->disk_name, err);
+ return;
+ }
+
+ err = vdc_alloc_tx_ring(port);
+ if (err) {
+ pr_err(PFX "%s vio_alloc_tx_ring:%d\n", port->disk_name, err);
+ goto err_free_ldc;
+ }
+
+ if (port->ldc_timeout)
+ mod_timer(&port->ldc_reset_timer,
+ round_jiffies(jiffies + HZ * port->ldc_timeout));
+ mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
+ return;
+
+err_free_ldc:
+ vio_ldc_free(&port->vio);
+}
+
static const struct vio_device_id vdc_port_match[] = {
{
.type = "vdc-port",
@@ -927,9 +1093,13 @@ static int __init vdc_init(void)
{
int err;
+ sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ if (!sunvdc_wq)
+ return -ENOMEM;
+
err = register_blkdev(0, VDCBLK_NAME);
if (err < 0)
- goto out_err;
+ goto out_free_wq;
vdc_major = err;
@@ -943,7 +1113,8 @@ out_unregister_blkdev:
unregister_blkdev(vdc_major, VDCBLK_NAME);
vdc_major = 0;
-out_err:
+out_free_wq:
+ destroy_workqueue(sunvdc_wq);
return err;
}
@@ -951,6 +1122,7 @@ static void __exit vdc_exit(void)
{
vio_unregister_driver(&vdc_port_driver);
unregister_blkdev(vdc_major, VDCBLK_NAME);
+ destroy_workqueue(sunvdc_wq);
}
module_init(vdc_init);
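[Editor's note: the sunvdc.c hunks above replace the open-coded "(i + 1) & (VDC_TX_RING_SIZE - 1)" with vio_dring_next(). A sketch of the assumed helper semantics — advance a descriptor-ring index with power-of-two wrap — not the actual sun4v header:]

static inline u32 dring_next(u32 index, u32 ring_size /* power of two */)
{
	/* equivalent to (index + 1) % ring_size when ring_size is 2^n */
	return (index + 1) & (ring_size - 1);
}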
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index db1c9b7adaa6..6ed9e9fe5233 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -62,6 +62,20 @@ config IPMI_SI_PROBE_DEFAULTS
only be available on older systems if the "ipmi_si_intf.trydefaults=1"
boot argument is passed.
+config IPMI_SSIF
+ tristate 'IPMI SMBus handler (SSIF)'
+ select I2C
+ help
+ Provides a driver for an SMBus interface to a BMC, meaning that the
+ BMC must be accessed over an I2C bus instead of a standard system
+ interface. This module requires I2C support.
+
+config IPMI_POWERNV
+ depends on PPC_POWERNV
+ tristate 'POWERNV (OPAL firmware) IPMI interface'
+ help
+ Provides a driver for OPAL firmware-based IPMI interfaces.
+
config IPMI_WATCHDOG
tristate 'IPMI Watchdog Timer'
help
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 16a93648d54e..f3ffde1f5f1f 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -7,5 +7,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
obj-$(CONFIG_IPMI_SI) += ipmi_si.o
+obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index f816211f062f..5fa83f751378 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -56,6 +56,8 @@ static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(ipmi_smi_t intf);
static void need_waiter(ipmi_smi_t intf);
+static int handle_one_recv_msg(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg);
static int initialized;
@@ -191,12 +193,12 @@ struct ipmi_proc_entry {
#endif
struct bmc_device {
- struct platform_device *dev;
+ struct platform_device pdev;
struct ipmi_device_id id;
unsigned char guid[16];
int guid_set;
-
- struct kref refcount;
+ char name[16];
+ struct kref usecount;
/* bmc device attributes */
struct device_attribute device_id_attr;
@@ -210,6 +212,7 @@ struct bmc_device {
struct device_attribute guid_attr;
struct device_attribute aux_firmware_rev_attr;
};
+#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
/*
* Various statistics for IPMI, these index stats[] in the ipmi_smi
@@ -323,6 +326,9 @@ struct ipmi_smi {
struct kref refcount;
+ /* Set when the interface is being unregistered. */
+ bool in_shutdown;
+
/* Used for a list of interfaces. */
struct list_head link;
@@ -341,7 +347,6 @@ struct ipmi_smi {
struct bmc_device *bmc;
char *my_dev_name;
- char *sysfs_name;
/*
* This is the lower-layer's sender routine. Note that you
@@ -377,11 +382,16 @@ struct ipmi_smi {
* periodic timer interrupt. The tasklet is for handling received
* messages directly from the handler.
*/
- spinlock_t waiting_msgs_lock;
- struct list_head waiting_msgs;
+ spinlock_t waiting_rcv_msgs_lock;
+ struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
struct tasklet_struct recv_tasklet;
+ spinlock_t xmit_msgs_lock;
+ struct list_head xmit_msgs;
+ struct ipmi_smi_msg *curr_msg;
+ struct list_head hp_xmit_msgs;
+
/*
* The list of command receivers that are registered for commands
* on this interface.
@@ -474,6 +484,18 @@ static DEFINE_MUTEX(smi_watchers_mutex);
#define ipmi_get_stat(intf, stat) \
((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
+ "ACPI", "SMBIOS", "PCI",
+ "device-tree", "default" };
+
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
+{
+ if (src > SI_DEFAULT)
+ src = 0; /* Invalid */
+ return addr_src_to_str[src];
+}
+EXPORT_SYMBOL(ipmi_addr_src_to_str);
+
static int is_lan_addr(struct ipmi_addr *addr)
{
return addr->addr_type == IPMI_LAN_ADDR_TYPE;
@@ -517,7 +539,7 @@ static void clean_up_interface_data(ipmi_smi_t intf)
tasklet_kill(&intf->recv_tasklet);
- free_smi_msg_list(&intf->waiting_msgs);
+ free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
@@ -1473,6 +1495,30 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
smi_msg->msgid = msgid;
}
+static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
+ struct ipmi_smi_msg *smi_msg, int priority)
+{
+ int run_to_completion = intf->run_to_completion;
+ unsigned long flags;
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ if (intf->curr_msg) {
+ if (priority > 0)
+ list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
+ else
+ list_add_tail(&smi_msg->link, &intf->xmit_msgs);
+ smi_msg = NULL;
+ } else {
+ intf->curr_msg = smi_msg;
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
+ if (smi_msg)
+ handlers->sender(intf->send_info, smi_msg);
+}
+
/*
* Separate from ipmi_request so that the user does not have to be
* supplied in certain circumstances (mainly at panic time). If
@@ -1497,7 +1543,6 @@ static int i_ipmi_request(ipmi_user_t user,
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
unsigned long flags;
- struct ipmi_smi_handlers *handlers;
if (supplied_recv)
@@ -1520,8 +1565,7 @@ static int i_ipmi_request(ipmi_user_t user,
}
rcu_read_lock();
- handlers = intf->handlers;
- if (!handlers) {
+ if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
@@ -1856,7 +1900,7 @@ static int i_ipmi_request(ipmi_user_t user,
}
#endif
- handlers->sender(intf->send_info, smi_msg, priority);
+ smi_send(intf, intf->handlers, smi_msg, priority);
rcu_read_unlock();
return 0;
@@ -2153,7 +2197,7 @@ static void remove_proc_entries(ipmi_smi_t smi)
static int __find_bmc_guid(struct device *dev, void *data)
{
unsigned char *id = data;
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return memcmp(bmc->guid, id, 16) == 0;
}
@@ -2164,7 +2208,7 @@ static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
if (dev)
- return dev_get_drvdata(dev);
+ return to_bmc_device(dev);
else
return NULL;
}
@@ -2177,7 +2221,7 @@ struct prod_dev_id {
static int __find_bmc_prod_dev_id(struct device *dev, void *data)
{
struct prod_dev_id *id = data;
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return (bmc->id.product_id == id->product_id
&& bmc->id.device_id == id->device_id);
@@ -2195,7 +2239,7 @@ static struct bmc_device *ipmi_find_bmc_prod_dev_id(
dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
if (dev)
- return dev_get_drvdata(dev);
+ return to_bmc_device(dev);
else
return NULL;
}
@@ -2204,84 +2248,92 @@ static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}
+DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
-static ssize_t provides_dev_sdrs_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t provides_device_sdrs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 10, "%u\n",
(bmc->id.device_revision & 0x80) >> 7);
}
+DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 20, "%u\n",
bmc->id.device_revision & 0x0F);
}
+DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
-static ssize_t firmware_rev_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t firmware_revision_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
bmc->id.firmware_revision_2);
}
+DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 20, "%u.%u\n",
ipmi_version_major(&bmc->id),
ipmi_version_minor(&bmc->id));
}
+DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 10, "0x%02x\n",
bmc->id.additional_device_support);
}
+DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}
+DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}
+DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
bmc->id.aux_firmware_revision[3],
@@ -2289,174 +2341,96 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
bmc->id.aux_firmware_revision[1],
bmc->id.aux_firmware_revision[0]);
}
+DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct bmc_device *bmc = dev_get_drvdata(dev);
+ struct bmc_device *bmc = to_bmc_device(dev);
return snprintf(buf, 100, "%Lx%Lx\n",
(long long) bmc->guid[0],
(long long) bmc->guid[8]);
}
+DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+
+static struct attribute *bmc_dev_attrs[] = {
+ &dev_attr_device_id.attr,
+ &dev_attr_provides_device_sdrs.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_firmware_revision.attr,
+ &dev_attr_ipmi_version.attr,
+ &dev_attr_additional_device_support.attr,
+ &dev_attr_manufacturer_id.attr,
+ &dev_attr_product_id.attr,
+ NULL
+};
-static void remove_files(struct bmc_device *bmc)
-{
- if (!bmc->dev)
- return;
+static struct attribute_group bmc_dev_attr_group = {
+ .attrs = bmc_dev_attrs,
+};
- device_remove_file(&bmc->dev->dev,
- &bmc->device_id_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->provides_dev_sdrs_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->revision_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->firmware_rev_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->version_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->add_dev_support_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->manufacturer_id_attr);
- device_remove_file(&bmc->dev->dev,
- &bmc->product_id_attr);
+static const struct attribute_group *bmc_dev_attr_groups[] = {
+ &bmc_dev_attr_group,
+ NULL
+};
- if (bmc->id.aux_firmware_revision_set)
- device_remove_file(&bmc->dev->dev,
- &bmc->aux_firmware_rev_attr);
- if (bmc->guid_set)
- device_remove_file(&bmc->dev->dev,
- &bmc->guid_attr);
+static struct device_type bmc_device_type = {
+ .groups = bmc_dev_attr_groups,
+};
+
+static void
+release_bmc_device(struct device *dev)
+{
+ kfree(to_bmc_device(dev));
}
static void
cleanup_bmc_device(struct kref *ref)
{
- struct bmc_device *bmc;
+ struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
- bmc = container_of(ref, struct bmc_device, refcount);
+ if (bmc->id.aux_firmware_revision_set)
+ device_remove_file(&bmc->pdev.dev,
+ &bmc->aux_firmware_rev_attr);
+ if (bmc->guid_set)
+ device_remove_file(&bmc->pdev.dev,
+ &bmc->guid_attr);
- remove_files(bmc);
- platform_device_unregister(bmc->dev);
- kfree(bmc);
+ platform_device_unregister(&bmc->pdev);
}
static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
struct bmc_device *bmc = intf->bmc;
- if (intf->sysfs_name) {
- sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
- kfree(intf->sysfs_name);
- intf->sysfs_name = NULL;
- }
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
if (intf->my_dev_name) {
- sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
+ sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
}
mutex_lock(&ipmidriver_mutex);
- kref_put(&bmc->refcount, cleanup_bmc_device);
+ kref_put(&bmc->usecount, cleanup_bmc_device);
intf->bmc = NULL;
mutex_unlock(&ipmidriver_mutex);
}
-static int create_files(struct bmc_device *bmc)
+static int create_bmc_files(struct bmc_device *bmc)
{
int err;
- bmc->device_id_attr.attr.name = "device_id";
- bmc->device_id_attr.attr.mode = S_IRUGO;
- bmc->device_id_attr.show = device_id_show;
- sysfs_attr_init(&bmc->device_id_attr.attr);
-
- bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
- bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
- bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
- sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
-
- bmc->revision_attr.attr.name = "revision";
- bmc->revision_attr.attr.mode = S_IRUGO;
- bmc->revision_attr.show = revision_show;
- sysfs_attr_init(&bmc->revision_attr.attr);
-
- bmc->firmware_rev_attr.attr.name = "firmware_revision";
- bmc->firmware_rev_attr.attr.mode = S_IRUGO;
- bmc->firmware_rev_attr.show = firmware_rev_show;
- sysfs_attr_init(&bmc->firmware_rev_attr.attr);
-
- bmc->version_attr.attr.name = "ipmi_version";
- bmc->version_attr.attr.mode = S_IRUGO;
- bmc->version_attr.show = ipmi_version_show;
- sysfs_attr_init(&bmc->version_attr.attr);
-
- bmc->add_dev_support_attr.attr.name = "additional_device_support";
- bmc->add_dev_support_attr.attr.mode = S_IRUGO;
- bmc->add_dev_support_attr.show = add_dev_support_show;
- sysfs_attr_init(&bmc->add_dev_support_attr.attr);
-
- bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
- bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
- bmc->manufacturer_id_attr.show = manufacturer_id_show;
- sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
-
- bmc->product_id_attr.attr.name = "product_id";
- bmc->product_id_attr.attr.mode = S_IRUGO;
- bmc->product_id_attr.show = product_id_show;
- sysfs_attr_init(&bmc->product_id_attr.attr);
-
- bmc->guid_attr.attr.name = "guid";
- bmc->guid_attr.attr.mode = S_IRUGO;
- bmc->guid_attr.show = guid_show;
- sysfs_attr_init(&bmc->guid_attr.attr);
-
- bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
- bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
- bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
- sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
-
- err = device_create_file(&bmc->dev->dev,
- &bmc->device_id_attr);
- if (err)
- goto out;
- err = device_create_file(&bmc->dev->dev,
- &bmc->provides_dev_sdrs_attr);
- if (err)
- goto out_devid;
- err = device_create_file(&bmc->dev->dev,
- &bmc->revision_attr);
- if (err)
- goto out_sdrs;
- err = device_create_file(&bmc->dev->dev,
- &bmc->firmware_rev_attr);
- if (err)
- goto out_rev;
- err = device_create_file(&bmc->dev->dev,
- &bmc->version_attr);
- if (err)
- goto out_firm;
- err = device_create_file(&bmc->dev->dev,
- &bmc->add_dev_support_attr);
- if (err)
- goto out_version;
- err = device_create_file(&bmc->dev->dev,
- &bmc->manufacturer_id_attr);
- if (err)
- goto out_add_dev;
- err = device_create_file(&bmc->dev->dev,
- &bmc->product_id_attr);
- if (err)
- goto out_manu;
if (bmc->id.aux_firmware_revision_set) {
- err = device_create_file(&bmc->dev->dev,
+ bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
+ err = device_create_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
if (err)
- goto out_prod_id;
+ goto out;
}
if (bmc->guid_set) {
- err = device_create_file(&bmc->dev->dev,
+ bmc->guid_attr.attr.name = "guid";
+ err = device_create_file(&bmc->pdev.dev,
&bmc->guid_attr);
if (err)
goto out_aux_firm;
@@ -2466,44 +2440,17 @@ static int create_files(struct bmc_device *bmc)
out_aux_firm:
if (bmc->id.aux_firmware_revision_set)
- device_remove_file(&bmc->dev->dev,
+ device_remove_file(&bmc->pdev.dev,
&bmc->aux_firmware_rev_attr);
-out_prod_id:
- device_remove_file(&bmc->dev->dev,
- &bmc->product_id_attr);
-out_manu:
- device_remove_file(&bmc->dev->dev,
- &bmc->manufacturer_id_attr);
-out_add_dev:
- device_remove_file(&bmc->dev->dev,
- &bmc->add_dev_support_attr);
-out_version:
- device_remove_file(&bmc->dev->dev,
- &bmc->version_attr);
-out_firm:
- device_remove_file(&bmc->dev->dev,
- &bmc->firmware_rev_attr);
-out_rev:
- device_remove_file(&bmc->dev->dev,
- &bmc->revision_attr);
-out_sdrs:
- device_remove_file(&bmc->dev->dev,
- &bmc->provides_dev_sdrs_attr);
-out_devid:
- device_remove_file(&bmc->dev->dev,
- &bmc->device_id_attr);
out:
return err;
}
-static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
- const char *sysfs_name)
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
{
int rv;
struct bmc_device *bmc = intf->bmc;
struct bmc_device *old_bmc;
- int size;
- char dummy[1];
mutex_lock(&ipmidriver_mutex);
@@ -2527,7 +2474,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
intf->bmc = old_bmc;
bmc = old_bmc;
- kref_get(&bmc->refcount);
+ kref_get(&bmc->usecount);
mutex_unlock(&ipmidriver_mutex);
printk(KERN_INFO
@@ -2537,12 +2484,12 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
bmc->id.product_id,
bmc->id.device_id);
} else {
- char name[14];
unsigned char orig_dev_id = bmc->id.device_id;
int warn_printed = 0;
- snprintf(name, sizeof(name),
+ snprintf(bmc->name, sizeof(bmc->name),
"ipmi_bmc.%4.4x", bmc->id.product_id);
+ bmc->pdev.name = bmc->name;
while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
bmc->id.product_id,
@@ -2566,23 +2513,16 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
}
}
- bmc->dev = platform_device_alloc(name, bmc->id.device_id);
- if (!bmc->dev) {
- mutex_unlock(&ipmidriver_mutex);
- printk(KERN_ERR
- "ipmi_msghandler:"
- " Unable to allocate platform device\n");
- return -ENOMEM;
- }
- bmc->dev->dev.driver = &ipmidriver.driver;
- dev_set_drvdata(&bmc->dev->dev, bmc);
- kref_init(&bmc->refcount);
+ bmc->pdev.dev.driver = &ipmidriver.driver;
+ bmc->pdev.id = bmc->id.device_id;
+ bmc->pdev.dev.release = release_bmc_device;
+ bmc->pdev.dev.type = &bmc_device_type;
+ kref_init(&bmc->usecount);
- rv = platform_device_add(bmc->dev);
+ rv = platform_device_register(&bmc->pdev);
mutex_unlock(&ipmidriver_mutex);
if (rv) {
- platform_device_put(bmc->dev);
- bmc->dev = NULL;
+ put_device(&bmc->pdev.dev);
printk(KERN_ERR
"ipmi_msghandler:"
" Unable to register bmc device: %d\n",
@@ -2594,10 +2534,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
return rv;
}
- rv = create_files(bmc);
+ rv = create_bmc_files(bmc);
if (rv) {
mutex_lock(&ipmidriver_mutex);
- platform_device_unregister(bmc->dev);
+ platform_device_unregister(&bmc->pdev);
mutex_unlock(&ipmidriver_mutex);
return rv;
@@ -2614,44 +2554,26 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
* create symlink from system interface device to bmc device
* and back.
*/
- intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
- if (!intf->sysfs_name) {
- rv = -ENOMEM;
- printk(KERN_ERR
- "ipmi_msghandler: allocate link to BMC: %d\n",
- rv);
- goto out_err;
- }
-
- rv = sysfs_create_link(&intf->si_dev->kobj,
- &bmc->dev->dev.kobj, intf->sysfs_name);
+ rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
if (rv) {
- kfree(intf->sysfs_name);
- intf->sysfs_name = NULL;
printk(KERN_ERR
"ipmi_msghandler: Unable to create bmc symlink: %d\n",
rv);
goto out_err;
}
- size = snprintf(dummy, 0, "ipmi%d", ifnum);
- intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
+ intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
if (!intf->my_dev_name) {
- kfree(intf->sysfs_name);
- intf->sysfs_name = NULL;
rv = -ENOMEM;
printk(KERN_ERR
"ipmi_msghandler: allocate link from BMC: %d\n",
rv);
goto out_err;
}
- snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
- rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
+ rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
intf->my_dev_name);
if (rv) {
- kfree(intf->sysfs_name);
- intf->sysfs_name = NULL;
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
printk(KERN_ERR
@@ -2850,7 +2772,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *si_dev,
- const char *sysfs_name,
unsigned char slave_addr)
{
int i, j;
@@ -2909,12 +2830,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
#ifdef CONFIG_PROC_FS
mutex_init(&intf->proc_entry_lock);
#endif
- spin_lock_init(&intf->waiting_msgs_lock);
- INIT_LIST_HEAD(&intf->waiting_msgs);
+ spin_lock_init(&intf->waiting_rcv_msgs_lock);
+ INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
tasklet_init(&intf->recv_tasklet,
smi_recv_tasklet,
(unsigned long) intf);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
+ spin_lock_init(&intf->xmit_msgs_lock);
+ INIT_LIST_HEAD(&intf->xmit_msgs);
+ INIT_LIST_HEAD(&intf->hp_xmit_msgs);
spin_lock_init(&intf->events_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -2984,7 +2908,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
if (rv == 0)
rv = add_proc_entries(intf, i);
- rv = ipmi_bmc_register(intf, i, sysfs_name);
+ rv = ipmi_bmc_register(intf, i);
out:
if (rv) {
@@ -3014,12 +2938,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
}
EXPORT_SYMBOL(ipmi_register_smi);
+static void deliver_smi_err_response(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg,
+ unsigned char err)
+{
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = err;
+ msg->rsp_size = 3;
+ /* It's an error, so it will never requeue, no need to check return. */
+ handle_one_recv_msg(intf, msg);
+}
+
static void cleanup_smi_msgs(ipmi_smi_t intf)
{
int i;
struct seq_table *ent;
+ struct ipmi_smi_msg *msg;
+ struct list_head *entry;
+ struct list_head tmplist;
+
+ /* Clear out our transmit queues and hold the messages. */
+ INIT_LIST_HEAD(&tmplist);
+ list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
+ list_splice_tail(&intf->xmit_msgs, &tmplist);
+
+ /* Current message first, to preserve order */
+ while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
+ /* Wait for the message to clear out. */
+ schedule_timeout(1);
+ }
/* No need for locks, the interface is down. */
+
+ /*
+ * Return errors for all pending messages in queue and in the
+ * tables waiting for remote responses.
+ */
+ while (!list_empty(&tmplist)) {
+ entry = tmplist.next;
+ list_del(entry);
+ msg = list_entry(entry, struct ipmi_smi_msg, link);
+ deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
+ }
+
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
ent = &(intf->seq_table[i]);
if (!ent->inuse)
@@ -3031,20 +2993,33 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
int ipmi_unregister_smi(ipmi_smi_t intf)
{
struct ipmi_smi_watcher *w;
- int intf_num = intf->intf_num;
+ int intf_num = intf->intf_num;
+ ipmi_user_t user;
ipmi_bmc_unregister(intf);
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
- intf->handlers = NULL;
+ intf->in_shutdown = true;
list_del_rcu(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
synchronize_rcu();
cleanup_smi_msgs(intf);
+ /* Clean up the effects of users on the lower-level software. */
+ mutex_lock(&ipmi_interfaces_mutex);
+ rcu_read_lock();
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ module_put(intf->handlers->owner);
+ if (intf->handlers->dec_usecount)
+ intf->handlers->dec_usecount(intf->send_info);
+ }
+ rcu_read_unlock();
+ intf->handlers = NULL;
+ mutex_unlock(&ipmi_interfaces_mutex);
+
remove_proc_entries(intf);
/*
@@ -3134,7 +3109,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
ipmi_user_t user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
struct ipmi_recv_msg *recv_msg;
- struct ipmi_smi_handlers *handlers;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -3188,9 +3162,8 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
}
#endif
rcu_read_lock();
- handlers = intf->handlers;
- if (handlers) {
- handlers->sender(intf->send_info, msg, 0);
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
/*
* We used the message, so return the value
* that causes it to not be freed or
@@ -3857,32 +3830,32 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- while (!list_empty(&intf->waiting_msgs)) {
- smi_msg = list_entry(intf->waiting_msgs.next,
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ while (!list_empty(&intf->waiting_rcv_msgs)) {
+ smi_msg = list_entry(intf->waiting_rcv_msgs.next,
struct ipmi_smi_msg, link);
- list_del(&smi_msg->link);
if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
rv = handle_one_recv_msg(intf, smi_msg);
if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- if (rv == 0) {
- /* Message handled */
- ipmi_free_smi_msg(smi_msg);
- } else if (rv < 0) {
- /* Fatal error on the message, del but don't free. */
- } else {
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ if (rv > 0) {
/*
* To preserve message order, quit if we
* can't handle a message.
*/
- list_add(&smi_msg->link, &intf->waiting_msgs);
break;
+ } else {
+ list_del(&smi_msg->link);
+ if (rv == 0)
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
+ /* If rv < 0, fatal error, del but don't free. */
}
}
if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
/*
* If the pretimout count is non-zero, decrement one from it and
@@ -3903,7 +3876,41 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
static void smi_recv_tasklet(unsigned long val)
{
- handle_new_recv_msgs((ipmi_smi_t) val);
+ unsigned long flags = 0; /* keep us warning-free. */
+ ipmi_smi_t intf = (ipmi_smi_t) val;
+ int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *newmsg = NULL;
+
+ /*
+ * Start the next message if available.
+ *
+ * Do this here, not in the actual receiver, because we may deadlock
+ * because the lower layer is allowed to hold locks while calling
+ * message delivery.
+ */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ if (intf->curr_msg == NULL && !intf->in_shutdown) {
+ struct list_head *entry = NULL;
+
+ /* Pick the high priority queue first. */
+ if (!list_empty(&intf->hp_xmit_msgs))
+ entry = intf->hp_xmit_msgs.next;
+ else if (!list_empty(&intf->xmit_msgs))
+ entry = intf->xmit_msgs.next;
+
+ if (entry) {
+ list_del(entry);
+ newmsg = list_entry(entry, struct ipmi_smi_msg, link);
+ intf->curr_msg = newmsg;
+ }
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ if (newmsg)
+ intf->handlers->sender(intf->send_info, newmsg);
+
+ handle_new_recv_msgs(intf);
}
/* Handle a new message from the lower layer. */
@@ -3911,13 +3918,16 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int run_to_completion;
-
+ int run_to_completion = intf->run_to_completion;
if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
+
+ if (intf->in_shutdown)
+ goto free_msg;
+
/*
* This is the local response to a command send, start
* the timer for these. The user_data will not be
@@ -3953,29 +3963,40 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
+free_msg:
ipmi_free_smi_msg(msg);
- goto out;
+ } else {
+ /*
+ * To preserve message order, we keep a queue and deliver from
+ * a tasklet.
+ */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+ list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ flags);
}
- /*
- * To preserve message order, if the list is not empty, we
- * tack this message onto the end of the list.
- */
- run_to_completion = intf->run_to_completion;
if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_add_tail(&msg->link, &intf->waiting_msgs);
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ if (msg == intf->curr_msg)
+ intf->curr_msg = NULL;
if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
- tasklet_schedule(&intf->recv_tasklet);
- out:
- return;
+ if (run_to_completion)
+ smi_recv_tasklet((unsigned long) intf);
+ else
+ tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
+ if (intf->in_shutdown)
+ return;
+
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
tasklet_schedule(&intf->recv_tasklet);
}
@@ -4017,7 +4038,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
struct ipmi_recv_msg *msg;
struct ipmi_smi_handlers *handlers;
- if (intf->intf_num == -1)
+ if (intf->in_shutdown)
return;
if (!ent->inuse)
@@ -4082,8 +4103,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
ipmi_inc_stat(intf,
retransmitted_ipmb_commands);
- intf->handlers->sender(intf->send_info,
- smi_msg, 0);
+ smi_send(intf, intf->handlers, smi_msg, 0);
} else
ipmi_free_smi_msg(smi_msg);
@@ -4145,15 +4165,12 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
static void ipmi_request_event(ipmi_smi_t intf)
{
- struct ipmi_smi_handlers *handlers;
-
/* No event requests when in maintenance mode. */
if (intf->maintenance_mode_enable)
return;
- handlers = intf->handlers;
- if (handlers)
- handlers->request_events(intf->send_info);
+ if (!intf->in_shutdown)
+ intf->handlers->request_events(intf->send_info);
}
static struct timer_list ipmi_timer;
@@ -4548,6 +4565,7 @@ static int ipmi_init_msghandler(void)
proc_ipmi_root = proc_mkdir("ipmi", NULL);
if (!proc_ipmi_root) {
printk(KERN_ERR PFX "Unable to create IPMI proc dir");
+ driver_unregister(&ipmidriver.driver);
return -ENOMEM;
}
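[Editor's note: the sysfs conversion in the ipmi_msghandler.c hunks above swaps per-attribute device_create_file() calls for static DEVICE_ATTR() declarations collected in an attribute_group hung off a device_type, letting the driver core create and remove the files. A minimal sketch of that pattern; the "demo" names are illustrative only.]

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return snprintf(buf, 10, "%u\n", 42);	/* placeholder value */
}
static DEVICE_ATTR(demo, S_IRUGO, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL
};
static struct attribute_group demo_group = {
	.attrs = demo_attrs,
};
static const struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL
};
/* assign to dev->type so the core instantiates the files automatically */
static struct device_type demo_type = {
	.groups = demo_groups,
};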
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
new file mode 100644
index 000000000000..79524ed2a3cb
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -0,0 +1,310 @@
+/*
+ * PowerNV OPAL IPMI driver
+ *
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#define pr_fmt(fmt) "ipmi-powernv: " fmt
+
+#include <linux/ipmi_smi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <asm/opal.h>
+
+
+struct ipmi_smi_powernv {
+ u64 interface_id;
+ struct ipmi_device_id ipmi_id;
+ ipmi_smi_t intf;
+ u64 event;
+ struct notifier_block event_nb;
+
+ /**
+ * We assume that there can only be one outstanding request, so
+ * keep the pending message in cur_msg. We protect this from concurrent
+ * updates through send & recv calls, (and consequently opal_msg, which
+ * is in-use when cur_msg is set) with msg_lock
+ */
+ spinlock_t msg_lock;
+ struct ipmi_smi_msg *cur_msg;
+ struct opal_ipmi_msg *opal_msg;
+};
+
+static int ipmi_powernv_start_processing(void *send_info, ipmi_smi_t intf)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+
+ smi->intf = intf;
+ return 0;
+}
+
+static void send_error_reply(struct ipmi_smi_powernv *smi,
+ struct ipmi_smi_msg *msg, u8 completion_code)
+{
+ msg->rsp[0] = msg->data[0] | 0x4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = completion_code;
+ msg->rsp_size = 3;
+ ipmi_smi_msg_received(smi->intf, msg);
+}
+
+static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+ struct opal_ipmi_msg *opal_msg;
+ unsigned long flags;
+ int comp, rc;
+ size_t size;
+
+ /* ensure data_len will fit in the opal_ipmi_msg buffer... */
+ if (msg->data_size > IPMI_MAX_MSG_LENGTH) {
+ comp = IPMI_REQ_LEN_EXCEEDED_ERR;
+ goto err;
+ }
+
+ /* ... and that we at least have netfn and cmd bytes */
+ if (msg->data_size < 2) {
+ comp = IPMI_REQ_LEN_INVALID_ERR;
+ goto err;
+ }
+
+ spin_lock_irqsave(&smi->msg_lock, flags);
+
+ if (smi->cur_msg) {
+ comp = IPMI_NODE_BUSY_ERR;
+ goto err_unlock;
+ }
+
+ /* format our data for the OPAL API */
+ opal_msg = smi->opal_msg;
+ opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
+ opal_msg->netfn = msg->data[0];
+ opal_msg->cmd = msg->data[1];
+ if (msg->data_size > 2)
+ memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2);
+
+ /* data_size already includes the netfn and cmd bytes */
+ size = sizeof(*opal_msg) + msg->data_size - 2;
+
+ pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__,
+ smi->interface_id, opal_msg, size);
+ rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
+ pr_devel("%s: -> %d\n", __func__, rc);
+
+ if (!rc) {
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return;
+ }
+
+ comp = IPMI_ERR_UNSPECIFIED;
+err_unlock:
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+err:
+ send_error_reply(smi, msg, comp);
+}
+
+static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
+{
+ struct opal_ipmi_msg *opal_msg;
+ struct ipmi_smi_msg *msg;
+ unsigned long flags;
+ uint64_t size;
+ int rc;
+
+ pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__,
+ smi->interface_id);
+
+ spin_lock_irqsave(&smi->msg_lock, flags);
+
+ if (!smi->cur_msg) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("no current message?\n");
+ return 0;
+ }
+
+ msg = smi->cur_msg;
+ opal_msg = smi->opal_msg;
+
+ size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH);
+
+ rc = opal_ipmi_recv(smi->interface_id,
+ opal_msg,
+ &size);
+ size = be64_to_cpu(size);
+ pr_devel("%s: -> %d (size %lld)\n", __func__,
+ rc, rc == 0 ? size : 0);
+ if (rc) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ ipmi_free_smi_msg(msg);
+ return 0;
+ }
+
+ if (size < sizeof(*opal_msg)) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("unexpected IPMI message size %lld\n", size);
+ return 0;
+ }
+
+ if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) {
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ pr_warn("unexpected IPMI message format (version %d)\n",
+ opal_msg->version);
+ return 0;
+ }
+
+ msg->rsp[0] = opal_msg->netfn;
+ msg->rsp[1] = opal_msg->cmd;
+ if (size > sizeof(*opal_msg))
+ memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg));
+ msg->rsp_size = 2 + size - sizeof(*opal_msg);
+
+ smi->cur_msg = NULL;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ ipmi_smi_msg_received(smi->intf, msg);
+ return 0;
+}
+
+static void ipmi_powernv_request_events(void *send_info)
+{
+}
+
+static void ipmi_powernv_set_run_to_completion(void *send_info,
+ bool run_to_completion)
+{
+}
+
+static void ipmi_powernv_poll(void *send_info)
+{
+ struct ipmi_smi_powernv *smi = send_info;
+
+ ipmi_powernv_recv(smi);
+}
+
+static struct ipmi_smi_handlers ipmi_powernv_smi_handlers = {
+ .owner = THIS_MODULE,
+ .start_processing = ipmi_powernv_start_processing,
+ .sender = ipmi_powernv_send,
+ .request_events = ipmi_powernv_request_events,
+ .set_run_to_completion = ipmi_powernv_set_run_to_completion,
+ .poll = ipmi_powernv_poll,
+};
+
+static int ipmi_opal_event(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ struct ipmi_smi_powernv *smi = container_of(nb,
+ struct ipmi_smi_powernv, event_nb);
+
+ if (events & smi->event)
+ ipmi_powernv_recv(smi);
+ return 0;
+}
+
+static int ipmi_powernv_probe(struct platform_device *pdev)
+{
+ struct ipmi_smi_powernv *ipmi;
+ struct device *dev;
+ u32 prop;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+
+ ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL);
+ if (!ipmi)
+ return -ENOMEM;
+
+ spin_lock_init(&ipmi->msg_lock);
+
+ rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id",
+ &prop);
+ if (rc) {
+ dev_warn(dev, "No interface ID property\n");
+ goto err_free;
+ }
+ ipmi->interface_id = prop;
+
+ rc = of_property_read_u32(dev->of_node, "interrupts", &prop);
+ if (rc) {
+ dev_warn(dev, "No interrupts property\n");
+ goto err_free;
+ }
+
+ ipmi->event = 1ull << prop;
+ ipmi->event_nb.notifier_call = ipmi_opal_event;
+
+ rc = opal_notifier_register(&ipmi->event_nb);
+ if (rc) {
+ dev_warn(dev, "OPAL notifier registration failed (%d)\n", rc);
+ goto err_free;
+ }
+
+ ipmi->opal_msg = devm_kmalloc(dev,
+ sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH,
+ GFP_KERNEL);
+ if (!ipmi->opal_msg) {
+ rc = -ENOMEM;
+ goto err_unregister;
+ }
+
+ /* todo: query actual ipmi_device_id */
+ rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi,
+ &ipmi->ipmi_id, dev, 0);
+ if (rc) {
+ dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
+ goto err_free_msg;
+ }
+
+ dev_set_drvdata(dev, ipmi);
+ return 0;
+
+err_free_msg:
+ devm_kfree(dev, ipmi->opal_msg);
+err_unregister:
+ opal_notifier_unregister(&ipmi->event_nb);
+err_free:
+ devm_kfree(dev, ipmi);
+ return rc;
+}
+
+static int ipmi_powernv_remove(struct platform_device *pdev)
+{
+ struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
+
+ ipmi_unregister_smi(smi->intf);
+ opal_notifier_unregister(&smi->event_nb);
+ return 0;
+}
+
+static const struct of_device_id ipmi_powernv_match[] = {
+ { .compatible = "ibm,opal-ipmi" },
+ { },
+};
+
+
+static struct platform_driver powernv_ipmi_driver = {
+ .driver = {
+ .name = "ipmi-powernv",
+ .owner = THIS_MODULE,
+ .of_match_table = ipmi_powernv_match,
+ },
+ .probe = ipmi_powernv_probe,
+ .remove = ipmi_powernv_remove,
+};
+
+
+module_platform_driver(powernv_ipmi_driver);
+
+MODULE_DEVICE_TABLE(of, ipmi_powernv_match);
+MODULE_DESCRIPTION("powernv IPMI driver");
+MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>");
+MODULE_LICENSE("GPL");
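[Editor's note: a sketch of why "msg->rsp[0] = msg->data[0] | 0x4" above forms an error reply, assuming the standard IPMI framing: the first byte carries (netfn << 2) | LUN, and a response netfn is the request netfn plus one, i.e. bit 2 of the shifted encoding. Illustrative helper, not kernel code.]

static void fill_error_rsp(unsigned char *rsp, const unsigned char *req,
			   unsigned char completion_code)
{
	rsp[0] = req[0] | 0x4;	/* request netfn -> response netfn */
	rsp[1] = req[1];	/* echo the command byte */
	rsp[2] = completion_code;
}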
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5c4e1f625bbb..90c7fdf95419 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -92,12 +92,9 @@ enum si_intf_state {
SI_GETTING_FLAGS,
SI_GETTING_EVENTS,
SI_CLEARING_FLAGS,
- SI_CLEARING_FLAGS_THEN_SET_IRQ,
SI_GETTING_MESSAGES,
- SI_ENABLE_INTERRUPTS1,
- SI_ENABLE_INTERRUPTS2,
- SI_DISABLE_INTERRUPTS1,
- SI_DISABLE_INTERRUPTS2
+ SI_CHECKING_ENABLES,
+ SI_SETTING_ENABLES
/* FIXME - add watchdog stuff. */
};
@@ -111,10 +108,6 @@ enum si_type {
};
static char *si_to_str[] = { "kcs", "smic", "bt" };
-static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
- "ACPI", "SMBIOS", "PCI",
- "device-tree", "default" };
-
#define DEVICE_NAME "ipmi_si"
static struct platform_driver ipmi_driver;
@@ -174,8 +167,7 @@ struct smi_info {
struct si_sm_handlers *handlers;
enum si_type si_type;
spinlock_t si_lock;
- struct list_head xmit_msgs;
- struct list_head hp_xmit_msgs;
+ struct ipmi_smi_msg *waiting_msg;
struct ipmi_smi_msg *curr_msg;
enum si_intf_state si_state;
@@ -254,9 +246,6 @@ struct smi_info {
/* The time (in jiffies) the last timeout occurred at. */
unsigned long last_timeout_jiffies;
- /* Used to gracefully stop the timer without race conditions. */
- atomic_t stop_operation;
-
/* Are we waiting for the events, pretimeouts, received msgs? */
atomic_t need_watch;
@@ -268,6 +257,16 @@ struct smi_info {
*/
bool interrupt_disabled;
+ /*
+ * Does the BMC support events?
+ */
+ bool supports_event_msg_buff;
+
+ /*
+ * Did we get an attention that we did not handle?
+ */
+ bool got_attn;
+
/* From the get device id response... */
struct ipmi_device_id device_id;
@@ -332,7 +331,10 @@ static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
/* Deliver the message to the upper layer. */
- ipmi_smi_msg_received(smi_info->intf, msg);
+ if (smi_info->intf)
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ else
+ ipmi_free_smi_msg(msg);
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -356,28 +358,18 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
int rv;
- struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
- /* Pick the high priority queue first. */
- if (!list_empty(&(smi_info->hp_xmit_msgs))) {
- entry = smi_info->hp_xmit_msgs.next;
- } else if (!list_empty(&(smi_info->xmit_msgs))) {
- entry = smi_info->xmit_msgs.next;
- }
-
- if (!entry) {
+ if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
rv = SI_SM_IDLE;
} else {
int err;
- list_del(entry);
- smi_info->curr_msg = list_entry(entry,
- struct ipmi_smi_msg,
- link);
+ smi_info->curr_msg = smi_info->waiting_msg;
+ smi_info->waiting_msg = NULL;
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
@@ -401,30 +393,15 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
return rv;
}
-static void start_enable_irq(struct smi_info *smi_info)
+static void start_check_enables(struct smi_info *smi_info)
{
unsigned char msg[2];
- /*
- * If we are enabling interrupts, we have to tell the
- * BMC to use them.
- */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
- smi_info->si_state = SI_ENABLE_INTERRUPTS1;
-}
-
-static void start_disable_irq(struct smi_info *smi_info)
-{
- unsigned char msg[2];
-
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
-
- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
- smi_info->si_state = SI_DISABLE_INTERRUPTS1;
+ smi_info->si_state = SI_CHECKING_ENABLES;
}
static void start_clear_flags(struct smi_info *smi_info)
@@ -440,6 +417,32 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void start_getting_msg_queue(struct smi_info *smi_info)
+{
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_MESSAGES;
+}
+
+static void start_getting_events(struct smi_info *smi_info)
+{
+ smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ smi_info->curr_msg->data_size = 2;
+
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm,
+ smi_info->curr_msg->data,
+ smi_info->curr_msg->data_size);
+ smi_info->si_state = SI_GETTING_EVENTS;
+}
+
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
smi_info->last_timeout_jiffies = jiffies;
@@ -453,22 +456,45 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
* polled until we can allocate some memory. Once we have some
* memory, we will re-enable the interrupt.
*/
-static inline void disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
- start_disable_irq(smi_info);
smi_info->interrupt_disabled = true;
- if (!atomic_read(&smi_info->stop_operation))
- smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+ start_check_enables(smi_info);
+ return true;
}
+ return false;
}
-static inline void enable_si_irq(struct smi_info *smi_info)
+static inline bool enable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
- start_enable_irq(smi_info);
smi_info->interrupt_disabled = false;
+ start_check_enables(smi_info);
+ return true;
}
+ return false;
+}
+
+/*
+ * Allocate a message. If unable to allocate, start the interrupt
+ * disable process and return NULL. If able to allocate but
+ * interrupts are disabled, free the message and return NULL after
+ * starting the interrupt enable process.
+ */
+static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
+{
+ struct ipmi_smi_msg *msg;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ if (!disable_si_irq(smi_info))
+ smi_info->si_state = SI_NORMAL;
+ } else if (enable_si_irq(smi_info)) {
+ ipmi_free_smi_msg(msg);
+ msg = NULL;
+ }
+ return msg;
}
static void handle_flags(struct smi_info *smi_info)
@@ -480,45 +506,22 @@ static void handle_flags(struct smi_info *smi_info)
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
- ipmi_smi_watchdog_pretimeout(smi_info->intf);
+ if (smi_info->intf)
+ ipmi_smi_watchdog_pretimeout(smi_info->intf);
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
- smi_info->curr_msg = ipmi_alloc_smi_msg();
- if (!smi_info->curr_msg) {
- disable_si_irq(smi_info);
- smi_info->si_state = SI_NORMAL;
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
return;
- }
- enable_si_irq(smi_info);
-
- smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
- smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
- smi_info->curr_msg->data_size = 2;
- smi_info->handlers->start_transaction(
- smi_info->si_sm,
- smi_info->curr_msg->data,
- smi_info->curr_msg->data_size);
- smi_info->si_state = SI_GETTING_MESSAGES;
+ start_getting_msg_queue(smi_info);
} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
/* Events available. */
- smi_info->curr_msg = ipmi_alloc_smi_msg();
- if (!smi_info->curr_msg) {
- disable_si_irq(smi_info);
- smi_info->si_state = SI_NORMAL;
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
return;
- }
- enable_si_irq(smi_info);
-
- smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
- smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
- smi_info->curr_msg->data_size = 2;
- smi_info->handlers->start_transaction(
- smi_info->si_sm,
- smi_info->curr_msg->data,
- smi_info->curr_msg->data_size);
- smi_info->si_state = SI_GETTING_EVENTS;
+ start_getting_events(smi_info);
} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
smi_info->oem_data_avail_handler) {
if (smi_info->oem_data_avail_handler(smi_info))
@@ -527,6 +530,55 @@ static void handle_flags(struct smi_info *smi_info)
smi_info->si_state = SI_NORMAL;
}
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+ IPMI_BMC_EVT_MSG_INTR)
+
+static u8 current_global_enables(struct smi_info *smi_info, u8 base,
+ bool *irq_on)
+{
+ u8 enables = 0;
+
+ if (smi_info->supports_event_msg_buff)
+ enables |= IPMI_BMC_EVT_MSG_BUFF;
+ else
+ enables &= ~IPMI_BMC_EVT_MSG_BUFF;
+
+ if (smi_info->irq && !smi_info->interrupt_disabled)
+ enables |= IPMI_BMC_RCV_MSG_INTR;
+ else
+ enables &= ~IPMI_BMC_RCV_MSG_INTR;
+
+ if (smi_info->supports_event_msg_buff &&
+ smi_info->irq && !smi_info->interrupt_disabled)
+ enables |= IPMI_BMC_EVT_MSG_INTR;
+ else
+ enables &= ~IPMI_BMC_EVT_MSG_INTR;
+
+ *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
+
+ return enables;
+}
+
+static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
+{
+ u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
+
+ irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
+
+ if ((bool)irqstate == irq_on)
+ return;
+
+ if (irq_on)
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+ else
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
+}
+
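current_global_enables() computes only the bits covered by GLOBAL_ENABLES_MASK; the SI_CHECKING_ENABLES handler below merges them into the value the BMC reported, so unrelated enable bits survive the write back. A small standalone sketch of that read-modify-write (the bit values here are assumptions for the demo, not the real IPMI encodings):

#include <stdint.h>
#include <stdio.h>

#define DEMO_EVT_MSG_BUFF 0x02	/* assumed demo values */
#define DEMO_RCV_MSG_INTR 0x01
#define DEMO_EVT_MSG_INTR 0x08
#define DEMO_MASK (DEMO_EVT_MSG_BUFF | DEMO_RCV_MSG_INTR | DEMO_EVT_MSG_INTR)

int main(void)
{
	uint8_t reported = 0x31;	/* BMC reply with unrelated bits set */
	uint8_t wanted = DEMO_EVT_MSG_BUFF | DEMO_RCV_MSG_INTR;

	/* Replace only the masked bits; everything else passes through. */
	uint8_t to_set = wanted | (reported & ~DEMO_MASK);

	printf("write back 0x%02x (0x%02x preserved)\n",
	       (unsigned)to_set, (unsigned)(reported & ~DEMO_MASK));
	return 0;
}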
static void handle_transaction_done(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg;
@@ -581,7 +633,6 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
case SI_CLEARING_FLAGS:
- case SI_CLEARING_FLAGS_THEN_SET_IRQ:
{
unsigned char msg[3];
@@ -592,10 +643,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
dev_warn(smi_info->dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
- if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
- start_enable_irq(smi_info);
- else
- smi_info->si_state = SI_NORMAL;
+ smi_info->si_state = SI_NORMAL;
break;
}
@@ -675,9 +723,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
break;
}
- case SI_ENABLE_INTERRUPTS1:
+ case SI_CHECKING_ENABLES:
{
unsigned char msg[4];
+ u8 enables;
+ bool irq_on;
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
@@ -687,70 +737,53 @@ static void handle_transaction_done(struct smi_info *smi_info)
dev_warn(smi_info->dev,
"Maybe ok, but ipmi might run very slowly.\n");
smi_info->si_state = SI_NORMAL;
- } else {
+ break;
+ }
+ enables = current_global_enables(smi_info, 0, &irq_on);
+ if (smi_info->si_type == SI_BT)
+ /* BT has its own interrupt enable bit. */
+ check_bt_irq(smi_info, irq_on);
+ if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
+ /* Enables are not correct, fix them. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
- msg[2] = (msg[3] |
- IPMI_BMC_RCV_MSG_INTR |
- IPMI_BMC_EVT_MSG_INTR);
+ msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
smi_info->handlers->start_transaction(
smi_info->si_sm, msg, 3);
- smi_info->si_state = SI_ENABLE_INTERRUPTS2;
+ smi_info->si_state = SI_SETTING_ENABLES;
+ } else if (smi_info->supports_event_msg_buff) {
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ start_getting_msg_queue(smi_info);
+ } else {
+ smi_info->si_state = SI_NORMAL;
}
break;
}
- case SI_ENABLE_INTERRUPTS2:
+ case SI_SETTING_ENABLES:
{
unsigned char msg[4];
- /* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
+ if (msg[2] != 0)
dev_warn(smi_info->dev,
- "Couldn't set irq info: %x.\n", msg[2]);
- dev_warn(smi_info->dev,
- "Maybe ok, but ipmi might run very slowly.\n");
- } else
- smi_info->interrupt_disabled = false;
- smi_info->si_state = SI_NORMAL;
- break;
- }
-
- case SI_DISABLE_INTERRUPTS1:
- {
- unsigned char msg[4];
+ "Could not set the global enables: 0x%x.\n",
+ msg[2]);
- /* We got the flags from the SMI, now handle them. */
- smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
- dev_warn(smi_info->dev, "Could not disable interrupts"
- ", failed get.\n");
- smi_info->si_state = SI_NORMAL;
+ if (smi_info->supports_event_msg_buff) {
+ smi_info->curr_msg = ipmi_alloc_smi_msg();
+ if (!smi_info->curr_msg) {
+ smi_info->si_state = SI_NORMAL;
+ break;
+ }
+ start_getting_msg_queue(smi_info);
} else {
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
- msg[2] = (msg[3] &
- ~(IPMI_BMC_RCV_MSG_INTR |
- IPMI_BMC_EVT_MSG_INTR));
- smi_info->handlers->start_transaction(
- smi_info->si_sm, msg, 3);
- smi_info->si_state = SI_DISABLE_INTERRUPTS2;
- }
- break;
- }
-
- case SI_DISABLE_INTERRUPTS2:
- {
- unsigned char msg[4];
-
- /* We got the flags from the SMI, now handle them. */
- smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
- dev_warn(smi_info->dev, "Could not disable interrupts"
- ", failed set.\n");
+ smi_info->si_state = SI_NORMAL;
}
- smi_info->si_state = SI_NORMAL;
break;
}
}
@@ -808,25 +841,35 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
* We prefer handling attn over new messages. But don't do
* this if there is not yet an upper layer to handle anything.
*/
- if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
+ if (likely(smi_info->intf) &&
+ (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
unsigned char msg[2];
- smi_inc_stat(smi_info, attentions);
+ if (smi_info->si_state != SI_NORMAL) {
+ /*
+ * We got an ATTN, but we are doing something else.
+ * Handle the ATTN later.
+ */
+ smi_info->got_attn = true;
+ } else {
+ smi_info->got_attn = false;
+ smi_inc_stat(smi_info, attentions);
- /*
- * Got a attn, send down a get message flags to see
- * what's causing it. It would be better to handle
- * this in the upper layer, but due to the way
- * interrupts work with the SMI, that's not really
- * possible.
- */
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+ /*
+ * Got an attn, send down a get message flags to see
+ * what's causing it. It would be better to handle
+ * this in the upper layer, but due to the way
+ * interrupts work with the SMI, that's not really
+ * possible.
+ */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
- smi_info->handlers->start_transaction(
- smi_info->si_sm, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
- goto restart;
+ smi_info->handlers->start_transaction(
+ smi_info->si_sm, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+ goto restart;
+ }
}
/* If we are currently idle, try to start the next message. */
@@ -846,19 +889,21 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
*/
atomic_set(&smi_info->req_events, 0);
- smi_info->curr_msg = ipmi_alloc_smi_msg();
- if (!smi_info->curr_msg)
- goto out;
-
- smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
- smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
- smi_info->curr_msg->data_size = 2;
+ /*
+ * Take this opportunity to check the interrupt and
+ * message enable state for the BMC. The BMC can be
+ * asynchronously reset, and may thus get interrupts
+ * disabled and messages disabled.
+ */
+ if (smi_info->supports_event_msg_buff || smi_info->irq) {
+ start_check_enables(smi_info);
+ } else {
+ smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ if (!smi_info->curr_msg)
+ goto out;
- smi_info->handlers->start_transaction(
- smi_info->si_sm,
- smi_info->curr_msg->data,
- smi_info->curr_msg->data_size);
- smi_info->si_state = SI_GETTING_EVENTS;
+ start_getting_events(smi_info);
+ }
goto restart;
}
out:
@@ -879,8 +924,7 @@ static void check_start_timer_thread(struct smi_info *smi_info)
}
static void sender(void *send_info,
- struct ipmi_smi_msg *msg,
- int priority)
+ struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
@@ -889,14 +933,8 @@ static void sender(void *send_info,
struct timeval t;
#endif
- if (atomic_read(&smi_info->stop_operation)) {
- msg->rsp[0] = msg->data[0] | 4;
- msg->rsp[1] = msg->data[1];
- msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
- msg->rsp_size = 3;
- deliver_recv_msg(smi_info, msg);
- return;
- }
+ BUG_ON(smi_info->waiting_msg);
+ smi_info->waiting_msg = msg;
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
@@ -905,16 +943,16 @@ static void sender(void *send_info,
if (smi_info->run_to_completion) {
/*
- * If we are running to completion, then throw it in
- * the list and run transactions until everything is
- * clear. Priority doesn't matter here.
+ * If we are running to completion, start it and run
+ * transactions until everything is clear.
*/
+ smi_info->curr_msg = smi_info->waiting_msg;
+ smi_info->waiting_msg = NULL;
/*
* Run to completion means we are single-threaded, no
* need for locks.
*/
- list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
result = smi_event_handler(smi_info, 0);
while (result != SI_SM_IDLE) {
@@ -926,11 +964,6 @@ static void sender(void *send_info,
}
spin_lock_irqsave(&smi_info->si_lock, flags);
- if (priority > 0)
- list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
- else
- list_add_tail(&msg->link, &smi_info->xmit_msgs);
-
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
@@ -1068,8 +1101,7 @@ static void request_events(void *send_info)
{
struct smi_info *smi_info = send_info;
- if (atomic_read(&smi_info->stop_operation) ||
- !smi_info->has_event_buffer)
+ if (!smi_info->has_event_buffer)
return;
atomic_set(&smi_info->req_events, 1);
@@ -1697,7 +1729,7 @@ static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
}
*s = '\0';
s++;
- for (i = 0; hotmod_ops[i].name; i++) {
+ for (i = 0; v[i].name; i++) {
if (strcmp(*curr, v[i].name) == 0) {
*val = v[i].val;
*curr = s;
@@ -2133,6 +2165,9 @@ static int try_init_spmi(struct SPMITable *spmi)
case 3: /* BT */
info->si_type = SI_BT;
break;
+ case 4: /* SSIF, just ignore */
+ kfree(info);
+ return -EIO;
default:
printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
spmi->InterfaceType);
@@ -2250,6 +2285,8 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
case 3:
info->si_type = SI_BT;
break;
+ case 4: /* SSIF, just ignore */
+ goto err_free;
default:
dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
goto err_free;
@@ -2913,9 +2950,11 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
goto out;
}
- if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
/* buffer is already enabled, nothing to do. */
+ smi_info->supports_event_msg_buff = true;
goto out;
+ }
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
@@ -2948,6 +2987,9 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
* that the event buffer is not supported.
*/
rv = -ENOENT;
+ else
+ smi_info->supports_event_msg_buff = true;
+
out:
kfree(resp);
return rv;
@@ -3188,15 +3230,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
- if (smi_info->intf) {
- /*
- * The timer and thread are only running if the
- * interface has been started up and registered.
- */
- if (smi_info->thread != NULL)
- kthread_stop(smi_info->thread);
+ if (smi_info->thread != NULL)
+ kthread_stop(smi_info->thread);
+ if (smi_info->timer_running)
del_timer_sync(&smi_info->si_timer);
- }
}
static struct ipmi_default_vals
@@ -3274,8 +3311,8 @@ static int add_smi(struct smi_info *new_smi)
int rv = 0;
printk(KERN_INFO PFX "Adding %s-specified %s state machine",
- ipmi_addr_src_to_str[new_smi->addr_source],
- si_to_str[new_smi->si_type]);
+ ipmi_addr_src_to_str(new_smi->addr_source),
+ si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
printk(KERN_CONT " duplicate interface\n");
@@ -3305,7 +3342,7 @@ static int try_smi_init(struct smi_info *new_smi)
printk(KERN_INFO PFX "Trying %s-specified %s state"
" machine at %s address 0x%lx, slave address 0x%x,"
" irq %d\n",
- ipmi_addr_src_to_str[new_smi->addr_source],
+ ipmi_addr_src_to_str(new_smi->addr_source),
si_to_str[new_smi->si_type],
addr_space_to_str[new_smi->io.addr_type],
new_smi->io.addr_data,
@@ -3371,8 +3408,7 @@ static int try_smi_init(struct smi_info *new_smi)
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
- INIT_LIST_HEAD(&(new_smi->xmit_msgs));
- INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
+ new_smi->waiting_msg = NULL;
new_smi->curr_msg = NULL;
atomic_set(&new_smi->req_events, 0);
new_smi->run_to_completion = false;
@@ -3380,7 +3416,6 @@ static int try_smi_init(struct smi_info *new_smi)
atomic_set(&new_smi->stats[i], 0);
new_smi->interrupt_disabled = true;
- atomic_set(&new_smi->stop_operation, 0);
atomic_set(&new_smi->need_watch, 0);
new_smi->intf_num = smi_num;
smi_num++;
@@ -3394,9 +3429,15 @@ static int try_smi_init(struct smi_info *new_smi)
* timer to avoid racing with the timer.
*/
start_clear_flags(new_smi);
- /* IRQ is defined to be set when non-zero. */
- if (new_smi->irq)
- new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
+
+ /*
+ * IRQ is defined to be set when non-zero. req_events will
+ * cause a global flags check that will enable interrupts.
+ */
+ if (new_smi->irq) {
+ new_smi->interrupt_disabled = false;
+ atomic_set(&new_smi->req_events, 1);
+ }
if (!new_smi->dev) {
/*
@@ -3428,7 +3469,6 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi,
&new_smi->device_id,
new_smi->dev,
- "bmc",
new_smi->slave_addr);
if (rv) {
dev_err(new_smi->dev, "Unable to register device: error %d\n",
@@ -3466,15 +3506,15 @@ static int try_smi_init(struct smi_info *new_smi)
return 0;
out_err_stop_timer:
- atomic_inc(&new_smi->stop_operation);
wait_for_timer_and_thread(new_smi);
out_err:
new_smi->interrupt_disabled = true;
if (new_smi->intf) {
- ipmi_unregister_smi(new_smi->intf);
+ ipmi_smi_t intf = new_smi->intf;
new_smi->intf = NULL;
+ ipmi_unregister_smi(intf);
}
if (new_smi->irq_cleanup) {
@@ -3653,60 +3693,49 @@ module_init(init_ipmi_si);
static void cleanup_one_si(struct smi_info *to_clean)
{
int rv = 0;
- unsigned long flags;
if (!to_clean)
return;
+ if (to_clean->intf) {
+ ipmi_smi_t intf = to_clean->intf;
+
+ to_clean->intf = NULL;
+ rv = ipmi_unregister_smi(intf);
+ if (rv) {
+ pr_err(PFX "Unable to unregister device: errno=%d\n",
+ rv);
+ }
+ }
+
if (to_clean->dev)
dev_set_drvdata(to_clean->dev, NULL);
list_del(&to_clean->link);
- /* Tell the driver that we are shutting down. */
- atomic_inc(&to_clean->stop_operation);
-
/*
- * Make sure the timer and thread are stopped and will not run
- * again.
+ * Make sure that interrupts, the timer and the thread are
+ * stopped and will not run again.
*/
+ if (to_clean->irq_cleanup)
+ to_clean->irq_cleanup(to_clean);
wait_for_timer_and_thread(to_clean);
/*
* Timeouts are stopped, now make sure the interrupts are off
- * for the device. A little tricky with locks to make sure
- * there are no races.
+ * in the BMC. Note that timers and CPU interrupts are off,
+ * so no need for locks.
*/
- spin_lock_irqsave(&to_clean->si_lock, flags);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
- spin_unlock_irqrestore(&to_clean->si_lock, flags);
poll(to_clean);
schedule_timeout_uninterruptible(1);
- spin_lock_irqsave(&to_clean->si_lock, flags);
}
disable_si_irq(to_clean);
- spin_unlock_irqrestore(&to_clean->si_lock, flags);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
poll(to_clean);
schedule_timeout_uninterruptible(1);
}
- /* Clean up interrupts and make sure that everything is done. */
- if (to_clean->irq_cleanup)
- to_clean->irq_cleanup(to_clean);
- while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
- poll(to_clean);
- schedule_timeout_uninterruptible(1);
- }
-
- if (to_clean->intf)
- rv = ipmi_unregister_smi(to_clean->intf);
-
- if (rv) {
- printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
- rv);
- }
-
if (to_clean->handlers)
to_clean->handlers->cleanup(to_clean->si_sm);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
new file mode 100644
index 000000000000..e178ac27e73c
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -0,0 +1,1870 @@
+/*
+ * ipmi_ssif.c
+ *
+ * The interface to the IPMI driver for SMBus access to an
+ * SMBus-compliant device. Called SSIF by the IPMI spec.
+ *
+ * Author: Intel Corporation
+ * Todd Davis <todd.c.davis@intel.com>
+ *
+ * Rewritten by Corey Minyard <minyard@acm.org> to support the
+ * non-blocking I2C interface, add support for multi-part
+ * transactions, add PEC support, and general cleanup.
+ *
+ * Copyright 2003 Intel Corporation
+ * Copyright 2005 MontaVista Software
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/*
+ * This file holds the "policy" for the interface to the SSIF state
+ * machine. It does the configuration, handles timers and interrupts,
+ * and drives the real SSIF state machine.
+ */
+
+/*
+ * TODO: Figure out how to use SMB alerts. This will require a new
+ * interface into the I2C driver, I believe.
+ */
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/ipmi_smi.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/kthread.h>
+#include <linux/acpi.h>
+
+#define PFX "ipmi_ssif: "
+#define DEVICE_NAME "ipmi_ssif"
+
+#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD 0x57
+
+#define SSIF_IPMI_REQUEST 2
+#define SSIF_IPMI_MULTI_PART_REQUEST_START 6
+#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7
+#define SSIF_IPMI_RESPONSE 3
+#define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9
+
+/* ssif_debug is a bit-field
+ * SSIF_DEBUG_MSG - commands and their responses
+ * SSIF_DEBUG_STATE - message states
+ * SSIF_DEBUG_TIMING - Measure times between events in the driver
+ */
+#define SSIF_DEBUG_TIMING 4
+#define SSIF_DEBUG_STATE 2
+#define SSIF_DEBUG_MSG 1
+#define SSIF_NODEBUG 0
+#define SSIF_DEFAULT_DEBUG (SSIF_NODEBUG)
+
+/*
+ * Timer values
+ */
+#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
+#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
+
+/* How many times do we retry sending/receiving the message. */
+#define SSIF_SEND_RETRIES 5
+#define SSIF_RECV_RETRIES 250
+
+#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
+#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
+
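For concreteness, a tiny sketch of what the two jiffies macros above evaluate to under an assumed tick rate (HZ = 250, so TICK_NSEC = 4000000 ns; the real values depend on the kernel configuration):

#include <stdio.h>

#define DEMO_TICK_NSEC 4000000UL	/* assumes HZ = 250 */
#define DEMO_MSG_USEC 20000UL
#define DEMO_MSG_PART_USEC 5000UL

int main(void)
{
	/* 20 ms retry interval -> 5 jiffies at 250 Hz */
	printf("SSIF_MSG_JIFFIES = %lu\n",
	       (DEMO_MSG_USEC * 1000) / DEMO_TICK_NSEC);
	/* 5 ms part interval -> 1 jiffy (rounded down) */
	printf("SSIF_MSG_PART_JIFFIES = %lu\n",
	       (DEMO_MSG_PART_USEC * 1000) / DEMO_TICK_NSEC);
	return 0;
}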
+enum ssif_intf_state {
+ SSIF_NORMAL,
+ SSIF_GETTING_FLAGS,
+ SSIF_GETTING_EVENTS,
+ SSIF_CLEARING_FLAGS,
+ SSIF_GETTING_MESSAGES,
+ /* FIXME - add watchdog stuff. */
+};
+
+#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \
+ && (ssif)->curr_msg == NULL)
+
+/*
+ * Indexes into stats[] in ssif_info below.
+ */
+enum ssif_stat_indexes {
+ /* Number of total messages sent. */
+ SSIF_STAT_sent_messages = 0,
+
+ /*
+ * Number of message parts sent. Messages may be broken into
+ * parts if they are long.
+ */
+ SSIF_STAT_sent_messages_parts,
+
+ /*
+ * Number of times a message was retried.
+ */
+ SSIF_STAT_send_retries,
+
+ /*
+ * Number of times the send of a message failed.
+ */
+ SSIF_STAT_send_errors,
+
+ /*
+ * Number of message responses received.
+ */
+ SSIF_STAT_received_messages,
+
+ /*
+ * Number of message fragments received.
+ */
+ SSIF_STAT_received_message_parts,
+
+ /*
+ * Number of times the receive of a message was retried.
+ */
+ SSIF_STAT_receive_retries,
+
+ /*
+ * Number of errors receiving messages.
+ */
+ SSIF_STAT_receive_errors,
+
+ /*
+ * Number of times a flag fetch was requested.
+ */
+ SSIF_STAT_flag_fetches,
+
+ /*
+ * Number of times the hardware didn't follow the state machine.
+ */
+ SSIF_STAT_hosed,
+
+ /*
+ * Number of received events.
+ */
+ SSIF_STAT_events,
+
+ /* Number of asynchronous messages received. */
+ SSIF_STAT_incoming_messages,
+
+ /* Number of watchdog pretimeouts. */
+ SSIF_STAT_watchdog_pretimeouts,
+
+ /* Always add statistics before this value, it must be last. */
+ SSIF_NUM_STATS
+};
+
+struct ssif_addr_info {
+ unsigned short addr;
+ struct i2c_board_info binfo;
+ char *adapter_name;
+ int debug;
+ int slave_addr;
+ enum ipmi_addr_src addr_src;
+ union ipmi_smi_info_union addr_info;
+
+ struct mutex clients_mutex;
+ struct list_head clients;
+
+ struct list_head link;
+};
+
+struct ssif_info;
+
+typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len);
+
+struct ssif_info {
+ ipmi_smi_t intf;
+ int intf_num;
+ spinlock_t lock;
+ struct ipmi_smi_msg *waiting_msg;
+ struct ipmi_smi_msg *curr_msg;
+ enum ssif_intf_state ssif_state;
+ unsigned long ssif_debug;
+
+ struct ipmi_smi_handlers handlers;
+
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ union ipmi_smi_info_union addr_info;
+
+ /*
+ * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+ * is set to hold the flags until we are done handling everything
+ * from the flags.
+ */
+#define RECEIVE_MSG_AVAIL 0x01
+#define EVENT_MSG_BUFFER_FULL 0x02
+#define WDT_PRE_TIMEOUT_INT 0x08
+ unsigned char msg_flags;
+
+ bool has_event_buffer;
+
+ /*
+ * If set to true, this will request events the next time the
+ * state machine is idle.
+ */
+ bool req_events;
+
+ /*
+ * If set to true, this will request flags the next time the
+ * state machine is idle.
+ */
+ bool req_flags;
+
+ /*
+ * Used to perform timer operations when run-to-completion
+ * mode is on. This is a countdown timer.
+ */
+ int rtc_us_timer;
+
+ /* Used for sending/receiving data. +1 for the length. */
+ unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
+ unsigned int data_len;
+
+ /* Temp receive buffer, gets copied into data. */
+ unsigned char recv[I2C_SMBUS_BLOCK_MAX];
+
+ struct i2c_client *client;
+ ssif_i2c_done done_handler;
+
+ /* Thread interface handling */
+ struct task_struct *thread;
+ struct completion wake_thread;
+ bool stopping;
+ int i2c_read_write;
+ int i2c_command;
+ unsigned char *i2c_data;
+ unsigned int i2c_size;
+
+ /* From the device id response. */
+ struct ipmi_device_id device_id;
+
+ struct timer_list retry_timer;
+ int retries_left;
+
+ /* Info from SSIF cmd */
+ unsigned char max_xmit_msg_size;
+ unsigned char max_recv_msg_size;
+ unsigned int multi_support;
+ int supports_pec;
+
+#define SSIF_NO_MULTI 0
+#define SSIF_MULTI_2_PART 1
+#define SSIF_MULTI_n_PART 2
+ unsigned char *multi_data;
+ unsigned int multi_len;
+ unsigned int multi_pos;
+
+ atomic_t stats[SSIF_NUM_STATS];
+};
+
+#define ssif_inc_stat(ssif, stat) \
+ atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
+#define ssif_get_stat(ssif, stat) \
+ ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
+
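The two macros above rely on token pasting: ssif_inc_stat(info, sent_messages) expands to atomic_inc(&info->stats[SSIF_STAT_sent_messages]), so callers can name a statistic without spelling out the enum prefix. A userspace stand-in, with plain ints replacing atomic_t purely for illustration:

#include <stdio.h>

enum { DEMO_STAT_sent_messages, DEMO_STAT_hosed, DEMO_NUM_STATS };

struct demo_info { int stats[DEMO_NUM_STATS]; };

#define demo_inc_stat(info, stat) ((info)->stats[DEMO_STAT_ ## stat]++)

int main(void)
{
	struct demo_info info = { { 0 } };

	demo_inc_stat(&info, sent_messages);	/* pastes DEMO_STAT_sent_messages */
	demo_inc_stat(&info, hosed);

	printf("sent=%d hosed=%d\n",
	       info.stats[DEMO_STAT_sent_messages],
	       info.stats[DEMO_STAT_hosed]);
	return 0;
}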
+static bool initialized;
+
+static atomic_t next_intf = ATOMIC_INIT(0);
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg);
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags);
+static int start_send(struct ssif_info *ssif_info,
+ unsigned char *data,
+ unsigned int len);
+
+static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&ssif_info->lock, *flags);
+ return flags;
+}
+
+static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&ssif_info->lock, *flags);
+}
+
+static void deliver_recv_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg)
+{
+ ipmi_smi_t intf = ssif_info->intf;
+
+ if (!intf) {
+ ipmi_free_smi_msg(msg);
+ } else if (msg->rsp_size < 0) {
+ return_hosed_msg(ssif_info, msg);
+ pr_err(PFX
+ "Malformed message in deliver_recv_msg: rsp_size = %d\n",
+ msg->rsp_size);
+ } else {
+ ipmi_smi_msg_received(intf, msg);
+ }
+}
+
+static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg)
+{
+ ssif_inc_stat(ssif_info, hosed);
+
+ /* Make it a response */
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = 0xFF; /* Unknown error. */
+ msg->rsp_size = 3;
+
+ deliver_recv_msg(ssif_info, msg);
+}
+
+/*
+ * Must be called with the message lock held. This will release the
+ * message lock. Note that the caller will check SSIF_IDLE and start a
+ * new operation, so there is no need to check for new messages to
+ * start in here.
+ */
+static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ unsigned char msg[3];
+
+ ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ ssif_info->ssif_state = SSIF_CLEARING_FLAGS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ /* Make sure the watchdog pre-timeout flag is not set at startup. */
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+
+ if (start_send(ssif_info, msg, 3) != 0) {
+ /* Error, just go to normal state. */
+ ssif_info->ssif_state = SSIF_NORMAL;
+ }
+}
+
+static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ unsigned char mb[2];
+
+ ssif_info->req_flags = false;
+ ssif_info->ssif_state = SSIF_GETTING_FLAGS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+ if (start_send(ssif_info, mb, 2) != 0)
+ ssif_info->ssif_state = SSIF_NORMAL;
+}
+
+static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+ struct ipmi_smi_msg *msg)
+{
+ if (start_send(ssif_info, msg->data, msg->data_size) != 0) {
+ unsigned long oflags;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->curr_msg = NULL;
+ ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ ipmi_free_smi_msg(msg);
+ }
+}
+
+static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+
+ ssif_info->req_events = false;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ ssif_info->ssif_state = SSIF_NORMAL;
+ return;
+ }
+
+ ssif_info->curr_msg = msg;
+ ssif_info->ssif_state = SSIF_GETTING_EVENTS;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ msg->data_size = 2;
+
+ check_start_send(ssif_info, flags, msg);
+}
+
+static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+
+ msg = ipmi_alloc_smi_msg();
+ if (!msg) {
+ ssif_info->ssif_state = SSIF_NORMAL;
+ return;
+ }
+
+ ssif_info->curr_msg = msg;
+ ssif_info->ssif_state = SSIF_GETTING_MESSAGES;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_GET_MSG_CMD;
+ msg->data_size = 2;
+
+ check_start_send(ssif_info, flags, msg);
+}
+
+/*
+ * Must be called with the message lock held. This will release the
+ * message lock. Note that the caller will check SSIF_IDLE and start a
+ * new operation, so there is no need to check for new messages to
+ * start in here.
+ */
+static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
+ ipmi_smi_t intf = ssif_info->intf;
+ /* Watchdog pre-timeout */
+ ssif_inc_stat(ssif_info, watchdog_pretimeouts);
+ start_clear_flags(ssif_info, flags);
+ if (intf)
+ ipmi_smi_watchdog_pretimeout(intf);
+ } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL)
+ /* Messages available. */
+ start_recv_msg_fetch(ssif_info, flags);
+ else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL)
+ /* Events available. */
+ start_event_fetch(ssif_info, flags);
+ else {
+ ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+}
+
+static int ipmi_ssif_thread(void *data)
+{
+ struct ssif_info *ssif_info = data;
+
+ while (!kthread_should_stop()) {
+ int result;
+
+ /* Wait for something to do */
+ wait_for_completion(&ssif_info->wake_thread);
+ init_completion(&ssif_info->wake_thread);
+
+ if (ssif_info->stopping)
+ break;
+
+ if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
+ result = i2c_smbus_write_block_data(
+ ssif_info->client, SSIF_IPMI_REQUEST,
+ ssif_info->i2c_data[0],
+ ssif_info->i2c_data + 1);
+ ssif_info->done_handler(ssif_info, result, NULL, 0);
+ } else {
+ result = i2c_smbus_read_block_data(
+ ssif_info->client, SSIF_IPMI_RESPONSE,
+ ssif_info->i2c_data);
+ if (result < 0)
+ ssif_info->done_handler(ssif_info, result,
+ NULL, 0);
+ else
+ ssif_info->done_handler(ssif_info, 0,
+ ssif_info->i2c_data,
+ result);
+ }
+ }
+
+ return 0;
+}
+
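ipmi_ssif_thread() above sleeps on a completion that ssif_i2c_send() (below) fires, performs one queued SMBus operation, then re-arms. A minimal userspace analogue of the same kick-a-worker pattern, with pthreads standing in for the kernel completion API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool have_work, stopping;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stopping) {
		while (!have_work && !stopping)
			pthread_cond_wait(&wake, &lock); /* wait_for_completion() */
		if (have_work) {
			have_work = false;		/* init_completion() */
			printf("performing queued I2C op\n");
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	have_work = true;			/* complete(&wake_thread) */
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	pthread_mutex_lock(&lock);
	stopping = true;			/* kthread_stop() path */
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}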
+static int ssif_i2c_send(struct ssif_info *ssif_info,
+ ssif_i2c_done handler,
+ int read_write, int command,
+ unsigned char *data, unsigned int size)
+{
+ ssif_info->done_handler = handler;
+
+ ssif_info->i2c_read_write = read_write;
+ ssif_info->i2c_command = command;
+ ssif_info->i2c_data = data;
+ ssif_info->i2c_size = size;
+ complete(&ssif_info->wake_thread);
+ return 0;
+}
+
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len);
+
+static void retry_timeout(unsigned long data)
+{
+ struct ssif_info *ssif_info = (void *) data;
+ int rv;
+
+ if (ssif_info->stopping)
+ return;
+
+ ssif_info->rtc_us_timer = 0;
+
+ rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_RESPONSE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ /* request failed, just return the error. */
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error from i2c_non_blocking_op(5)\n");
+
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ }
+}
+
+static int start_resend(struct ssif_info *ssif_info);
+
+static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+{
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags, *flags;
+ int rv;
+
+ /*
+ * We are single-threaded here, so no need for a lock until we
+ * start messing with driver states or the queues.
+ */
+
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+ ssif_inc_stat(ssif_info, receive_retries);
+
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_JIFFIES);
+ ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, receive_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error in msg_done_handler: %d\n", result);
+ len = 0;
+ goto continue_op;
+ }
+
+ if ((len > 1) && (ssif_info->multi_pos == 0)
+ && (data[0] == 0x00) && (data[1] == 0x01)) {
+ /* Start of multi-part read. Start the next transaction. */
+ int i;
+
+ ssif_inc_stat(ssif_info, received_message_parts);
+
+ /* Remove the multi-part read marker. */
+ for (i = 0; i < (len-2); i++)
+ ssif_info->data[i] = data[i+2];
+ len -= 2;
+ ssif_info->multi_len = len;
+ ssif_info->multi_pos = 1;
+
+ rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error from i2c_non_blocking_op(1)\n");
+
+ result = -EIO;
+ } else
+ return;
+ } else if (ssif_info->multi_pos) {
+ /* Middle of multi-part read. Start the next transaction. */
+ int i;
+ unsigned char blocknum;
+
+ if (len == 0) {
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info(PFX "Middle message with no data\n");
+
+ goto continue_op;
+ }
+
+ blocknum = data[ssif_info->multi_len];
+
+ if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) {
+ /* Received message too big, abort the operation. */
+ result = -E2BIG;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Received message too big\n");
+
+ goto continue_op;
+ }
+
+ /* Remove the blocknum from the data. */
+ for (i = 0; i < (len-1); i++)
+ ssif_info->data[i+ssif_info->multi_len] = data[i+1];
+ len--;
+ ssif_info->multi_len += len;
+ if (blocknum == 0xff) {
+ /* End of read */
+ len = ssif_info->multi_len;
+ data = ssif_info->data;
+ } else if ((blocknum+1) != ssif_info->multi_pos) {
+ /*
+ * Out of sequence block, just abort. Block
+ * numbers start at zero for the second block,
+ * but multi_pos starts at one, so the +1.
+ */
+ result = -EIO;
+ } else {
+ ssif_inc_stat(ssif_info, received_message_parts);
+
+ ssif_info->multi_pos++;
+
+ rv = ssif_i2c_send(ssif_info, msg_done_handler,
+ I2C_SMBUS_READ,
+ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
+ ssif_info->recv,
+ I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info(PFX
+ "Error from i2c_non_blocking_op(2)\n");
+
+ result = -EIO;
+ } else
+ return;
+ }
+ }
+
+ if (result < 0) {
+ ssif_inc_stat(ssif_info, receive_errors);
+ } else {
+ ssif_inc_stat(ssif_info, received_messages);
+ ssif_inc_stat(ssif_info, received_message_parts);
+ }
+
+
+ continue_op:
+ if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ pr_info(PFX "DONE 1: state = %d, result=%d.\n",
+ ssif_info->ssif_state, result);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ msg = ssif_info->curr_msg;
+ if (msg) {
+ msg->rsp_size = len;
+ if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
+ msg->rsp_size = IPMI_MAX_MSG_LENGTH;
+ memcpy(msg->rsp, data, msg->rsp_size);
+ ssif_info->curr_msg = NULL;
+ }
+
+ switch (ssif_info->ssif_state) {
+ case SSIF_NORMAL:
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (!msg)
+ break;
+
+ if (result < 0)
+ return_hosed_msg(ssif_info, msg);
+ else
+ deliver_recv_msg(ssif_info, msg);
+ break;
+
+ case SSIF_GETTING_FLAGS:
+ /* We got the flags from the SSIF, now handle them. */
+ if ((result < 0) || (len < 4) || (data[2] != 0)) {
+ /*
+ * Error fetching flags, or invalid length,
+ * just give up for now.
+ */
+ ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ pr_warn(PFX "Error getting flags: %d %d, %x\n",
+ result, len, data[2]);
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ pr_warn(PFX "Invalid response getting flags: %x %x\n",
+ data[0], data[1]);
+ } else {
+ ssif_inc_stat(ssif_info, flag_fetches);
+ ssif_info->msg_flags = data[3];
+ handle_flags(ssif_info, flags);
+ }
+ break;
+
+ case SSIF_CLEARING_FLAGS:
+ /* We cleared the flags. */
+ if ((result < 0) || (len < 3) || (data[2] != 0)) {
+ /* Error clearing flags */
+ pr_warn(PFX "Error clearing flags: %d %d, %x\n",
+ result, len, data[2]);
+ } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
+ pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+ data[0], data[1]);
+ }
+ ssif_info->ssif_state = SSIF_NORMAL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+
+ case SSIF_GETTING_EVENTS:
+ if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+ /* Error getting event, probably done. */
+ msg->done(msg);
+
+ /* Take off the event flag. */
+ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(ssif_info, flags);
+ } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) {
+ pr_warn(PFX "Invalid response getting events: %x %x\n",
+ msg->rsp[0], msg->rsp[1]);
+ msg->done(msg);
+ /* Take off the event flag. */
+ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
+ handle_flags(ssif_info, flags);
+ } else {
+ handle_flags(ssif_info, flags);
+ ssif_inc_stat(ssif_info, events);
+ deliver_recv_msg(ssif_info, msg);
+ }
+ break;
+
+ case SSIF_GETTING_MESSAGES:
+ if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
+ /* Error getting message, probably done. */
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(ssif_info, flags);
+ } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ || msg->rsp[1] != IPMI_GET_MSG_CMD) {
+ pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+ msg->rsp[0], msg->rsp[1]);
+ msg->done(msg);
+
+ /* Take off the msg flag. */
+ ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
+ handle_flags(ssif_info, flags);
+ } else {
+ ssif_inc_stat(ssif_info, incoming_messages);
+ handle_flags(ssif_info, flags);
+ deliver_recv_msg(ssif_info, msg);
+ }
+ break;
+ }
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ if (ssif_info->req_events)
+ start_event_fetch(ssif_info, flags);
+ else if (ssif_info->req_flags)
+ start_flag_fetch(ssif_info, flags);
+ else
+ start_next_msg(ssif_info, flags);
+ } else
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
+ pr_info(PFX "DONE 2: state = %d.\n", ssif_info->ssif_state);
+}
+
+static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ unsigned char *data, unsigned int len)
+{
+ int rv;
+
+ /* We are single-threaded here, so no need for a lock. */
+ if (result < 0) {
+ ssif_info->retries_left--;
+ if (ssif_info->retries_left > 0) {
+ if (!start_resend(ssif_info)) {
+ ssif_inc_stat(ssif_info, send_retries);
+ return;
+ }
+ /* request failed, just return the error. */
+ ssif_inc_stat(ssif_info, send_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info(PFX
+ "Out of retries in msg_written_handler\n");
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ return;
+ }
+
+ ssif_inc_stat(ssif_info, send_errors);
+
+ /*
+ * Got an error on transmit, let the done routine
+ * handle it.
+ */
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error in msg_written_handler: %d\n", result);
+
+ msg_done_handler(ssif_info, result, NULL, 0);
+ return;
+ }
+
+ if (ssif_info->multi_data) {
+ /* In the middle of a multi-data write. */
+ int left;
+
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
+ left = ssif_info->multi_len - ssif_info->multi_pos;
+ if (left > 32)
+ left = 32;
+ /* Length byte. */
+ ssif_info->multi_data[ssif_info->multi_pos] = left;
+ ssif_info->multi_pos += left;
+ if (left < 32)
+ /*
+ * Write is finished. Note that we must end
+ * with a write of less than 32 bytes to
+ * complete the transaction, even if it is
+ * zero bytes.
+ */
+ ssif_info->multi_data = NULL;
+
+ rv = ssif_i2c_send(ssif_info, msg_written_handler,
+ I2C_SMBUS_WRITE,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+ ssif_info->multi_data + ssif_info->multi_pos,
+ I2C_SMBUS_BLOCK_DATA);
+ if (rv < 0) {
+ /* request failed, just return the error. */
+ ssif_inc_stat(ssif_info, send_errors);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Error from i2c_non_blocking_op(3)\n");
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
+ }
+ } else {
+ ssif_inc_stat(ssif_info, sent_messages);
+ ssif_inc_stat(ssif_info, sent_messages_parts);
+
+ /* Wait a jiffy, then request the next message */
+ ssif_info->retries_left = SSIF_RECV_RETRIES;
+ ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
+ return;
+ }
+}
+
+static int start_resend(struct ssif_info *ssif_info)
+{
+ int rv;
+ int command;
+
+ if (ssif_info->data_len > 32) {
+ command = SSIF_IPMI_MULTI_PART_REQUEST_START;
+ ssif_info->multi_data = ssif_info->data;
+ ssif_info->multi_len = ssif_info->data_len;
+ /*
+ * Subtle thing, this is 32, not 33, because we will
+ * overwrite the thing at position 32 (which was just
+ * transmitted) with the new length.
+ */
+ ssif_info->multi_pos = 32;
+ ssif_info->data[0] = 32;
+ } else {
+ ssif_info->multi_data = NULL;
+ command = SSIF_IPMI_REQUEST;
+ ssif_info->data[0] = ssif_info->data_len;
+ }
+
+ rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+ command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+ if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG))
+ pr_info("Error from i2c_non_blocking_op(4)\n");
+ return rv;
+}
+
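start_resend() and msg_written_handler() together implement the multi-part write framing: the first block is exactly 32 bytes under SSIF_IPMI_MULTI_PART_REQUEST_START, later blocks go out under the MIDDLE command, and the transfer ends on the first block shorter than 32 bytes, even an empty one. A standalone sketch of that chunking, where demo_smbus_write() is a hypothetical stand-in for the real SMBus call:

#include <stdio.h>

static void demo_smbus_write(int cmd, const unsigned char *buf, int len)
{
	printf("cmd %d: %d byte(s)\n", cmd, len);
}

/* Assumes len > 32; shorter messages use the single-part command. */
static void demo_send_multipart(const unsigned char *data, int len)
{
	int pos;

	demo_smbus_write(6, data, 32);		/* MULTI_PART_REQUEST_START */
	pos = 32;
	for (;;) {
		int left = len - pos;

		if (left > 32)
			left = 32;
		demo_smbus_write(7, data + pos, left);	/* ..._MIDDLE */
		pos += left;
		if (left < 32)		/* short block ends the transfer */
			break;
	}
}

int main(void)
{
	unsigned char msg[70] = { 0 };

	demo_send_multipart(msg, sizeof(msg));	/* blocks of 32, 32, 6 */
	return 0;
}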
+static int start_send(struct ssif_info *ssif_info,
+ unsigned char *data,
+ unsigned int len)
+{
+ if (len > IPMI_MAX_MSG_LENGTH)
+ return -E2BIG;
+ if (len > ssif_info->max_xmit_msg_size)
+ return -E2BIG;
+
+ ssif_info->retries_left = SSIF_SEND_RETRIES;
+ memcpy(ssif_info->data+1, data, len);
+ ssif_info->data_len = len;
+ return start_resend(ssif_info);
+}
+
+/* Must be called with the message lock held. */
+static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+{
+ struct ipmi_smi_msg *msg;
+ unsigned long oflags;
+
+ restart:
+ if (!SSIF_IDLE(ssif_info)) {
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ return;
+ }
+
+ if (!ssif_info->waiting_msg) {
+ ssif_info->curr_msg = NULL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ } else {
+ int rv;
+
+ ssif_info->curr_msg = ssif_info->waiting_msg;
+ ssif_info->waiting_msg = NULL;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ rv = start_send(ssif_info,
+ ssif_info->curr_msg->data,
+ ssif_info->curr_msg->data_size);
+ if (rv) {
+ msg = ssif_info->curr_msg;
+ ssif_info->curr_msg = NULL;
+ return_hosed_msg(ssif_info, msg);
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ goto restart;
+ }
+ }
+}
+
+static void sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+ unsigned long oflags, *flags;
+
+ BUG_ON(ssif_info->waiting_msg);
+ ssif_info->waiting_msg = msg;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ start_next_msg(ssif_info, flags);
+
+ if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) {
+ struct timeval t;
+
+ do_gettimeofday(&t);
+ pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
+ msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+ }
+}
+
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ data->addr_src = ssif_info->addr_source;
+ data->dev = &ssif_info->client->dev;
+ data->addr_info = ssif_info->addr_info;
+ get_device(data->dev);
+
+ return 0;
+}
+
+/*
+ * Instead of having our own timer to periodically check the message
+ * flags, we let the message handler drive us.
+ */
+static void request_events(void *send_info)
+{
+ struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+ unsigned long oflags, *flags;
+
+ if (!ssif_info->has_event_buffer)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ /*
+ * Request flags first, not events, because the lower layer
+ * doesn't have a way to send an attention. But make sure
+ * event checking still happens.
+ */
+ ssif_info->req_events = true;
+ if (SSIF_IDLE(ssif_info))
+ start_flag_fetch(ssif_info, flags);
+ else {
+ ssif_info->req_flags = true;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
+}
+
+static int inc_usecount(void *send_info)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ if (!i2c_get_adapter(ssif_info->client->adapter->nr))
+ return -ENODEV;
+
+ i2c_use_client(ssif_info->client);
+ return 0;
+}
+
+static void dec_usecount(void *send_info)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ i2c_release_client(ssif_info->client);
+ i2c_put_adapter(ssif_info->client->adapter);
+}
+
+static int ssif_start_processing(void *send_info,
+ ipmi_smi_t intf)
+{
+ struct ssif_info *ssif_info = send_info;
+
+ ssif_info->intf = intf;
+
+ return 0;
+}
+
+#define MAX_SSIF_BMCS 4
+
+static unsigned short addr[MAX_SSIF_BMCS];
+static int num_addrs;
+module_param_array(addr, ushort, &num_addrs, 0);
+MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs.");
+
+static char *adapter_name[MAX_SSIF_BMCS];
+static int num_adapter_names;
+module_param_array(adapter_name, charp, &num_adapter_names, 0);
+MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC. By default all devices are scanned.");
+
+static int slave_addrs[MAX_SSIF_BMCS];
+static int num_slave_addrs;
+module_param_array(slave_addrs, int, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs,
+ "The default IPMB slave address for the controller.");
+
+/*
+ * Bit 0 enables message debugging, bit 1 enables state debugging, and
+ * bit 2 enables timing debugging. This is an array indexed by
+ * interface number.
+ */
+static int dbg[MAX_SSIF_BMCS];
+static int num_dbg;
+module_param_array(dbg, int, &num_dbg, 0);
+MODULE_PARM_DESC(dbg, "Turn on debugging.");
+
+static bool ssif_dbg_probe;
+module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
+MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
+
+static int use_thread;
+module_param(use_thread, int, 0);
+MODULE_PARM_DESC(use_thread, "Use the thread interface.");
+
+static bool ssif_tryacpi = 1;
+module_param_named(tryacpi, ssif_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
+
+static bool ssif_trydmi = 1;
+module_param_named(trydmi, ssif_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)");
+
+static DEFINE_MUTEX(ssif_infos_mutex);
+static LIST_HEAD(ssif_infos);
+
+static int ssif_remove(struct i2c_client *client)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+ int rv;
+
+ if (!ssif_info)
+ return 0;
+
+ i2c_set_clientdata(client, NULL);
+
+ /*
+ * After this point, we won't deliver anything asynchronously
+ * to the message handler. We can unregister ourselves.
+ */
+ rv = ipmi_unregister_smi(ssif_info->intf);
+ if (rv) {
+ pr_err(PFX "Unable to unregister device: errno=%d\n", rv);
+ return rv;
+ }
+ ssif_info->intf = NULL;
+
+ /* make sure the driver is not looking for flags any more. */
+ while (ssif_info->ssif_state != SSIF_NORMAL)
+ schedule_timeout(1);
+
+ ssif_info->stopping = true;
+ del_timer_sync(&ssif_info->retry_timer);
+ if (ssif_info->thread) {
+ complete(&ssif_info->wake_thread);
+ kthread_stop(ssif_info->thread);
+ }
+
+ /*
+ * No message can be outstanding now, we have removed the
+ * upper layer and it permitted us to do so.
+ */
+ kfree(ssif_info);
+ return 0;
+}
+
+static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+ int *resp_len, unsigned char *resp)
+{
+ int retry_cnt;
+ int ret;
+
+ retry_cnt = SSIF_SEND_RETRIES;
+ retry1:
+ ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+ if (ret) {
+ retry_cnt--;
+ if (retry_cnt > 0)
+ goto retry1;
+ return -ENODEV;
+ }
+
+ ret = -ENODEV;
+ retry_cnt = SSIF_RECV_RETRIES;
+ while (retry_cnt > 0) {
+ ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
+ resp);
+ if (ret > 0)
+ break;
+ msleep(SSIF_MSG_MSEC);
+ retry_cnt--;
+ if (retry_cnt <= 0)
+ break;
+ }
+
+ if (ret > 0) {
+ /* Validate that the response is correct. */
+ if (ret < 3 ||
+ (resp[0] != (msg[0] | (1 << 2))) ||
+ (resp[1] != msg[1]))
+ ret = -EINVAL;
+ else {
+ *resp_len = ret;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ unsigned char *resp;
+ unsigned char msg[3];
+ int rv;
+ int len;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Do a Get Device ID command, since it is required. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv)
+ rv = -ENODEV;
+ else
+ strlcpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+ kfree(resp);
+ return rv;
+}
+
+static int smi_type_proc_show(struct seq_file *m, void *v)
+{
+ return seq_puts(m, "ssif\n");
+}
+
+static int smi_type_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_type_proc_show, inode->i_private);
+}
+
+static const struct file_operations smi_type_proc_ops = {
+ .open = smi_type_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_stats_proc_show(struct seq_file *m, void *v)
+{
+ struct ssif_info *ssif_info = m->private;
+
+ seq_printf(m, "sent_messages: %u\n",
+ ssif_get_stat(ssif_info, sent_messages));
+ seq_printf(m, "sent_messages_parts: %u\n",
+ ssif_get_stat(ssif_info, sent_messages_parts));
+ seq_printf(m, "send_retries: %u\n",
+ ssif_get_stat(ssif_info, send_retries));
+ seq_printf(m, "send_errors: %u\n",
+ ssif_get_stat(ssif_info, send_errors));
+ seq_printf(m, "received_messages: %u\n",
+ ssif_get_stat(ssif_info, received_messages));
+ seq_printf(m, "received_message_parts: %u\n",
+ ssif_get_stat(ssif_info, received_message_parts));
+ seq_printf(m, "receive_retries: %u\n",
+ ssif_get_stat(ssif_info, receive_retries));
+ seq_printf(m, "receive_errors: %u\n",
+ ssif_get_stat(ssif_info, receive_errors));
+ seq_printf(m, "flag_fetches: %u\n",
+ ssif_get_stat(ssif_info, flag_fetches));
+ seq_printf(m, "hosed: %u\n",
+ ssif_get_stat(ssif_info, hosed));
+ seq_printf(m, "events: %u\n",
+ ssif_get_stat(ssif_info, events));
+ seq_printf(m, "watchdog_pretimeouts: %u\n",
+ ssif_get_stat(ssif_info, watchdog_pretimeouts));
+ return 0;
+}
+
+static int smi_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_stats_proc_ops = {
+ .open = smi_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct ssif_addr_info *ssif_info_find(unsigned short addr,
+ char *adapter_name,
+ bool match_null_name)
+{
+ struct ssif_addr_info *info, *found = NULL;
+
+restart:
+ list_for_each_entry(info, &ssif_infos, link) {
+ if (info->binfo.addr == addr) {
+ if (info->adapter_name || adapter_name) {
+ if (!info->adapter_name != !adapter_name) {
+ /* One is NULL and one is not */
+ continue;
+ }
+ if (strcmp(info->adapter_name, adapter_name))
+ /* Names do not match */
+ continue;
+ }
+ found = info;
+ break;
+ }
+ }
+
+ if (!found && match_null_name) {
+ /* Try to get an exact match first, then try with a NULL name */
+ adapter_name = NULL;
+ match_null_name = false;
+ goto restart;
+ }
+
+ return found;
+}
+
+static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
+{
+#ifdef CONFIG_ACPI
+ acpi_handle acpi_handle;
+
+ acpi_handle = ACPI_HANDLE(dev);
+ if (acpi_handle) {
+ ssif_info->addr_source = SI_ACPI;
+ ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+ return true;
+ }
+#endif
+ return false;
+}
+
+static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ struct ssif_info *ssif_info;
+ int rv = 0;
+ int len;
+ int i;
+ u8 slave_addr = 0;
+ struct ssif_addr_info *addr_info = NULL;
+
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL);
+ if (!ssif_info) {
+ kfree(resp);
+ return -ENOMEM;
+ }
+
+ if (!check_acpi(ssif_info, &client->dev)) {
+ addr_info = ssif_info_find(client->addr, client->adapter->name,
+ true);
+ if (!addr_info) {
+ /* Must have come in through sysfs. */
+ ssif_info->addr_source = SI_HOTMOD;
+ } else {
+ ssif_info->addr_source = addr_info->addr_src;
+ ssif_info->ssif_debug = addr_info->debug;
+ ssif_info->addr_info = addr_info->addr_info;
+ slave_addr = addr_info->slave_addr;
+ }
+ }
+
+ pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
+ ipmi_addr_src_to_str(ssif_info->addr_source),
+ client->addr, client->adapter->name, slave_addr);
+
+ /*
+ * Do a Get Device ID command, since it comes back with some
+ * useful info.
+ */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv)
+ goto out;
+
+ rv = ipmi_demangle_device_id(resp, len, &ssif_info->device_id);
+ if (rv)
+ goto out;
+
+ ssif_info->client = client;
+ i2c_set_clientdata(client, ssif_info);
+
+ /* Now check for system interface capabilities */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
+ msg[2] = 0; /* SSIF */
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (!rv && (len >= 3) && (resp[2] == 0)) {
+ if (len < 7) {
+ if (ssif_dbg_probe)
+ pr_info(PFX "SSIF info too short: %d\n", len);
+ goto no_support;
+ }
+
+ /* Got a good SSIF response, handle it. */
+ ssif_info->max_xmit_msg_size = resp[5];
+ ssif_info->max_recv_msg_size = resp[6];
+ ssif_info->multi_support = (resp[4] >> 6) & 0x3;
+ ssif_info->supports_pec = (resp[4] >> 3) & 0x1;
+
+ /* Sanitize the data */
+ switch (ssif_info->multi_support) {
+ case SSIF_NO_MULTI:
+ if (ssif_info->max_xmit_msg_size > 32)
+ ssif_info->max_xmit_msg_size = 32;
+ if (ssif_info->max_recv_msg_size > 32)
+ ssif_info->max_recv_msg_size = 32;
+ break;
+
+ case SSIF_MULTI_2_PART:
+ if (ssif_info->max_xmit_msg_size > 64)
+ ssif_info->max_xmit_msg_size = 64;
+ if (ssif_info->max_recv_msg_size > 62)
+ ssif_info->max_recv_msg_size = 62;
+ break;
+
+ case SSIF_MULTI_n_PART:
+ break;
+
+ default:
+ /* Data is not sane, just give up. */
+ goto no_support;
+ }
+ } else {
+ no_support:
+ /* Assume no multi-part or PEC support */
+ pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
+ rv, len, resp[2]);
+
+ ssif_info->max_xmit_msg_size = 32;
+ ssif_info->max_recv_msg_size = 32;
+ ssif_info->multi_support = SSIF_NO_MULTI;
+ ssif_info->supports_pec = 0;
+ }
+
+ /* Make sure the NMI timeout is cleared. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ msg[2] = WDT_PRE_TIMEOUT_INT;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 3) || (resp[2] != 0))
+ pr_warn(PFX "Unable to clear message flags: %d %d %2.2x\n",
+ rv, len, resp[2]);
+
+ /* Attempt to enable the event buffer. */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ rv = do_cmd(client, 2, msg, &len, resp);
+ if (rv || (len < 4) || (resp[2] != 0)) {
+ pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
+ ssif_info->has_event_buffer = true;
+ /* buffer is already enabled, nothing to do. */
+ goto found;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 2)) {
+ pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[2] == 0)
+ /* A successful return means the event buffer is supported. */
+ ssif_info->has_event_buffer = true;
+
+ found:
+ ssif_info->intf_num = atomic_inc_return(&next_intf);
+
+ if (ssif_dbg_probe) {
+ pr_info("ssif_probe: i2c_probe found device at i2c address %x\n",
+ client->addr);
+ }
+
+ spin_lock_init(&ssif_info->lock);
+ ssif_info->ssif_state = SSIF_NORMAL;
+ init_timer(&ssif_info->retry_timer);
+ ssif_info->retry_timer.data = (unsigned long) ssif_info;
+ ssif_info->retry_timer.function = retry_timeout;
+
+ for (i = 0; i < SSIF_NUM_STATS; i++)
+ atomic_set(&ssif_info->stats[i], 0);
+
+ if (ssif_info->supports_pec)
+ ssif_info->client->flags |= I2C_CLIENT_PEC;
+
+ ssif_info->handlers.owner = THIS_MODULE;
+ ssif_info->handlers.start_processing = ssif_start_processing;
+ ssif_info->handlers.get_smi_info = get_smi_info;
+ ssif_info->handlers.sender = sender;
+ ssif_info->handlers.request_events = request_events;
+ ssif_info->handlers.inc_usecount = inc_usecount;
+ ssif_info->handlers.dec_usecount = dec_usecount;
+
+ {
+ unsigned int thread_num;
+
+ thread_num = ((ssif_info->client->adapter->nr << 8) |
+ ssif_info->client->addr);
+ init_completion(&ssif_info->wake_thread);
+ ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info,
+ "kssif%4.4x", thread_num);
+ if (IS_ERR(ssif_info->thread)) {
+ rv = PTR_ERR(ssif_info->thread);
+ dev_notice(&ssif_info->client->dev,
+ "Could not start kernel thread: error %d\n",
+ rv);
+ goto out;
+ }
+ }
+
+ rv = ipmi_register_smi(&ssif_info->handlers,
+ ssif_info,
+ &ssif_info->device_id,
+ &ssif_info->client->dev,
+ slave_addr);
+ if (rv) {
+ pr_err(PFX "Unable to register device: error %d\n", rv);
+ goto out;
+ }
+
+ rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type",
+ &smi_type_proc_ops,
+ ssif_info);
+ if (rv) {
+ pr_err(PFX "Unable to create proc entry: %d\n", rv);
+ goto out_err_unreg;
+ }
+
+ rv = ipmi_smi_add_proc_entry(ssif_info->intf, "ssif_stats",
+ &smi_stats_proc_ops,
+ ssif_info);
+ if (rv) {
+ pr_err(PFX "Unable to create proc entry: %d\n", rv);
+ goto out_err_unreg;
+ }
+
+ out:
+ if (rv)
+ kfree(ssif_info);
+ kfree(resp);
+ return rv;
+
+ out_err_unreg:
+ ipmi_unregister_smi(ssif_info->intf);
+ goto out;
+}
+
+static int ssif_adapter_handler(struct device *adev, void *opaque)
+{
+ struct ssif_addr_info *addr_info = opaque;
+
+ if (adev->type != &i2c_adapter_type)
+ return 0;
+
+ i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo);
+
+ if (!addr_info->adapter_name)
+ return 1; /* Only try the first I2C adapter by default. */
+ return 0;
+}
+
+static int new_ssif_client(int addr, char *adapter_name,
+ int debug, int slave_addr,
+ enum ipmi_addr_src addr_src)
+{
+ struct ssif_addr_info *addr_info;
+ int rv = 0;
+
+ mutex_lock(&ssif_infos_mutex);
+ if (ssif_info_find(addr, adapter_name, false)) {
+ rv = -EEXIST;
+ goto out_unlock;
+ }
+
+ addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL);
+ if (!addr_info) {
+ rv = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (adapter_name) {
+ addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL);
+ if (!addr_info->adapter_name) {
+ kfree(addr_info);
+ rv = -ENOMEM;
+ goto out_unlock;
+ }
+ }
+
+ strncpy(addr_info->binfo.type, DEVICE_NAME,
+ sizeof(addr_info->binfo.type));
+ addr_info->binfo.addr = addr;
+ addr_info->binfo.platform_data = addr_info;
+ addr_info->debug = debug;
+ addr_info->slave_addr = slave_addr;
+ addr_info->addr_src = addr_src;
+
+ list_add_tail(&addr_info->link, &ssif_infos);
+
+ if (initialized)
+ i2c_for_each_dev(addr_info, ssif_adapter_handler);
+ /* Otherwise address list will get it */
+
+out_unlock:
+ mutex_unlock(&ssif_infos_mutex);
+ return rv;
+}
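+/*
+ * Usage sketch (illustrative values): a BMC at 7-bit I2C address 0x42
+ * reachable on any adapter would be registered with:
+ *
+ *	new_ssif_client(0x42, NULL, 0, 0x20, SI_HARDCODED);
+ *
+ * A NULL adapter_name matches any adapter; a non-NULL name restricts
+ * the binding to that specific I2C bus.
+ */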
+
+static void free_ssif_clients(void)
+{
+ struct ssif_addr_info *info, *tmp;
+
+ mutex_lock(&ssif_infos_mutex);
+ list_for_each_entry_safe(info, tmp, &ssif_infos, link) {
+ list_del(&info->link);
+ kfree(info->adapter_name);
+ kfree(info);
+ }
+ mutex_unlock(&ssif_infos_mutex);
+}
+
+static unsigned short *ssif_address_list(void)
+{
+ struct ssif_addr_info *info;
+ unsigned int count = 0, i;
+ unsigned short *address_list;
+
+ list_for_each_entry(info, &ssif_infos, link)
+ count++;
+
+ address_list = kzalloc(sizeof(*address_list) * (count + 1), GFP_KERNEL);
+ if (!address_list)
+ return NULL;
+
+ i = 0;
+ list_for_each_entry(info, &ssif_infos, link) {
+ unsigned short addr = info->binfo.addr;
+ int j;
+
+ for (j = 0; j < i; j++) {
+ if (address_list[j] == addr)
+ goto skip_addr;
+ }
+ address_list[i] = addr;
+ i++;
+skip_addr:
+ continue;
+ }
+ address_list[i] = I2C_CLIENT_END;
+
+ return address_list;
+}
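+/*
+ * Worked example (illustrative): with 0x42 registered twice (on two
+ * different adapters) and 0x52 once, the loop above builds
+ * { 0x42, 0x52, I2C_CLIENT_END }, suitable for the i2c driver's
+ * address_list used during detection.
+ */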
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id ssif_acpi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssif_acpi_match);
+
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially. Once we don't find a table, there
+ * are no more.
+ */
+static int acpi_failure;
+
+/*
+ * Defined in the IPMI 2.0 spec.
+ */
+struct SPMITable {
+ s8 Signature[4];
+ u32 Length;
+ u8 Revision;
+ u8 Checksum;
+ s8 OEMID[6];
+ s8 OEMTableID[8];
+ s8 OEMRevision[4];
+ s8 CreatorID[4];
+ s8 CreatorRevision[4];
+ u8 InterfaceType;
+ u8 IPMIlegacy;
+ s16 SpecificationRevision;
+
+ /*
+ * Bit 0 - SCI interrupt supported
+ * Bit 1 - I/O APIC/SAPIC
+ */
+ u8 InterruptType;
+
+ /*
+ * If bit 0 of InterruptType is set, then this is the SCI
+ * interrupt in the GPEx_STS register.
+ */
+ u8 GPE;
+
+ s16 Reserved;
+
+ /*
+ * If bit 1 of InterruptType is set, then this is the I/O
+ * APIC/SAPIC interrupt.
+ */
+ u32 GlobalSystemInterrupt;
+
+ /* The actual register address. */
+ struct acpi_generic_address addr;
+
+ u8 UID[4];
+
+ s8 spmi_id[1]; /* A '\0' terminated array starts here. */
+};
+
+static int try_init_spmi(struct SPMITable *spmi)
+{
+ unsigned short myaddr;
+
+ if (num_addrs >= MAX_SSIF_BMCS)
+ return -1;
+
+ if (spmi->IPMIlegacy != 1) {
+ pr_warn("IPMI: Bad SPMI legacy: %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
+ }
+
+ if (spmi->InterfaceType != 4)
+ return -ENODEV;
+
+ if (spmi->addr.space_id != ACPI_ADR_SPACE_SMBUS) {
+ pr_warn(PFX "Invalid ACPI SSIF I/O Address type: %d\n",
+ spmi->addr.space_id);
+ return -EIO;
+ }
+
+ myaddr = spmi->addr.address >> 1;
+
+ return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI);
+}
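+/*
+ * Example (illustrative): a matching SPMI table has IPMIlegacy == 1,
+ * InterfaceType == 4 (SSIF) and the SMBus address in 8-bit form, so
+ * e.g. spmi->addr.address == 0x84 yields the 7-bit I2C address 0x42.
+ */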
+
+static void spmi_find_bmc(void)
+{
+ acpi_status status;
+ struct SPMITable *spmi;
+ int i;
+
+ if (acpi_disabled)
+ return;
+
+ if (acpi_failure)
+ return;
+
+ for (i = 0; ; i++) {
+ status = acpi_get_table(ACPI_SIG_SPMI, i+1,
+ (struct acpi_table_header **)&spmi);
+ if (status != AE_OK)
+ return;
+
+ try_init_spmi(spmi);
+ }
+}
+#else
+static void spmi_find_bmc(void) { }
+#endif
+
+#ifdef CONFIG_DMI
+static int decode_dmi(const struct dmi_device *dmi_dev)
+{
+ struct dmi_header *dm = dmi_dev->device_data;
+ u8 *data = (u8 *) dm;
+ u8 len = dm->length;
+ unsigned short myaddr;
+ int slave_addr;
+
+ if (num_addrs >= MAX_SSIF_BMCS)
+ return -1;
+
+ if (len < 9)
+ return -1;
+
+ if (data[0x04] != 4) /* Not SSIF */
+ return -1;
+
+ if ((data[8] >> 1) == 0) {
+ /*
+ * Some broken systems put the I2C address in
+ * the slave address field. We try to
+ * accommodate them here.
+ */
+ myaddr = data[6] >> 1;
+ slave_addr = 0;
+ } else {
+ myaddr = data[8] >> 1;
+ slave_addr = data[6];
+ }
+
+ return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS);
+}
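+/*
+ * Field usage in the DMI IPMI device record decoded above (offsets
+ * into the raw record):
+ *
+ *	data[0x04]	interface type, 4 == SSIF
+ *	data[6]		BMC slave address field
+ *	data[8]		base address field, I2C address in bits 7:1
+ *
+ * Normally the I2C address is data[8] >> 1 and the slave address
+ * data[6]; firmware that leaves data[8] zero put the I2C address in
+ * data[6] instead, which the fallback above accommodates.
+ */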
+
+static void dmi_iterator(void)
+{
+ const struct dmi_device *dev = NULL;
+
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
+ decode_dmi(dev);
+}
+#else
+static void dmi_iterator(void) { }
+#endif
+
+static const struct i2c_device_id ssif_id[] = {
+ { DEVICE_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ssif_id);
+
+static struct i2c_driver ssif_i2c_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_NAME
+ },
+ .probe = ssif_probe,
+ .remove = ssif_remove,
+ .id_table = ssif_id,
+ .detect = ssif_detect
+};
+
+static int init_ipmi_ssif(void)
+{
+ int i;
+ int rv;
+
+ if (initialized)
+ return 0;
+
+ pr_info("IPMI SSIF Interface driver\n");
+
+ /* build list for i2c from addr list */
+ for (i = 0; i < num_addrs; i++) {
+ rv = new_ssif_client(addr[i], adapter_name[i],
+ dbg[i], slave_addrs[i],
+ SI_HARDCODED);
+ if (rv)
+ pr_err(PFX
+ "Couldn't add hardcoded device at addr 0x%x\n",
+ addr[i]);
+ }
+
+ if (ssif_tryacpi)
+ ssif_i2c_driver.driver.acpi_match_table =
+ ACPI_PTR(ssif_acpi_match);
+ if (ssif_trydmi)
+ dmi_iterator();
+ if (ssif_tryacpi)
+ spmi_find_bmc();
+
+ ssif_i2c_driver.address_list = ssif_address_list();
+
+ rv = i2c_add_driver(&ssif_i2c_driver);
+ if (!rv)
+ initialized = true;
+
+ return rv;
+}
+module_init(init_ipmi_ssif);
+
+static void cleanup_ipmi_ssif(void)
+{
+ if (!initialized)
+ return;
+
+ initialized = false;
+
+ i2c_del_driver(&ssif_i2c_driver);
+
+ free_ssif_clients();
+}
+module_exit(cleanup_ipmi_ssif);
+
+MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>");
+MODULE_DESCRIPTION("IPMI driver for management controllers on a SMBus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7bb9d65d9a2c..e5541117b3e9 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -283,7 +283,7 @@ EXPORT_SYMBOL(fence_add_callback);
* @cb: [in] the callback to remove
*
* Remove a previously queued callback from the fence. This function returns
- * true if the callback is succesfully removed, or false if the fence has
+ * true if the callback is successfully removed, or false if the fence has
* already been signaled.
*
* *WARNING*:
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de469821bc1b..f2b2c4e87aef 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -107,6 +107,13 @@ config AT_HDMAC
help
Support the Atmel AHB DMA controller.
+config AT_XDMAC
+ tristate "Atmel XDMA support"
+ depends on ARCH_AT91
+ select DMA_ENGINE
+ help
+ Support the Atmel XDMA controller.
+
config FSL_DMA
tristate "Freescale Elo series DMA support"
depends on FSL_SOC
@@ -395,12 +402,12 @@ config XILINX_VDMA
config DMA_SUN6I
tristate "Allwinner A31 SoCs DMA support"
- depends on MACH_SUN6I || COMPILE_TEST
+ depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
depends on RESET_CONTROLLER
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support for the DMA engine for Allwinner A31 SoCs.
+ Support for the DMA engine first found in Allwinner A31 SoCs.
config NBPFAXI_DMA
tristate "Renesas Type-AXI NBPF DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index cb626c179911..2022b5451377 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
+obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE_BASE) += sh/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e34024b000a4..1364d00881dd 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2164,7 +2164,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
__func__, ret);
goto out_no_memcpy;
}
- pl08x->memcpy.chancnt = ret;
/* Register slave channels */
ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
@@ -2175,7 +2174,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
__func__, ret);
goto out_no_slave;
}
- pl08x->slave.chancnt = ret;
ret = dma_async_device_register(&pl08x->memcpy);
if (ret) {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
new file mode 100644
index 000000000000..b60d77a22df6
--- /dev/null
+++ b/drivers/dma/at_xdmac.c
@@ -0,0 +1,1524 @@
+/*
+ * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
+ *
+ * Copyright (C) 2014 Atmel Corporation
+ *
+ * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/barrier.h>
+#include <dt-bindings/dma/at91.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include "dmaengine.h"
+
+/* Global registers */
+#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
+#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels (field encodes N - 1) */
+#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
+#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests (field encodes N - 1) */
+#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
+#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
+#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
+#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
+#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
+#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
+#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
+#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
+#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
+#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
+#define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */
+#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
+#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
+#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
+#define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */
+#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
+#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
+
+/* Channel relative registers offsets */
+#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
+#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
+#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
+#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
+#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
+#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
+#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
+#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
+#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
+#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
+#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
+#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
+#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
+#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
+#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
+#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
+#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
+#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
+#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
+#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
+#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
+#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
+#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
+#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
+#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
+#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
+#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
+#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
+#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
+#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
+#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
+#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
+#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
+#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
+#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
+#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
+#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
+#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
+#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
+#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
+#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
+#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
+#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
+#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
+#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
+#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
+#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
+#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
+#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
+#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
+#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
+#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
+#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
+#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
+#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
+#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
+#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
+#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
+#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
+#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
+#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
+#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
+#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
+#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
+#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
+#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
+#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
+#define AT_XDMAC_CC_DWIDTH_OFFSET 11
+#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
+#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
+#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
+#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
+#define AT_XDMAC_CC_DWIDTH_WORD 0x2
+#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
+#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
+#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
+#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
+#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
+#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
+#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
+#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
+#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
+#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
+#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
+#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
+#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
+#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
+#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
+#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
+#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
+#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
+#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
+#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
+#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
+#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
+#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
+#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
+#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
+#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
+
+#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
+
+/* Microblock control members */
+#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
+#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
+#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
+#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
+#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
+#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
+#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
+#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
+
+#define AT_XDMAC_MAX_CHAN 0x20
+
+enum atc_status {
+ AT_XDMAC_CHAN_IS_CYCLIC = 0,
+ AT_XDMAC_CHAN_IS_PAUSED,
+};
+
+/* ----- Channels ----- */
+struct at_xdmac_chan {
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u32 mask; /* Channel Mask */
+ u32 cfg[3]; /* Channel Configuration Register */
+ #define AT_XDMAC_CUR_CFG 0 /* Current channel conf */
+ #define AT_XDMAC_DEV_TO_MEM_CFG 1 /* Predefined dev to mem channel conf */
+ #define AT_XDMAC_MEM_TO_DEV_CFG 2 /* Predefined mem to dev channel conf */
+ u8 perid; /* Peripheral ID */
+ u8 perif; /* Peripheral Interface */
+ u8 memif; /* Memory Interface */
+ u32 per_src_addr;
+ u32 per_dst_addr;
+ u32 save_cim;
+ u32 save_cnda;
+ u32 save_cndc;
+ unsigned long status;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+
+ struct list_head xfers_list;
+ struct list_head free_descs_list;
+};
+
+
+/* ----- Controller ----- */
+struct at_xdmac {
+ struct dma_device dma;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ u32 save_gim;
+ u32 save_gs;
+ struct dma_pool *at_xdmac_desc_pool;
+ struct at_xdmac_chan chan[0];
+};
+
+
+/* ----- Descriptors ----- */
+
+/* Linked List Descriptor */
+struct at_xdmac_lld {
+ dma_addr_t mbr_nda; /* Next Descriptor Member */
+ u32 mbr_ubc; /* Microblock Control Member */
+ dma_addr_t mbr_sa; /* Source Address Member */
+ dma_addr_t mbr_da; /* Destination Address Member */
+ u32 mbr_cfg; /* Configuration Register */
+};
+
+
+struct at_xdmac_desc {
+ struct at_xdmac_lld lld;
+ enum dma_transfer_direction direction;
+ struct dma_async_tx_descriptor tx_dma_desc;
+ struct list_head desc_node;
+ /* Following members are only used by the first descriptor */
+ bool active_xfer;
+ unsigned int xfer_size;
+ struct list_head descs_list;
+ struct list_head xfer_node;
+};
+
+static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
+{
+ return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
+}
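+/*
+ * Example: channel register windows are 0x40 bytes wide and start
+ * right after the global registers, so channel 2 lives at
+ * 0x50 + 2 * 0x40 = 0xD0 from the controller base.
+ */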
+
+#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
+#define at_xdmac_write(atxdmac, reg, value) \
+ writel_relaxed((value), (atxdmac)->regs + (reg))
+
+#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
+#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
+
+static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
+{
+ return container_of(dchan, struct at_xdmac_chan, chan);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
+{
+ return container_of(ddev, struct at_xdmac, dma);
+}
+
+static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
+}
+
+static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
+{
+ return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+}
+
+static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
+{
+ return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+}
+
+static inline int at_xdmac_csize(u32 maxburst)
+{
+ int csize;
+
+ csize = ffs(maxburst) - 1;
+ if (csize > 4)
+ csize = -EINVAL;
+
+ return csize;
+}
+
+static inline u8 at_xdmac_get_dwidth(u32 cfg)
+{
+ return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
+}
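+/*
+ * Worked example: a slave config with maxburst == 8 gives
+ * csize = ffs(8) - 1 = 3, i.e. chunks of 8 data items, while a 4-byte
+ * address width gives dwidth = ffs(4) - 1 = 2, matching
+ * AT_XDMAC_CC_DWIDTH_WORD.
+ */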
+
+static unsigned int init_nr_desc_per_channel = 64;
+module_param(init_nr_desc_per_channel, uint, 0644);
+MODULE_PARM_DESC(init_nr_desc_per_channel,
+ "initial descriptors per channel (default: 64)");
+
+
+static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+
+ return at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask;
+}
+
+static void at_xdmac_off(struct at_xdmac *atxdmac)
+{
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
+
+ /* Wait until all channels are disabled. */
+ while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
+ cpu_relax();
+
+ at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
+}
+
+/* Call with lock held. */
+static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
+ struct at_xdmac_desc *first)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ u32 reg;
+
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
+
+ if (at_xdmac_chan_is_enabled(atchan))
+ return;
+
+ /* Mark the transfer active so we don't try to start it again. */
+ first->active_xfer = true;
+
+ /* Tell xdmac where to get the first descriptor. */
+ reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
+ | AT_XDMAC_CNDA_NDAIF(atchan->memif);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
+
+ /*
+ * When doing memory to memory transfer we need to use the next
+ * descriptor view 2 since some fields of the configuration register
+ * depend on transfer size and src/dest addresses.
+ */
+ if (is_slave_direction(first->direction)) {
+ reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
+ if (first->direction == DMA_MEM_TO_DEV)
+ atchan->cfg[AT_XDMAC_CUR_CFG] =
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+ else
+ atchan->cfg[AT_XDMAC_CUR_CFG] =
+ atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC,
+ atchan->cfg[AT_XDMAC_CUR_CFG]);
+ } else {
+ /*
+ * No need to write the AT_XDMAC_CC register, it will be done when
+ * the descriptor is fetched.
+ */
+ reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
+ }
+
+ reg |= AT_XDMAC_CNDC_NDDUP
+ | AT_XDMAC_CNDC_NDSUP
+ | AT_XDMAC_CNDC_NDE;
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
+
+ dev_vdbg(chan2dev(&atchan->chan),
+ "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+ __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+ at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
+ reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+ /*
+ * There is no end of list when doing cyclic DMA, so we need an
+ * interrupt after each period instead.
+ */
+ if (at_xdmac_chan_is_cyclic(atchan))
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
+ reg | AT_XDMAC_CIE_BIE);
+ else
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
+ reg | AT_XDMAC_CIE_LIE);
+ at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
+ dev_vdbg(chan2dev(&atchan->chan),
+ "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
+ wmb();
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+
+ dev_vdbg(chan2dev(&atchan->chan),
+ "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+ __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+}
+
+static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct at_xdmac_desc *desc = txd_to_at_desc(tx);
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&atchan->lock);
+ cookie = dma_cookie_assign(tx);
+
+ dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
+ __func__, atchan, desc);
+ list_add_tail(&desc->xfer_node, &atchan->xfers_list);
+ if (list_is_singular(&atchan->xfers_list))
+ at_xdmac_start_xfer(atchan, desc);
+
+ spin_unlock_bh(&atchan->lock);
+ return cookie;
+}
+
+static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
+ gfp_t gfp_flags)
+{
+ struct at_xdmac_desc *desc;
+ struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
+ dma_addr_t phys;
+
+ desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+ if (desc) {
+ memset(desc, 0, sizeof(*desc));
+ INIT_LIST_HEAD(&desc->descs_list);
+ dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
+ desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
+ desc->tx_dma_desc.phys = phys;
+ }
+
+ return desc;
+}
+
+/* Call must be protected by lock. */
+static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac_desc *desc;
+
+ if (list_empty(&atchan->free_descs_list)) {
+ desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
+ } else {
+ desc = list_first_entry(&atchan->free_descs_list,
+ struct at_xdmac_desc, desc_node);
+ list_del(&desc->desc_node);
+ desc->active_xfer = false;
+ }
+
+ return desc;
+}
+
+static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *of_dma)
+{
+ struct at_xdmac *atxdmac = of_dma->of_dma_data;
+ struct at_xdmac_chan *atchan;
+ struct dma_chan *chan;
+ struct device *dev = atxdmac->dma.dev;
+
+ if (dma_spec->args_count != 1) {
+ dev_err(dev, "dma phandler args: bad number of args\n");
+ return NULL;
+ }
+
+ chan = dma_get_any_slave_channel(&atxdmac->dma);
+ if (!chan) {
+ dev_err(dev, "can't get a dma channel\n");
+ return NULL;
+ }
+
+ atchan = to_at_xdmac_chan(chan);
+ atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
+ atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
+ atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
+ dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
+ atchan->memif, atchan->perif, atchan->perid);
+
+ return chan;
+}
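+/*
+ * Device tree sketch (illustrative, assuming the setter macros from
+ * dt-bindings/dma/at91.h): the single cell decoded above packs the
+ * memory interface, peripheral interface and peripheral ID:
+ *
+ *	dmas = <&xdmac0 (AT91_XDMAC_DT_MEM_IF(0)
+ *			 | AT91_XDMAC_DT_PER_IF(1)
+ *			 | AT91_XDMAC_DT_PERID(11))>;
+ *	dma-names = "tx";
+ */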
+
+static int at_xdmac_set_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *sconfig)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ u8 dwidth;
+ int csize;
+
+ atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
+ AT91_XDMAC_DT_PERID(atchan->perid)
+ | AT_XDMAC_CC_DAM_INCREMENTED_AM
+ | AT_XDMAC_CC_SAM_FIXED_AM
+ | AT_XDMAC_CC_DIF(atchan->memif)
+ | AT_XDMAC_CC_SIF(atchan->perif)
+ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+ | AT_XDMAC_CC_DSYNC_PER2MEM
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_PER_TRAN;
+ csize = at_xdmac_csize(sconfig->src_maxburst);
+ if (csize < 0) {
+ dev_err(chan2dev(chan), "invalid src maxburst value\n");
+ return -EINVAL;
+ }
+ atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+ dwidth = ffs(sconfig->src_addr_width) - 1;
+ atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
+ AT91_XDMAC_DT_PERID(atchan->perid)
+ | AT_XDMAC_CC_DAM_FIXED_AM
+ | AT_XDMAC_CC_SAM_INCREMENTED_AM
+ | AT_XDMAC_CC_DIF(atchan->perif)
+ | AT_XDMAC_CC_SIF(atchan->memif)
+ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+ | AT_XDMAC_CC_DSYNC_MEM2PER
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_PER_TRAN;
+ csize = at_xdmac_csize(sconfig->dst_maxburst);
+ if (csize < 0) {
+ dev_err(chan2dev(chan), "invalid src maxburst value\n");
+ return -EINVAL;
+ }
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+ dwidth = ffs(sconfig->dst_addr_width) - 1;
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+ /* Src and dst addr are needed to configure the link list descriptor. */
+ atchan->per_src_addr = sconfig->src_addr;
+ atchan->per_dst_addr = sconfig->dst_addr;
+
+ dev_dbg(chan2dev(chan),
+ "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
+ __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
+ atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
+ atchan->per_src_addr, atchan->per_dst_addr);
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *first = NULL, *prev = NULL;
+ struct scatterlist *sg;
+ int i;
+ u32 cfg;
+ unsigned int xfer_size = 0;
+
+ if (!sgl)
+ return NULL;
+
+ if (!is_slave_direction(direction)) {
+ dev_err(chan2dev(chan), "invalid DMA direction\n");
+ return NULL;
+ }
+
+ dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
+ __func__, sg_len,
+ direction == DMA_MEM_TO_DEV ? "to device" : "from device",
+ flags);
+
+ /* Protect the channel configuration against concurrent set_slave_config. */
+ spin_lock_bh(&atchan->lock);
+
+ /* Prepare descriptors. */
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct at_xdmac_desc *desc = NULL;
+ u32 len, mem;
+
+ len = sg_dma_len(sg);
+ mem = sg_dma_address(sg);
+ if (unlikely(!len)) {
+ dev_err(chan2dev(chan), "sg data length is zero\n");
+ spin_unlock_bh(&atchan->lock);
+ return NULL;
+ }
+ dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
+ __func__, i, len, mem);
+
+ desc = at_xdmac_get_desc(atchan);
+ if (!desc) {
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+ list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ spin_unlock_bh(&atchan->lock);
+ return NULL;
+ }
+
+ /* Linked list descriptor setup. */
+ if (direction == DMA_DEV_TO_MEM) {
+ desc->lld.mbr_sa = atchan->per_src_addr;
+ desc->lld.mbr_da = mem;
+ cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ } else {
+ desc->lld.mbr_sa = mem;
+ desc->lld.mbr_da = atchan->per_dst_addr;
+ cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+ }
+ desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */
+ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
+ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
+ | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */
+ | len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */
+ dev_dbg(chan2dev(chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+ __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
+
+ /* Chain lld. */
+ if (prev) {
+ prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+ dev_dbg(chan2dev(chan),
+ "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+ __func__, prev, &prev->lld.mbr_nda);
+ }
+
+ prev = desc;
+ if (!first)
+ first = desc;
+
+ dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, desc, first);
+ list_add_tail(&desc->desc_node, &first->descs_list);
+ xfer_size += len;
+ }
+
+ spin_unlock_bh(&atchan->lock);
+
+ first->tx_dma_desc.flags = flags;
+ first->xfer_size = xfer_size;
+ first->direction = direction;
+
+ return &first->tx_dma_desc;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *first = NULL, *prev = NULL;
+ unsigned int periods = buf_len / period_len;
+ int i;
+ u32 cfg;
+
+ dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
+ __func__, &buf_addr, buf_len, period_len,
+ direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
+
+ if (!is_slave_direction(direction)) {
+ dev_err(chan2dev(chan), "invalid DMA direction\n");
+ return NULL;
+ }
+
+ if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
+ dev_err(chan2dev(chan), "channel currently used\n");
+ return NULL;
+ }
+
+ for (i = 0; i < periods; i++) {
+ struct at_xdmac_desc *desc = NULL;
+
+ spin_lock_bh(&atchan->lock);
+ desc = at_xdmac_get_desc(atchan);
+ if (!desc) {
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+ list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ spin_unlock_bh(&atchan->lock);
+ return NULL;
+ }
+ spin_unlock_bh(&atchan->lock);
+ dev_dbg(chan2dev(chan),
+ "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
+ __func__, desc, &desc->tx_dma_desc.phys);
+
+ if (direction == DMA_DEV_TO_MEM) {
+ desc->lld.mbr_sa = atchan->per_src_addr;
+ desc->lld.mbr_da = buf_addr + i * period_len;
+ cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ } else {
+ desc->lld.mbr_sa = buf_addr + i * period_len;
+ desc->lld.mbr_da = atchan->per_dst_addr;
+ cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+ }
+ desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
+ | AT_XDMAC_MBR_UBC_NDEN
+ | AT_XDMAC_MBR_UBC_NSEN
+ | AT_XDMAC_MBR_UBC_NDE
+ | period_len >> at_xdmac_get_dwidth(cfg);
+
+ dev_dbg(chan2dev(chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+ __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
+
+ /* Chain lld. */
+ if (prev) {
+ prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+ dev_dbg(chan2dev(chan),
+ "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+ __func__, prev, &prev->lld.mbr_nda);
+ }
+
+ prev = desc;
+ if (!first)
+ first = desc;
+
+ dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, desc, first);
+ list_add_tail(&desc->desc_node, &first->descs_list);
+ }
+
+ prev->lld.mbr_nda = first->tx_dma_desc.phys;
+ dev_dbg(chan2dev(chan),
+ "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
+ __func__, prev, &prev->lld.mbr_nda);
+ first->tx_dma_desc.flags = flags;
+ first->xfer_size = buf_len;
+ first->direction = direction;
+
+ return &first->tx_dma_desc;
+}
+
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *first = NULL, *prev = NULL;
+ size_t remaining_size = len, xfer_size = 0, ublen;
+ dma_addr_t src_addr = src, dst_addr = dest;
+ u32 dwidth;
+ /*
+ * WARNING: we don't know the direction, which means we can't set the
+ * source and destination interfaces dynamically and have to use the
+ * same one for both. Only interface 0 allows EBI access, but DDR is
+ * reachable through both ports (at least on SAMA5D4x), so using the
+ * same interface for source and destination works around the unknown
+ * direction.
+ */
+ u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+ | AT_XDMAC_CC_SAM_INCREMENTED_AM
+ | AT_XDMAC_CC_DIF(0)
+ | AT_XDMAC_CC_SIF(0)
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_MEM_TRAN;
+
+ dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
+ __func__, &src, &dest, len, flags);
+
+ if (unlikely(!len))
+ return NULL;
+
+ /*
+ * Check address alignment to select the greatest data width we can
+ * use. On XDMAC implementations without dword transfer support,
+ * selecting dword behaves the same as selecting word transfers.
+ */
+ if (!((src_addr | dst_addr) & 7)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
+ } else if (!((src_addr | dst_addr) & 3)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_WORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
+ } else if (!((src_addr | dst_addr) & 1)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
+ } else {
+ dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
+ dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
+ }
+
+ /* Prepare descriptors. */
+ while (remaining_size) {
+ struct at_xdmac_desc *desc = NULL;
+
+ dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
+
+ spin_lock_bh(&atchan->lock);
+ desc = at_xdmac_get_desc(atchan);
+ spin_unlock_bh(&atchan->lock);
+ if (!desc) {
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+ list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ return NULL;
+ }
+
+ /* Update src and dest addresses. */
+ src_addr += xfer_size;
+ dst_addr += xfer_size;
+
+ if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
+ xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
+ else
+ xfer_size = remaining_size;
+
+ dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
+
+ /* Check remaining length and change data width if needed. */
+ if (!((src_addr | dst_addr | xfer_size) & 7)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
+ } else if (!((src_addr | dst_addr | xfer_size) & 3)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_WORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
+ } else if (!((src_addr | dst_addr | xfer_size) & 1)) {
+ dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
+ dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
+ } else {
+ dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
+ dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
+ }
+ chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
+ chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
+
+ ublen = xfer_size >> dwidth;
+ remaining_size -= xfer_size;
+
+ desc->lld.mbr_sa = src_addr;
+ desc->lld.mbr_da = dst_addr;
+ desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
+ | AT_XDMAC_MBR_UBC_NDEN
+ | AT_XDMAC_MBR_UBC_NSEN
+ | (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0)
+ | ublen;
+ desc->lld.mbr_cfg = chan_cc;
+
+ dev_dbg(chan2dev(chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+ __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
+
+ /* Chain lld. */
+ if (prev) {
+ prev->lld.mbr_nda = desc->tx_dma_desc.phys;
+ dev_dbg(chan2dev(chan),
+ "%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n",
+ __func__, prev, prev->lld.mbr_nda);
+ }
+
+ prev = desc;
+ if (!first)
+ first = desc;
+
+ dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, desc, first);
+ list_add_tail(&desc->desc_node, &first->descs_list);
+ }
+
+ first->tx_dma_desc.flags = flags;
+ first->xfer_size = len;
+
+ return &first->tx_dma_desc;
+}
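+/*
+ * Worked example (illustrative sizes): an 8-byte-aligned 64 MiB copy
+ * selects dwidth = AT_XDMAC_CC_DWIDTH_DWORD, and since a microblock
+ * can carry up to AT_XDMAC_MBR_UBC_UBLEN_MAX << 3 bytes (128 MiB - 8),
+ * the whole request fits in a single descriptor with ublen = len >> 3.
+ */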
+
+static enum dma_status
+at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ struct at_xdmac_desc *desc, *_desc;
+ struct list_head *descs_list;
+ enum dma_status ret;
+ int residue;
+ u32 cur_nda, mask, value;
+ u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ if (!txstate)
+ return ret;
+
+ spin_lock_bh(&atchan->lock);
+
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+
+ /*
+ * If the transfer has not been started yet, there is no need to
+ * compute the residue: it is simply the transfer length.
+ */
+ if (!desc->active_xfer) {
+ dma_set_residue(txstate, desc->xfer_size);
+ spin_unlock_bh(&atchan->lock);
+ return ret;
+ }
+
+ residue = desc->xfer_size;
+ /*
+ * Flush FIFO: only relevant when the transfer is source peripheral
+ * synchronized.
+ */
+ mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
+ value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
+ if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+ cpu_relax();
+ }
+
+ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ /*
+ * Subtract the size of all microblocks already transferred, plus the
+ * current one, then add back the part of the current microblock that
+ * remains to be transferred.
+ */
+ descs_list = &desc->descs_list;
+ list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
+ residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
+ if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+ break;
+ }
+ residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+
+ spin_unlock_bh(&atchan->lock);
+
+ dma_set_residue(txstate, residue);
+
+ dev_dbg(chan2dev(chan),
+ "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
+ __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
+
+ return ret;
+}
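+/*
+ * Residue example (illustrative): for three microblocks of 0x100
+ * words each (dwidth = 2, xfer_size = 0xC00 bytes), with the
+ * controller inside the second microblock and CUBC reading 0x80, the
+ * loop subtracts the first two microblocks (0x800 bytes) and adds
+ * back the pending 0x80 << 2 bytes, giving 0xC00 - 0x800 + 0x200 =
+ * 0x600 bytes.
+ */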
+
+/* Call must be protected by lock. */
+static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
+ struct at_xdmac_desc *desc)
+{
+ dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+
+ /*
+ * Remove the transfer from the transfer list then move the transfer
+ * descriptors into the free descriptors list.
+ */
+ list_del(&desc->xfer_node);
+ list_splice_init(&desc->descs_list, &atchan->free_descs_list);
+}
+
+static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac_desc *desc;
+
+ spin_lock_bh(&atchan->lock);
+
+ /*
+ * If the channel is enabled, do nothing: advance_work will be
+ * triggered again once the interrupt fires.
+ */
+ if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
+ desc = list_first_entry(&atchan->xfers_list,
+ struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ if (!desc->active_xfer)
+ at_xdmac_start_xfer(atchan, desc);
+ }
+
+ spin_unlock_bh(&atchan->lock);
+}
+
+static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac_desc *desc;
+ struct dma_async_tx_descriptor *txd;
+
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+ txd = &desc->tx_dma_desc;
+
+ if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
+ txd->callback(txd->callback_param);
+}
+
+static void at_xdmac_tasklet(unsigned long data)
+{
+ struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
+ struct at_xdmac_desc *desc;
+ u32 error_mask;
+
+ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
+ __func__, atchan->status);
+
+ error_mask = AT_XDMAC_CIS_RBEIS
+ | AT_XDMAC_CIS_WBEIS
+ | AT_XDMAC_CIS_ROIS;
+
+ if (at_xdmac_chan_is_cyclic(atchan)) {
+ at_xdmac_handle_cyclic(atchan);
+ } else if ((atchan->status & AT_XDMAC_CIS_LIS)
+ || (atchan->status & error_mask)) {
+ struct dma_async_tx_descriptor *txd;
+
+ if (atchan->status & AT_XDMAC_CIS_RBEIS)
+ dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+ if (atchan->status & AT_XDMAC_CIS_WBEIS)
+ dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+ if (atchan->status & AT_XDMAC_CIS_ROIS)
+ dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+ spin_lock_bh(&atchan->lock);
+ desc = list_first_entry(&atchan->xfers_list,
+ struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ BUG_ON(!desc->active_xfer);
+
+ txd = &desc->tx_dma_desc;
+
+ at_xdmac_remove_xfer(atchan, desc);
+ spin_unlock_bh(&atchan->lock);
+
+ if (!at_xdmac_chan_is_cyclic(atchan)) {
+ dma_cookie_complete(txd);
+ if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
+ txd->callback(txd->callback_param);
+ }
+
+ dma_run_dependencies(txd);
+
+ at_xdmac_advance_work(atchan);
+ }
+}
+
+static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+{
+ struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
+ struct at_xdmac_chan *atchan;
+ u32 imr, status, pending;
+ u32 chan_imr, chan_status;
+ int i, ret = IRQ_NONE;
+
+ do {
+ imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+ status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
+ pending = status & imr;
+
+ dev_vdbg(atxdmac->dma.dev,
+ "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
+ __func__, status, imr, pending);
+
+ if (!pending)
+ break;
+
+ /* Find out which channel generated the interrupt. */
+ for (i = 0; i < atxdmac->dma.chancnt; i++) {
+ if (!((1 << i) & pending))
+ continue;
+
+ atchan = &atxdmac->chan[i];
+ chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
+ chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
+ atchan->status = chan_status & chan_imr;
+ dev_vdbg(atxdmac->dma.dev,
+ "%s: chan%d: imr=0x%x, status=0x%x\n",
+ __func__, i, chan_imr, chan_status);
+ dev_vdbg(chan2dev(&atchan->chan),
+ "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
+ __func__,
+ at_xdmac_chan_read(atchan, AT_XDMAC_CC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+ if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+
+ tasklet_schedule(&atchan->tasklet);
+ ret = IRQ_HANDLED;
+ }
+
+ } while (pending);
+
+ return ret;
+}
+
+static void at_xdmac_issue_pending(struct dma_chan *chan)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+
+ dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
+
+ if (!at_xdmac_chan_is_cyclic(atchan))
+ at_xdmac_advance_work(atchan);
+}
+
+static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct at_xdmac_desc *desc, *_desc;
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ int ret = 0;
+
+ dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
+
+ spin_lock_bh(&atchan->lock);
+
+ switch (cmd) {
+ case DMA_PAUSE:
+ at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+ set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+ break;
+
+ case DMA_RESUME:
+ if (!at_xdmac_chan_is_paused(atchan))
+ break;
+
+ at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+ while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+ cpu_relax();
+
+ /* Cancel all pending transfers. */
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+ at_xdmac_remove_xfer(atchan, desc);
+
+ clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+ break;
+
+ case DMA_SLAVE_CONFIG:
+ ret = at_xdmac_set_slave_config(chan,
+ (struct dma_slave_config *)arg);
+ break;
+
+ default:
+ dev_err(chan2dev(chan),
+ "unmanaged or unknown dma control cmd: %d\n", cmd);
+ ret = -ENXIO;
+ }
+
+ spin_unlock_bh(&atchan->lock);
+
+ return ret;
+}
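+/*
+ * These commands arrive through the generic dmaengine wrappers, e.g.:
+ *
+ *	dmaengine_pause(chan);			DMA_PAUSE
+ *	dmaengine_resume(chan);			DMA_RESUME
+ *	dmaengine_terminate_all(chan);		DMA_TERMINATE_ALL
+ *	dmaengine_slave_config(chan, &cfg);	DMA_SLAVE_CONFIG
+ */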
+
+static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *desc;
+ int i;
+
+ spin_lock_bh(&atchan->lock);
+
+ if (at_xdmac_chan_is_enabled(atchan)) {
+ dev_err(chan2dev(chan),
+ "can't allocate channel resources (channel enabled)\n");
+ i = -EIO;
+ goto spin_unlock;
+ }
+
+ if (!list_empty(&atchan->free_descs_list)) {
+ dev_err(chan2dev(chan),
+ "can't allocate channel resources (channel not free from a previous use)\n");
+ i = -EIO;
+ goto spin_unlock;
+ }
+
+ for (i = 0; i < init_nr_desc_per_channel; i++) {
+ desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
+ if (!desc) {
+ dev_warn(chan2dev(chan),
+ "only %d descriptors have been allocated\n", i);
+ break;
+ }
+ list_add_tail(&desc->desc_node, &atchan->free_descs_list);
+ }
+
+ dma_cookie_init(chan);
+
+ dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+
+spin_unlock:
+ spin_unlock_bh(&atchan->lock);
+ return i;
+}
+
+static void at_xdmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
+ struct at_xdmac_desc *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
+ dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
+ list_del(&desc->desc_node);
+ dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
+ }
+}
+
+#define AT_XDMAC_DMA_BUSWIDTHS\
+ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = true;
+ caps->cmd_terminate = true;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atmel_xdmac_prepare(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+
+ /* Wait for transfer completion, except in cyclic case. */
+ if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
+ return -EAGAIN;
+ }
+ return 0;
+}
+#else
+# define atmel_xdmac_prepare NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int atmel_xdmac_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+
+ if (at_xdmac_chan_is_cyclic(atchan)) {
+ if (!at_xdmac_chan_is_paused(atchan))
+ at_xdmac_control(chan, DMA_PAUSE, 0);
+ atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
+ atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
+ atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
+ }
+ }
+ atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+
+ at_xdmac_off(atxdmac);
+ clk_disable_unprepare(atxdmac->clk);
+ return 0;
+}
+
+static int atmel_xdmac_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
+ struct at_xdmac_chan *atchan;
+ struct dma_chan *chan, *_chan;
+ int i;
+ u32 cfg;
+
+ clk_prepare_enable(atxdmac->clk);
+
+ /* Clear pending interrupts. */
+ for (i = 0; i < atxdmac->dma.chancnt; i++) {
+ atchan = &atxdmac->chan[i];
+ while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
+ cpu_relax();
+ }
+
+ at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
+ atchan = to_at_xdmac_chan(chan);
+ cfg = atchan->cfg[AT_XDMAC_CUR_CFG];
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
+ if (at_xdmac_chan_is_cyclic(atchan)) {
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+ wmb();
+ at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int at_xdmac_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct at_xdmac *atxdmac;
+ int irq, size, nr_channels, i, ret;
+ void __iomem *base;
+ u32 reg;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /*
+	 * Read the number of xdmac channels. The read helper can't be used
+	 * since atxdmac is not yet allocated, and we need the channel count
+	 * to size that allocation.
+ */
+ reg = readl_relaxed(base + AT_XDMAC_GTYPE);
+ nr_channels = AT_XDMAC_NB_CH(reg);
+ if (nr_channels > AT_XDMAC_MAX_CHAN) {
+ dev_err(&pdev->dev, "invalid number of channels (%u)\n",
+ nr_channels);
+ return -EINVAL;
+ }
+
+ size = sizeof(*atxdmac);
+ size += nr_channels * sizeof(struct at_xdmac_chan);
+ atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!atxdmac) {
+ dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
+ return -ENOMEM;
+ }
+
+ atxdmac->regs = base;
+ atxdmac->irq = irq;
+
+ atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
+ if (IS_ERR(atxdmac->clk)) {
+ dev_err(&pdev->dev, "can't get dma_clk\n");
+ return PTR_ERR(atxdmac->clk);
+ }
+
+	/* Do not use a devres-managed IRQ, to avoid races with the tasklet. */
+ ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
+ if (ret) {
+ dev_err(&pdev->dev, "can't request irq\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(atxdmac->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "can't prepare or enable clock\n");
+ goto err_free_irq;
+ }
+
+ atxdmac->at_xdmac_desc_pool =
+ dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
+ sizeof(struct at_xdmac_desc), 4, 0);
+ if (!atxdmac->at_xdmac_desc_pool) {
+ dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
+ dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
+ /*
+ * Without DMA_PRIVATE the driver is not able to allocate more than
+	 * one channel: the second allocation fails in private_candidate().
+ */
+ dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
+ atxdmac->dma.dev = &pdev->dev;
+ atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
+ atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
+ atxdmac->dma.device_tx_status = at_xdmac_tx_status;
+ atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
+ atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
+ atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
+ atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
+ atxdmac->dma.device_control = at_xdmac_control;
+ atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps;
+
+ /* Disable all chans and interrupts. */
+ at_xdmac_off(atxdmac);
+
+ /* Init channels. */
+ INIT_LIST_HEAD(&atxdmac->dma.channels);
+ for (i = 0; i < nr_channels; i++) {
+ struct at_xdmac_chan *atchan = &atxdmac->chan[i];
+
+ atchan->chan.device = &atxdmac->dma;
+ list_add_tail(&atchan->chan.device_node,
+ &atxdmac->dma.channels);
+
+ atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
+ atchan->mask = 1 << i;
+
+ spin_lock_init(&atchan->lock);
+ INIT_LIST_HEAD(&atchan->xfers_list);
+ INIT_LIST_HEAD(&atchan->free_descs_list);
+ tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
+ (unsigned long)atchan);
+
+ /* Clear pending interrupts. */
+ while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
+ cpu_relax();
+ }
+ platform_set_drvdata(pdev, atxdmac);
+
+ ret = dma_async_device_register(&atxdmac->dma);
+ if (ret) {
+		dev_err(&pdev->dev, "failed to register DMA engine device\n");
+ goto err_clk_disable;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ at_xdmac_xlate, atxdmac);
+ if (ret) {
+		dev_err(&pdev->dev, "could not register OF DMA controller\n");
+ goto err_dma_unregister;
+ }
+
+ dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
+ nr_channels, atxdmac->regs);
+
+ return 0;
+
+err_dma_unregister:
+ dma_async_device_unregister(&atxdmac->dma);
+err_clk_disable:
+ clk_disable_unprepare(atxdmac->clk);
+err_free_irq:
+	/* free_irq() must be given the same dev_id as request_irq(). */
+	free_irq(atxdmac->irq, atxdmac);
+ return ret;
+}
+
+static int at_xdmac_remove(struct platform_device *pdev)
+{
+ struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
+ int i;
+
+ at_xdmac_off(atxdmac);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&atxdmac->dma);
+ clk_disable_unprepare(atxdmac->clk);
+
+ synchronize_irq(atxdmac->irq);
+
+	free_irq(atxdmac->irq, atxdmac);
+
+ for (i = 0; i < atxdmac->dma.chancnt; i++) {
+ struct at_xdmac_chan *atchan = &atxdmac->chan[i];
+
+ tasklet_kill(&atchan->tasklet);
+ at_xdmac_free_chan_resources(&atchan->chan);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
+ .prepare = atmel_xdmac_prepare,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
+};
+
+static const struct of_device_id atmel_xdmac_dt_ids[] = {
+ {
+ .compatible = "atmel,sama5d4-dma",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
+
+static struct platform_driver at_xdmac_driver = {
+ .probe = at_xdmac_probe,
+ .remove = at_xdmac_remove,
+ .driver = {
+ .name = "at_xdmac",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
+ .pm = &atmel_xdmac_dev_pm_ops,
+ }
+};
+
+static int __init at_xdmac_init(void)
+{
+ return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
+}
+subsys_initcall(at_xdmac_init);
+
+MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 68007974961a..918b7b3f766f 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -525,8 +525,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
vchan_init(&c->vc, &d->ddev);
INIT_LIST_HEAD(&c->node);
- d->ddev.chancnt++;
-
c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
c->ch = chan_id;
c->irq_number = irq;
@@ -694,7 +692,6 @@ static struct platform_driver bcm2835_dma_driver = {
.remove = bcm2835_dma_remove,
.driver = {
.name = "bcm2835-dma",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(bcm2835_dma_of_match),
},
};
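
This chancnt removal (and the similar ones in the drivers below) relies on the dmaengine core deriving the channel count itself from the device's channel list at registration time. A minimal sketch of that counting — count_channels() is a hypothetical illustration, not the core's actual function:

	#include <linux/dmaengine.h>
	#include <linux/list.h>

	/* Hypothetical illustration: how the core can derive chancnt. */
	static int count_channels(struct dma_device *device)
	{
		struct dma_chan *chan;
		int chancnt = 0;

		/* Each channel was added to device->channels with list_add_tail(). */
		list_for_each_entry(chan, &device->channels, device_node)
			chancnt++;

		return chancnt;
	}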
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index a58eec3b2cad..b743adf56465 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -1,3 +1,4 @@
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
@@ -567,7 +568,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
reg |= GCR_TEARDOWN;
cppi_writel(reg, c->gcr_reg);
c->td_queued = 1;
- c->td_retry = 100;
+ c->td_retry = 500;
}
if (!c->td_seen || !c->td_desc_seen) {
@@ -603,12 +604,16 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
* descriptor before the TD we fetch it from enqueue, it has to be
* there waiting for us.
*/
- if (!c->td_seen && c->td_retry)
+ if (!c->td_seen && c->td_retry) {
+ udelay(1);
return -EAGAIN;
-
+ }
WARN_ON(!c->td_retry);
+
if (!c->td_desc_seen) {
desc_phys = cppi41_pop_desc(cdd, c->q_num);
+ if (!desc_phys)
+ desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
WARN_ON(!desc_phys);
}
@@ -1088,7 +1093,6 @@ static struct platform_driver cpp41_dma_driver = {
.remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
- .owner = THIS_MODULE,
.pm = &cppi41_pm_ops,
.of_match_table = of_match_ptr(cppi41_dma_ids),
},
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index ae2ab14e64b3..bdeafeefa5f6 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -563,10 +563,9 @@ static int jz4740_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
dd->device_control = jz4740_dma_control;
dd->dev = &pdev->dev;
- dd->chancnt = JZ_DMA_NR_CHANS;
INIT_LIST_HEAD(&dd->channels);
- for (i = 0; i < dd->chancnt; i++) {
+ for (i = 0; i < JZ_DMA_NR_CHANS; i++) {
chan = &dmadev->chan[i];
chan->id = i;
chan->vchan.desc_free = jz4740_dma_desc_free;
@@ -608,7 +607,6 @@ static struct platform_driver jz4740_dma_driver = {
.remove = jz4740_dma_remove,
.driver = {
.name = "jz4740-dma",
- .owner = THIS_MODULE,
},
};
module_platform_driver(jz4740_dma_driver);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24bfaf0b92ba..e057935e3023 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -330,8 +330,7 @@ static int __init dma_channel_table_init(void)
if (err) {
pr_err("initialization failure\n");
for_each_dma_cap_mask(cap, dma_cap_mask_all)
- if (channel_table[cap])
- free_percpu(channel_table[cap]);
+ free_percpu(channel_table[cap]);
}
return err;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 3c5711d5fe97..6fb2e902b459 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -118,17 +118,17 @@
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
struct fsl_edma_hw_tcd {
- u32 saddr;
- u16 soff;
- u16 attr;
- u32 nbytes;
- u32 slast;
- u32 daddr;
- u16 doff;
- u16 citer;
- u32 dlast_sga;
- u16 csr;
- u16 biter;
+ __le32 saddr;
+ __le16 soff;
+ __le16 attr;
+ __le32 nbytes;
+ __le32 slast;
+ __le32 daddr;
+ __le16 doff;
+ __le16 citer;
+ __le32 dlast_sga;
+ __le16 csr;
+ __le16 biter;
};
struct fsl_edma_sw_tcd {
@@ -175,18 +175,12 @@ struct fsl_edma_engine {
};
/*
- * R/W functions for big- or little-endian registers
- * the eDMA controller's endian is independent of the CPU core's endian.
+ * R/W functions for big- or little-endian registers:
+ * The eDMA controller's endian is independent of the CPU core's endian.
+ * On the big-endian IP module, the byte offsets of 8-bit and 16-bit
+ * registers are also swapped relative to the little-endian layout.
*/
-static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
-{
- if (edma->big_endian)
- return ioread16be(addr);
- else
- return ioread16(addr);
-}
-
static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
if (edma->big_endian)
@@ -197,13 +191,18 @@ static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
{
- iowrite8(val, addr);
+ /* swap the reg offset for these in big-endian mode */
+ if (edma->big_endian)
+ iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
+ else
+ iowrite8(val, addr);
}
static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
{
+ /* swap the reg offset for these in big-endian mode */
if (edma->big_endian)
- iowrite16be(val, addr);
+ iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
else
iowrite16(val, addr);
}
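
The XOR in the two writers above converts a little-endian byte-lane address into the big-endian module's layout: within a 32-bit register, byte n sits at offset n ^ 0x3 and a 16-bit half at offset n ^ 0x2. A small user-space demonstration of the arithmetic (base is a hypothetical, 4-byte-aligned register address):

	#include <stdio.h>

	int main(void)
	{
		unsigned long base = 0x1000;	/* hypothetical aligned block */
		unsigned long n;

		/* 8-bit registers: little-endian offset n lands at n ^ 0x3 */
		for (n = 0; n < 4; n++)
			printf("8-bit  +%lu -> +%lu\n", n,
			       ((base + n) ^ 0x3UL) - base);

		/* 16-bit registers: little-endian offset n lands at n ^ 0x2 */
		for (n = 0; n < 4; n += 2)
			printf("16-bit +%lu -> +%lu\n", n,
			       ((base + n) ^ 0x2UL) - base);

		return 0;
	}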
@@ -254,13 +253,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
+ slot = EDMAMUX_CHCFG_SOURCE(slot);
if (enable)
- edma_writeb(fsl_chan->edma,
- EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
- muxaddr + ch_off);
+ iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
else
- edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+ iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
@@ -286,9 +284,8 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
fsl_desc = to_fsl_edma_desc(vdesc);
for (i = 0; i < fsl_desc->n_tcds; i++)
- dma_pool_free(fsl_desc->echan->tcd_pool,
- fsl_desc->tcd[i].vtcd,
- fsl_desc->tcd[i].ptcd);
+ dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
}
@@ -363,8 +360,8 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
/* calculate the total size in this desc */
for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
- * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+ len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
if (!in_progress)
return len;
@@ -376,17 +373,15 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
- * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+ size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
if (dir == DMA_MEM_TO_DEV)
- dma_addr = edma_readl(fsl_chan->edma,
- &(edesc->tcd[i].vtcd->saddr));
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
else
- dma_addr = edma_readl(fsl_chan->edma,
- &(edesc->tcd[i].vtcd->daddr));
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
len -= size;
- if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
+ if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
len += dma_addr + size - cur_addr;
break;
}
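
The relaxed comparison matters when the engine has just advanced to the start of a TCD: with cur_addr equal to dma_addr, the old strict '>' never matched, so the loop kept subtracting segment sizes and under-reported the residue. A worked example as a standalone program, assuming a single 0x100-byte segment at 0x1000:

	#include <stdio.h>

	int main(void)
	{
		unsigned long dma_addr = 0x1000, size = 0x100; /* one segment */
		unsigned long cur_addr = 0x1000; /* engine at segment start */
		unsigned long len = size;	 /* total bytes in descriptor */

		len -= size;			 /* assume segment finished... */
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size)
			len += dma_addr + size - cur_addr; /* ...add back rest */

		/*
		 * Prints 256: nothing transferred yet, the whole segment is
		 * outstanding. With the old strict '>' the branch is skipped
		 * and len stays 0.
		 */
		printf("residue = %lu bytes\n", len);
		return 0;
	}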
@@ -424,55 +419,67 @@ static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
-static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
- u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
- u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
- u16 csr)
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd)
{
+ struct fsl_edma_engine *edma = fsl_chan->edma;
void __iomem *addr = fsl_chan->edma->membase;
u32 ch = fsl_chan->vchan.chan.chan_id;
/*
- * TCD parameters have been swapped in fill_tcd_params(),
- * so just write them to registers in the cpu endian here
+	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little-endian
+	 * format. However, the TCD registers must be written in big- or
+	 * little-endian order, matching the eDMA engine's endianness.
*/
- writew(0, addr + EDMA_TCD_CSR(ch));
- writel(src, addr + EDMA_TCD_SADDR(ch));
- writel(dst, addr + EDMA_TCD_DADDR(ch));
- writew(attr, addr + EDMA_TCD_ATTR(ch));
- writew(soff, addr + EDMA_TCD_SOFF(ch));
- writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
- writel(slast, addr + EDMA_TCD_SLAST(ch));
- writew(citer, addr + EDMA_TCD_CITER(ch));
- writew(biter, addr + EDMA_TCD_BITER(ch));
- writew(doff, addr + EDMA_TCD_DOFF(ch));
- writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
- writew(csr, addr + EDMA_TCD_CSR(ch));
-}
-
-static void fill_tcd_params(struct fsl_edma_engine *edma,
- struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
- u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
- u16 biter, u16 doff, u32 dlast_sga, bool major_int,
- bool disable_req, bool enable_sg)
+ edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
+ edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
+ edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
+
+ edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
+ edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
+
+ edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
+ edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
+
+ edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
+ edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
+ edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
+
+ edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
+
+ edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
+}
+
+static inline
+void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+ u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+ u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
{
u16 csr = 0;
/*
- * eDMA hardware SGs require the TCD parameters stored in memory
- * the same endian as the eDMA module so that they can be loaded
- * automatically by the engine
+	 * eDMA hardware SGs require the TCDs to be stored in little-endian
+	 * format irrespective of the register endianness. So we store the
+	 * values in little endian in memory and let fsl_edma_set_tcd_regs()
+	 * perform any swap when loading the registers.
*/
- edma_writel(edma, src, &(tcd->saddr));
- edma_writel(edma, dst, &(tcd->daddr));
- edma_writew(edma, attr, &(tcd->attr));
- edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
- edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
- edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
- edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
- edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
- edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
- edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
+ tcd->saddr = cpu_to_le32(src);
+ tcd->daddr = cpu_to_le32(dst);
+
+ tcd->attr = cpu_to_le16(attr);
+
+ tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
+
+ tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
+ tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
+
+ tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
+ tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
+
+ tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
+
+ tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
if (major_int)
csr |= EDMA_TCD_CSR_INT_MAJOR;
@@ -482,7 +489,7 @@ static void fill_tcd_params(struct fsl_edma_engine *edma,
if (enable_sg)
csr |= EDMA_TCD_CSR_E_SG;
- edma_writew(edma, csr, &(tcd->csr));
+ tcd->csr = cpu_to_le16(csr);
}
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -558,9 +565,9 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
doff = fsl_chan->fsc.addr_width;
}
- fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
- dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
- iter, iter, doff, last_sg, true, false, true);
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_chan->fsc.attr, soff, nbytes, 0, iter,
+ iter, doff, last_sg, true, false, true);
dma_buf_next += period_len;
}
@@ -607,16 +614,16 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
iter = sg_dma_len(sg) / nbytes;
if (i < sg_len - 1) {
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
- src_addr, dst_addr, fsl_chan->fsc.attr,
- soff, nbytes, 0, iter, iter, doff, last_sg,
- false, false, true);
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->fsc.attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ false, false, true);
} else {
last_sg = 0;
- fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
- src_addr, dst_addr, fsl_chan->fsc.attr,
- soff, nbytes, 0, iter, iter, doff, last_sg,
- true, true, false);
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->fsc.attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ true, true, false);
}
}
@@ -625,17 +632,13 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
- struct fsl_edma_hw_tcd *tcd;
struct virt_dma_desc *vdesc;
vdesc = vchan_next_desc(&fsl_chan->vchan);
if (!vdesc)
return;
fsl_chan->edesc = to_fsl_edma_desc(vdesc);
- tcd = fsl_chan->edesc->tcd[0].vtcd;
- fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
- tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
- tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
+ fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 994bcb2c6b92..3d8feb5e4c2f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1337,7 +1337,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
/* Add the channel to DMA device channel list */
list_add_tail(&chan->common.device_node, &fdev->common.channels);
- fdev->common.chancnt++;
dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
chan->irq != NO_IRQ ? chan->irq : fdev->irq);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 88afc48c2ca7..d0df198f62e9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -729,6 +729,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
case IMX_DMATYPE_CSPI:
case IMX_DMATYPE_EXT:
case IMX_DMATYPE_SSI:
+ case IMX_DMATYPE_SAI:
per_2_emi = sdma->script_addrs->app_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_app_addr;
break;
@@ -1287,7 +1288,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
unsigned short *ram_code;
if (!fw) {
- dev_err(sdma->dev, "firmware not found\n");
+ dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
+ /* In this case we just use the ROM firmware. */
return;
}
@@ -1346,7 +1348,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
return ret;
}
-static int __init sdma_init(struct sdma_engine *sdma)
+static int sdma_init(struct sdma_engine *sdma)
{
int i, ret;
dma_addr_t ccb_phys;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 895f869d6c2c..32eae38291e5 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1265,9 +1265,17 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
op = IOAT_OP_XOR;
dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dest_dma))
+ goto dma_unmap;
+
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+ for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
DMA_PREP_INTERRUPT);
@@ -1298,7 +1306,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto dma_unmap;
}
- dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
@@ -1313,6 +1320,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
}
dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
/* skip validate if the capability is not present */
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
goto free_resources;
@@ -1327,8 +1336,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
xor_val_result = 1;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
&xor_val_result, DMA_PREP_INTERRUPT);
@@ -1374,8 +1388,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
xor_val_result = 0;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = DMA_ERROR_CODE;
+ for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_srcs[i]))
+ goto dma_unmap;
+ }
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
&xor_val_result, DMA_PREP_INTERRUPT);
@@ -1417,14 +1436,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
dma_unmap:
if (op == IOAT_OP_XOR) {
- dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dest_dma != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
+ if (dma_srcs[i] != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
} else if (op == IOAT_OP_XOR_VAL) {
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
+ if (dma_srcs[i] != DMA_ERROR_CODE)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
}
free_resources:
dma->device_free_chan_resources(dma_chan);
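
The pattern added above — pre-filling the handles with DMA_ERROR_CODE, checking every dma_map_page() with dma_mapping_error(), and unmapping only handles that are not DMA_ERROR_CODE — generalises to any multi-buffer mapping. A minimal sketch; map_all_or_none() is a hypothetical helper, and it sidesteps the sentinel by unwinding only what it actually mapped:

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	/* Hypothetical helper: map "count" pages, undoing everything on failure. */
	static int map_all_or_none(struct device *dev, struct page **pages,
				   dma_addr_t *handles, int count)
	{
		int i;

		for (i = 0; i < count; i++) {
			handles[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
						  DMA_TO_DEVICE);
			if (dma_mapping_error(dev, handles[i]))
				goto unmap;
		}
		return 0;

	unmap:
		/* Only entries 0..i-1 were mapped successfully. */
		while (--i >= 0)
			dma_unmap_page(dev, handles[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
		return -ENOMEM;
	}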
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c56137bc3868..263d9f6a207e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1557,7 +1557,6 @@ static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
.remove = iop_adma_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "iop-adma",
},
};
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1f911aaf220..a1de14ab2c51 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -722,7 +722,6 @@ static int k3_dma_probe(struct platform_device *op)
d->slave.device_issue_pending = k3_dma_issue_pending;
d->slave.device_control = k3_dma_control;
d->slave.copy_align = DMA_ALIGN;
- d->slave.chancnt = d->dma_requests;
/* init virtual channel */
d->chans = devm_kzalloc(&op->dev,
@@ -787,6 +786,7 @@ static int k3_dma_remove(struct platform_device *op)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend(struct device *dev)
{
struct k3_dma_dev *d = dev_get_drvdata(dev);
@@ -816,13 +816,13 @@ static int k3_dma_resume(struct device *dev)
k3_dma_enable_dma(d, true);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
static struct platform_driver k3_pdma_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &k3_dma_pmops,
.of_match_table = k3_pdma_dt_ids,
},
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a1a4db5721b8..8b8952f35e6c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1098,7 +1098,6 @@ static const struct platform_device_id mmp_pdma_id_table[] = {
static struct platform_driver mmp_pdma_driver = {
.driver = {
.name = "mmp-pdma",
- .owner = THIS_MODULE,
.of_match_table = mmp_pdma_dt_ids,
},
.id_table = mmp_pdma_id_table,
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index c6bd015b7165..bfb46957c3dc 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -703,7 +703,6 @@ static const struct platform_device_id mmp_tdma_id_table[] = {
static struct platform_driver mmp_tdma_driver = {
.driver = {
.name = "mmp-tdma",
- .owner = THIS_MODULE,
.of_match_table = mmp_tdma_dt_ids,
},
.id_table = mmp_tdma_id_table,
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 881db2bcb48b..01bec4023de2 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -885,6 +885,7 @@ static int mpc_dma_probe(struct platform_device *op)
struct resource res;
ulong regs_start, regs_size;
int retval, i;
+ u8 chancnt;
mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
if (!mdma) {
@@ -956,10 +957,6 @@ static int mpc_dma_probe(struct platform_device *op)
dma = &mdma->dma;
dma->dev = dev;
- if (mdma->is_mpc8308)
- dma->chancnt = MPC8308_DMACHAN_MAX;
- else
- dma->chancnt = MPC512x_DMACHAN_MAX;
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
@@ -972,7 +969,12 @@ static int mpc_dma_probe(struct platform_device *op)
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma_cap_set(DMA_SLAVE, dma->cap_mask);
- for (i = 0; i < dma->chancnt; i++) {
+ if (mdma->is_mpc8308)
+ chancnt = MPC8308_DMACHAN_MAX;
+ else
+ chancnt = MPC512x_DMACHAN_MAX;
+
+ for (i = 0; i < chancnt; i++) {
mchan = &mdma->channels[i];
mchan->chan.device = dma;
@@ -1090,7 +1092,6 @@ static struct platform_driver mpc_dma_driver = {
.remove = mpc_dma_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = mpc_dma_match,
},
};
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index bda20e6e1007..d7d61e1a01c3 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1500,7 +1500,6 @@ static const struct dev_pm_ops nbpf_pm_ops = {
static struct platform_driver nbpf_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "dma-nbpf",
.of_match_table = nbpf_match,
.pm = &nbpf_pm_ops,
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index bbea8243f9e8..6ea1aded7e74 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1074,8 +1074,6 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
vchan_init(&c->vc, &od->ddev);
INIT_LIST_HEAD(&c->node);
- od->ddev.chancnt++;
-
return 0;
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 9f9ca9fe5ce6..6e0e47d76b23 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -997,7 +997,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
-const struct pci_device_id pch_dma_id_table[] = {
+static const struct pci_device_id pch_dma_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 19a99743cf52..bdf40b530032 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
+#include <linux/pm_runtime.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
@@ -265,6 +266,9 @@ static unsigned cmd_line;
#define NR_DEFAULT_DESC 16
+/* Delay for runtime PM autosuspend, ms */
+#define PL330_AUTOSUSPEND_DELAY 20
+
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
u32 periph_id;
@@ -1958,6 +1962,7 @@ static void pl330_tasklet(unsigned long data)
struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
+ bool power_down = false;
spin_lock_irqsave(&pch->lock, flags);
@@ -1972,10 +1977,17 @@ static void pl330_tasklet(unsigned long data)
/* Try to submit a req imm. next to the last completed cookie */
fill_queue(pch);
- /* Make sure the PL330 Channel thread is active */
- spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
- spin_unlock(&pch->thread->dmac->lock);
+ if (list_empty(&pch->work_list)) {
+ spin_lock(&pch->thread->dmac->lock);
+ _stop(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
+ power_down = true;
+ } else {
+ /* Make sure the PL330 Channel thread is active */
+ spin_lock(&pch->thread->dmac->lock);
+ _start(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
+ }
while (!list_empty(&pch->completed_list)) {
dma_async_tx_callback callback;
@@ -1990,6 +2002,12 @@ static void pl330_tasklet(unsigned long data)
if (pch->cyclic) {
desc->status = PREP;
list_move_tail(&desc->node, &pch->work_list);
+ if (power_down) {
+ spin_lock(&pch->thread->dmac->lock);
+ _start(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
+ power_down = false;
+ }
} else {
desc->status = FREE;
list_move_tail(&desc->node, &pch->dmac->desc_pool);
@@ -2004,6 +2022,12 @@ static void pl330_tasklet(unsigned long data)
}
}
spin_unlock_irqrestore(&pch->lock, flags);
+
+ /* If work list empty, power down */
+ if (power_down) {
+ pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+ pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
+ }
}
bool pl330_filter(struct dma_chan *chan, void *param)
@@ -2073,6 +2097,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
switch (cmd) {
case DMA_TERMINATE_ALL:
+ pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
spin_lock(&pl330->lock);
@@ -2099,10 +2124,15 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
dma_cookie_complete(&desc->txd);
}
+ if (!list_empty(&pch->work_list))
+ pm_runtime_put(pl330->ddma.dev);
+
list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
+ pm_runtime_mark_last_busy(pl330->ddma.dev);
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
break;
case DMA_SLAVE_CONFIG:
slave_config = (struct dma_slave_config *)arg;
@@ -2138,6 +2168,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
tasklet_kill(&pch->task);
+ pm_runtime_get_sync(pch->dmac->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
pl330_release_channel(pch->thread);
@@ -2147,6 +2178,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
+ pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+ pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
static enum dma_status
@@ -2162,6 +2195,15 @@ static void pl330_issue_pending(struct dma_chan *chan)
unsigned long flags;
spin_lock_irqsave(&pch->lock, flags);
+ if (list_empty(&pch->work_list)) {
+ /*
+		 * Warn if nothing is pending: an empty submitted_list would
+		 * unbalance our pm_runtime usage counter, which is updated
+		 * based on whether work_list is empty.
+ */
+ WARN_ON(list_empty(&pch->submitted_list));
+ pm_runtime_get_sync(pch->dmac->ddma.dev);
+ }
list_splice_tail_init(&pch->submitted_list, &pch->work_list);
spin_unlock_irqrestore(&pch->lock, flags);
@@ -2594,6 +2636,46 @@ static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
return 0;
}
+/*
+ * Runtime PM callbacks are provided by the amba/bus.c driver.
+ *
+ * It is assumed here that IRQ-safe runtime PM is chosen in probe and that
+ * the amba bus driver will only disable/enable the clock in its runtime PM
+ * callbacks.
+ */
+static int __maybe_unused pl330_suspend(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+
+ pm_runtime_disable(dev);
+
+ if (!pm_runtime_status_suspended(dev)) {
+ /* amba did not disable the clock */
+ amba_pclk_disable(pcdev);
+ }
+ amba_pclk_unprepare(pcdev);
+
+ return 0;
+}
+
+static int __maybe_unused pl330_resume(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret;
+
+ ret = amba_pclk_prepare(pcdev);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_status_suspended(dev))
+ ret = amba_pclk_enable(pcdev);
+
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
+
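Together with the pm_runtime_* calls added in probe and in the fast paths above, the driver follows the standard get/put-autosuspend pairing. A minimal sketch of that idiom — do_one_transfer() is hypothetical, and dev is assumed to have been set up with pm_runtime_use_autosuspend():

	#include <linux/pm_runtime.h>

	static void do_one_transfer(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* resume (or keep) the hardware */

		/* ... program and run the transfer ... */

		/* Defer the actual suspend by the autosuspend delay. */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}
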
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -2619,6 +2701,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return -ENOMEM;
}
+ pd = &pl330->ddma;
+ pd->dev = &adev->dev;
+
pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
@@ -2655,7 +2740,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
dev_warn(&adev->dev, "unable to allocate desc\n");
- pd = &pl330->ddma;
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
@@ -2692,7 +2776,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
list_add_tail(&pch->chan.device_node, &pd->channels);
}
- pd->dev = &adev->dev;
if (pdat) {
pd->cap_mask = pdat->cap_mask;
} else {
@@ -2747,6 +2830,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
pcfg->num_peri, pcfg->num_events);
+ pm_runtime_irq_safe(&adev->dev);
+ pm_runtime_use_autosuspend(&adev->dev);
+ pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
+ pm_runtime_mark_last_busy(&adev->dev);
+ pm_runtime_put_autosuspend(&adev->dev);
+
return 0;
probe_err3:
/* Idle the DMAC */
@@ -2773,6 +2862,8 @@ static int pl330_remove(struct amba_device *adev)
struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
+ pm_runtime_get_noresume(pl330->ddma.dev);
+
if (adev->dev.of_node)
of_dma_controller_free(adev->dev.of_node);
@@ -2811,6 +2902,7 @@ static struct amba_driver pl330_driver = {
.drv = {
.owner = THIS_MODULE,
.name = "dma-pl330",
+ .pm = &pl330_pm,
},
.id_table = pl330_ids,
.probe = pl330_probe,
@@ -2819,6 +2911,6 @@ static struct amba_driver pl330_driver = {
module_amba_driver(pl330_driver);
-MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 7a4bbb0f80a5..3122a99ec06b 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -79,35 +79,97 @@ struct bam_async_desc {
struct bam_desc_hw desc[0];
};
-#define BAM_CTRL 0x0000
-#define BAM_REVISION 0x0004
-#define BAM_SW_REVISION 0x0080
-#define BAM_NUM_PIPES 0x003C
-#define BAM_TIMER 0x0040
-#define BAM_TIMER_CTRL 0x0044
-#define BAM_DESC_CNT_TRSHLD 0x0008
-#define BAM_IRQ_SRCS 0x000C
-#define BAM_IRQ_SRCS_MSK 0x0010
-#define BAM_IRQ_SRCS_UNMASKED 0x0030
-#define BAM_IRQ_STTS 0x0014
-#define BAM_IRQ_CLR 0x0018
-#define BAM_IRQ_EN 0x001C
-#define BAM_CNFG_BITS 0x007C
-#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80))
-#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80))
-#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000))
-#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000))
-#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000))
-#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000))
-#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000))
-#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000))
-#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000))
-#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000))
-#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000))
-#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000))
-#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000))
-#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000))
-#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000))
+enum bam_reg {
+ BAM_CTRL,
+ BAM_REVISION,
+ BAM_NUM_PIPES,
+ BAM_DESC_CNT_TRSHLD,
+ BAM_IRQ_SRCS,
+ BAM_IRQ_SRCS_MSK,
+ BAM_IRQ_SRCS_UNMASKED,
+ BAM_IRQ_STTS,
+ BAM_IRQ_CLR,
+ BAM_IRQ_EN,
+ BAM_CNFG_BITS,
+ BAM_IRQ_SRCS_EE,
+ BAM_IRQ_SRCS_MSK_EE,
+ BAM_P_CTRL,
+ BAM_P_RST,
+ BAM_P_HALT,
+ BAM_P_IRQ_STTS,
+ BAM_P_IRQ_CLR,
+ BAM_P_IRQ_EN,
+ BAM_P_EVNT_DEST_ADDR,
+ BAM_P_EVNT_REG,
+ BAM_P_SW_OFSTS,
+ BAM_P_DATA_FIFO_ADDR,
+ BAM_P_DESC_FIFO_ADDR,
+ BAM_P_EVNT_GEN_TRSHLD,
+ BAM_P_FIFO_SIZES,
+};
+
+struct reg_offset_data {
+ u32 base_offset;
+ unsigned int pipe_mult, evnt_mult, ee_mult;
+};
+
+static const struct reg_offset_data bam_v1_3_reg_info[] = {
+ [BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 },
+ [BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 },
+ [BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 },
+ [BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 },
+ [BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 },
+ [BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 },
+ [BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 },
+ [BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 },
+ [BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 },
+ [BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 },
+ [BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 },
+ [BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 },
+ [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 },
+ [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 },
+ [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 },
+ [BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 },
+ [BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 },
+ [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 },
+ [BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 },
+};
+
+static const struct reg_offset_data bam_v1_4_reg_info[] = {
+ [BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 },
+ [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 },
+ [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 },
+ [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 },
+ [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 },
+ [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 },
+ [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 },
+ [BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 },
+ [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
+ [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 },
+ [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 },
+ [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 },
+ [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
+ [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
+ [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
+ [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 },
+};
/* BAM CTRL */
#define BAM_SW_RST BIT(0)
@@ -297,6 +359,8 @@ struct bam_device {
/* execution environment ID, from DT */
u32 ee;
+ const struct reg_offset_data *layout;
+
struct clk *bamclk;
int irq;
@@ -305,6 +369,23 @@ struct bam_device {
};
/**
+ * bam_addr - returns BAM register address
+ * @bdev: bam device
+ * @pipe: pipe instance (ignored when register doesn't have multiple instances)
+ * @reg: register enum
+ */
+static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
+ enum bam_reg reg)
+{
+ const struct reg_offset_data r = bdev->layout[reg];
+
+ return bdev->regs + r.base_offset +
+ r.pipe_mult * pipe +
+ r.evnt_mult * pipe +
+ r.ee_mult * bdev->ee;
+}
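
Worked example of the lookup, using the v1.4 table above: BAM_P_RST on pipe 2 resolves to 0x1004 + 0x1000 * 2 = 0x3004, and BAM_IRQ_SRCS_MSK_EE with ee = 1 to 0x0804 + 0x80 * 1 = 0x0884. The same arithmetic as a small standalone check (bam_offset() is a hypothetical re-statement of bam_addr() on plain offsets):

	#include <stdio.h>

	struct reg_offset_data {
		unsigned int base_offset, pipe_mult, evnt_mult, ee_mult;
	};

	static unsigned int bam_offset(const struct reg_offset_data *r,
				       unsigned int pipe, unsigned int ee)
	{
		return r->base_offset + r->pipe_mult * pipe +
		       r->evnt_mult * pipe + r->ee_mult * ee;
	}

	int main(void)
	{
		const struct reg_offset_data p_rst = { 0x1004, 0x1000, 0, 0 };
		const struct reg_offset_data msk_ee = { 0x0804, 0, 0, 0x80 };

		printf("BAM_P_RST(pipe 2)         = 0x%x\n",
		       bam_offset(&p_rst, 2, 0));
		printf("BAM_IRQ_SRCS_MSK_EE(ee 1) = 0x%x\n",
		       bam_offset(&msk_ee, 0, 1));
		return 0;
	}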
+
+/**
* bam_reset_channel - Reset individual BAM DMA channel
* @bchan: bam channel
*
@@ -317,8 +398,8 @@ static void bam_reset_channel(struct bam_chan *bchan)
lockdep_assert_held(&bchan->vc.lock);
/* reset channel */
- writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
- writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));
+ writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
+ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
/* don't allow cpu to reorder BAM register accesses done after this */
wmb();
@@ -347,17 +428,18 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
* because we allocated 1 more descriptor (8 bytes) than we can use
*/
writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
- bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
- writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
- BAM_P_FIFO_SIZES(bchan->id));
+ bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
+ writel_relaxed(BAM_DESC_FIFO_SIZE,
+ bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
- writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+ writel_relaxed(P_DEFAULT_IRQS_EN,
+ bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
/* unmask the specific pipe and EE combo */
- val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
val |= BIT(bchan->id);
- writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
/* don't allow cpu to reorder the channel enable done below */
wmb();
@@ -367,7 +449,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
if (dir == DMA_DEV_TO_MEM)
val |= P_DIRECTION;
- writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));
+ writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
bchan->initialized = 1;
@@ -432,12 +514,12 @@ static void bam_free_chan(struct dma_chan *chan)
bchan->fifo_virt = NULL;
/* mask irq for pipe/channel */
- val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
val &= ~BIT(bchan->id);
- writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
/* disable irq */
- writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
}
/**
@@ -583,14 +665,14 @@ static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_PAUSE:
spin_lock_irqsave(&bchan->vc.lock, flag);
- writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
+ writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan->paused = 1;
spin_unlock_irqrestore(&bchan->vc.lock, flag);
break;
case DMA_RESUME:
spin_lock_irqsave(&bchan->vc.lock, flag);
- writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
+ writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan->paused = 0;
spin_unlock_irqrestore(&bchan->vc.lock, flag);
break;
@@ -626,7 +708,7 @@ static u32 process_channel_irqs(struct bam_device *bdev)
unsigned long flags;
struct bam_async_desc *async_desc;
- srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));
+ srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
/* return early if no pipe/channel interrupts are present */
if (!(srcs & P_IRQ))
@@ -639,11 +721,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
continue;
/* clear pipe irq */
- pipe_stts = readl_relaxed(bdev->regs +
- BAM_P_IRQ_STTS(i));
+ pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));
- writel_relaxed(pipe_stts, bdev->regs +
- BAM_P_IRQ_CLR(i));
+ writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
spin_lock_irqsave(&bchan->vc.lock, flags);
async_desc = bchan->curr_txd;
@@ -694,12 +774,12 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
tasklet_schedule(&bdev->task);
if (srcs & BAM_IRQ)
- clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);
+ clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
/* don't allow reorder of the various accesses to the BAM registers */
mb();
- writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);
+ writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
return IRQ_HANDLED;
}
@@ -763,7 +843,7 @@ static void bam_apply_new_config(struct bam_chan *bchan,
else
maxburst = bchan->slave.dst_maxburst;
- writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);
+ writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
bchan->reconfigure = 0;
}
@@ -830,7 +910,7 @@ static void bam_start_dma(struct bam_chan *bchan)
/* ensure descriptor writes and dma start not reordered */
wmb();
writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
- bdev->regs + BAM_P_EVNT_REG(bchan->id));
+ bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
}
/**
@@ -918,43 +998,44 @@ static int bam_init(struct bam_device *bdev)
u32 val;
/* read revision and configuration information */
- val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
val &= NUM_EES_MASK;
/* check that configured EE is within range */
if (bdev->ee >= val)
return -EINVAL;
- val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
bdev->num_channels = val & BAM_NUM_PIPES_MASK;
/* s/w reset bam */
/* after reset all pipes are disabled and idle */
- val = readl_relaxed(bdev->regs + BAM_CTRL);
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
val |= BAM_SW_RST;
- writel_relaxed(val, bdev->regs + BAM_CTRL);
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
val &= ~BAM_SW_RST;
- writel_relaxed(val, bdev->regs + BAM_CTRL);
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* make sure previous stores are visible before enabling BAM */
wmb();
/* enable bam */
val |= BAM_EN;
- writel_relaxed(val, bdev->regs + BAM_CTRL);
+ writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
	/* set descriptor threshold, start with 4 bytes */
- writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);
+ writel_relaxed(DEFAULT_CNT_THRSHLD,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
	/* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
- writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);
+ writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
/* enable irqs for errors */
writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
- bdev->regs + BAM_IRQ_EN);
+ bam_addr(bdev, 0, BAM_IRQ_EN));
/* unmask global bam interrupt */
- writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
return 0;
}
@@ -969,9 +1050,18 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
bchan->vc.desc_free = bam_dma_free_desc;
}
+static const struct of_device_id bam_of_match[] = {
+ { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
+ { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, bam_of_match);
+
static int bam_dma_probe(struct platform_device *pdev)
{
struct bam_device *bdev;
+ const struct of_device_id *match;
struct resource *iores;
int ret, i;
@@ -981,6 +1071,14 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->dev = &pdev->dev;
+ match = of_match_node(bam_of_match, pdev->dev.of_node);
+ if (!match) {
+ dev_err(&pdev->dev, "Unsupported BAM module\n");
+ return -ENODEV;
+ }
+
+ bdev->layout = match->data;
+
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(bdev->regs))
@@ -1084,7 +1182,7 @@ static int bam_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&bdev->common);
/* mask all interrupts for this execution environment */
- writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+ writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
devm_free_irq(bdev->dev, bdev->irq, bdev);
@@ -1104,18 +1202,11 @@ static int bam_dma_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id bam_of_match[] = {
- { .compatible = "qcom,bam-v1.4.0", },
- {}
-};
-MODULE_DEVICE_TABLE(of, bam_of_match);
-
static struct platform_driver bam_dma_driver = {
.probe = bam_dma_probe,
.remove = bam_dma_remove,
.driver = {
.name = "bam-dma-engine",
- .owner = THIS_MODULE,
.of_match_table = bam_of_match,
},
};
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 7416572d1e40..6941a77521c3 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1402,7 +1402,6 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
static struct platform_driver s3c24xx_dma_driver = {
.driver = {
.name = "s3c24xx-dma",
- .owner = THIS_MODULE,
},
.id_table = s3c24xx_dma_driver_ids,
.probe = s3c24xx_dma_probe,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 4b0ef043729a..2329d295efb5 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -829,7 +829,6 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
{
unsigned i;
- dmadev->chancnt = ARRAY_SIZE(chan_desc);
INIT_LIST_HEAD(&dmadev->channels);
dmadev->dev = dev;
dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
@@ -838,7 +837,7 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
dmadev->device_tx_status = sa11x0_dma_tx_status;
dmadev->device_issue_pending = sa11x0_dma_issue_pending;
- for (i = 0; i < dmadev->chancnt; i++) {
+ for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
struct sa11x0_dma_chan *c;
c = kzalloc(sizeof(*c), GFP_KERNEL);
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
index 80fd2aeb4870..d95bbdd721f4 100644
--- a/drivers/dma/sh/rcar-audmapp.c
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -253,7 +253,6 @@ static int audmapp_chan_probe(struct platform_device *pdev,
static void audmapp_chan_remove(struct audmapp_device *audev)
{
- struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
struct shdma_chan *schan;
int i;
@@ -261,7 +260,6 @@ static void audmapp_chan_remove(struct audmapp_device *audev)
BUG_ON(!schan);
shdma_chan_remove(schan);
}
- dma_dev->chancnt = 0;
}
static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
@@ -367,7 +365,6 @@ static struct platform_driver audmapp_driver = {
.probe = audmapp_probe,
.remove = audmapp_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "rcar-audmapp-engine",
.of_match_table = audmapp_of_match,
},
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index b212d9471ab5..20a6f6f2a018 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -619,7 +619,6 @@ error:
static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
{
- struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
struct shdma_chan *schan;
int i;
@@ -628,7 +627,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
shdma_chan_remove(schan);
}
- dma_dev->chancnt = 0;
}
static int hpb_dmae_remove(struct platform_device *pdev)
@@ -655,7 +653,6 @@ static struct platform_driver hpb_dmae_driver = {
.remove = hpb_dmae_remove,
.shutdown = hpb_dmae_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = "hpb-dma-engine",
},
};
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 42d497416196..3a2adb131d46 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -391,6 +391,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
pm_runtime_put(schan->dev);
schan->pm_state = SHDMA_PM_ESTABLISHED;
+ } else if (schan->pm_state == SHDMA_PM_PENDING) {
+ shdma_chan_xfer_ld_queue(schan);
}
}
}
@@ -951,7 +953,7 @@ void shdma_chan_probe(struct shdma_dev *sdev,
/* Add the channel to DMA device channel list */
list_add_tail(&schan->dma_chan.device_node,
&sdev->dma_dev.channels);
- sdev->schan[sdev->dma_dev.chancnt++] = schan;
+ sdev->schan[id] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);
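The shdma_chan_probe() change above belongs to a cleanup that runs through this whole series: drivers stop maintaining dma_device.chancnt because the dmaengine core derives it at registration time. A simplified sketch of what the core does (the real loop in dma_async_device_register() also assigns chan->chan_id):

#include <linux/dmaengine.h>

/* Sketch: the core counts channels itself, so the driver-side chancnt
 * bookkeeping removed in the hunks above and below is redundant. */
static unsigned int count_dma_channels(struct dma_device *device)
{
	struct dma_chan *chan;
	unsigned int n = 0;

	list_for_each_entry(chan, &device->channels, device_node)
		n++;
	return n;
}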
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index b4ff9d3e56d1..f999f9b0d314 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static struct platform_driver shdma_of = {
.driver = {
- .owner = THIS_MODULE,
.name = "shdma-of",
.of_match_table = shdma_of_match,
},
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 58eb85770eba..b65317c6ea4e 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -572,7 +572,6 @@ err_no_irq:
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
- struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
struct shdma_chan *schan;
int i;
@@ -581,7 +580,6 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
shdma_chan_remove(schan);
}
- dma_dev->chancnt = 0;
}
static void sh_dmae_shutdown(struct platform_device *pdev)
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 3ce103909896..6da2eaa6c294 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -295,7 +295,6 @@ err_no_irq:
static void sudmac_chan_remove(struct sudmac_device *su_dev)
{
- struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
struct shdma_chan *schan;
int i;
@@ -304,7 +303,6 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev)
shdma_chan_remove(schan);
}
- dma_dev->chancnt = 0;
}
static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
@@ -411,7 +409,6 @@ static int sudmac_remove(struct platform_device *pdev)
static struct platform_driver sudmac_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = SUDMAC_DRV_NAME,
},
.probe = sudmac_probe,
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index aac03ab10c54..feb1e8ab8d7b 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -735,7 +735,6 @@ static int sirfsoc_dma_probe(struct platform_device *op)
dma = &sdma->dma;
dma->dev = dev;
- dma->chancnt = SIRFSOC_DMA_CHANNELS;
dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
@@ -752,7 +751,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
- for (i = 0; i < dma->chancnt; i++) {
+ for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
schan = &sdma->channels[i];
schan->chan.device = dma;
@@ -835,6 +834,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
@@ -916,6 +916,7 @@ static int sirfsoc_dma_pm_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
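The new #ifdef CONFIG_PM_SLEEP guard makes sense because the pm_suspend/pm_resume pair is presumably referenced only through SET_SYSTEM_SLEEP_PM_OPS (the hunk context is cut before that line), and that macro expands to nothing when CONFIG_PM_SLEEP is off, leaving the functions defined but unused. A sketch of the resulting shape, assuming that macro is indeed used here:

#ifdef CONFIG_PM_SLEEP
static int sirfsoc_dma_pm_suspend(struct device *dev) { /* save regs */ return 0; }
static int sirfsoc_dma_pm_resume(struct device *dev) { /* restore regs */ return 0; }
#endif

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend,
			   sirfsoc_dma_runtime_resume, NULL)
	/* compiles out when PM_SLEEP is off, avoiding -Wunused-function */
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};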
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index d9ca3e32d748..4d0710648b08 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3432,6 +3432,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
d40_err(base->dev, "Failed to allocate %d pages.\n",
base->lcla_pool.pages);
+ ret = -ENOMEM;
for (j = 0; j < i; j++)
free_pages(page_list[j], base->lcla_pool.pages);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 91292f5513ff..159f1736a16f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -26,24 +27,6 @@
#include "virt-dma.h"
/*
- * There's 16 physical channels that can work in parallel.
- *
- * However we have 30 different endpoints for our requests.
- *
- * Since the channels are able to handle only an unidirectional
- * transfer, we need to allocate more virtual channels so that
- * everyone can grab one channel.
- *
- * Some devices can't work in both direction (mostly because it
- * wouldn't make sense), so we have a bit fewer virtual channels than
- * 2 channels per endpoints.
- */
-
-#define NR_MAX_CHANNELS 16
-#define NR_MAX_REQUESTS 30
-#define NR_MAX_VCHANS 53
-
-/*
* Common registers
*/
#define DMA_IRQ_EN(x) ((x) * 0x04)
@@ -60,6 +43,12 @@
#define DMA_STAT 0x30
/*
+ * sun8i specific registers
+ */
+#define SUN8I_DMA_GATE 0x20
+#define SUN8I_DMA_GATE_ENABLE 0x4
+
+/*
 * Channel-specific registers
*/
#define DMA_CHAN_ENABLE 0x00
@@ -102,6 +91,19 @@
#define DRQ_SDRAM 1
/*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun6i_dma_config {
+ u32 nr_max_channels;
+ u32 nr_max_requests;
+ u32 nr_max_vchans;
+};
+
+/*
* Hardware representation of the LLI
*
* The hardware will be fed the physical address of this structure,
@@ -159,6 +161,7 @@ struct sun6i_dma_dev {
struct dma_pool *pool;
struct sun6i_pchan *pchans;
struct sun6i_vchan *vchans;
+ const struct sun6i_dma_config *cfg;
};
static struct device *chan2dev(struct dma_chan *chan)
@@ -426,6 +429,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
static void sun6i_dma_tasklet(unsigned long data)
{
struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
+ const struct sun6i_dma_config *cfg = sdev->cfg;
struct sun6i_vchan *vchan;
struct sun6i_pchan *pchan;
unsigned int pchan_alloc = 0;
@@ -453,7 +457,7 @@ static void sun6i_dma_tasklet(unsigned long data)
}
spin_lock_irq(&sdev->lock);
- for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+ for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
pchan = &sdev->pchans[pchan_idx];
if (pchan->vchan || list_empty(&sdev->pending))
@@ -474,7 +478,7 @@ static void sun6i_dma_tasklet(unsigned long data)
}
spin_unlock_irq(&sdev->lock);
- for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+ for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
if (!(pchan_alloc & BIT(pchan_idx)))
continue;
@@ -496,7 +500,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
int i, j, ret = IRQ_NONE;
u32 status;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) {
status = readl(sdev->base + DMA_IRQ_STAT(i));
if (!status)
continue;
@@ -506,7 +510,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
writel(status, sdev->base + DMA_IRQ_STAT(i));
- for (j = 0; (j < 8) && status; j++) {
+ for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
if (status & DMA_IRQ_QUEUE) {
pchan = sdev->pchans + j;
vchan = pchan->vchan;
@@ -519,7 +523,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
}
}
- status = status >> 4;
+ status = status >> DMA_IRQ_CHAN_WIDTH;
}
if (!atomic_read(&sdev->tasklet_shutdown))
@@ -815,7 +819,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
struct dma_chan *chan;
u8 port = dma_spec->args[0];
- if (port > NR_MAX_REQUESTS)
+ if (port > sdev->cfg->nr_max_requests)
return NULL;
chan = dma_get_any_slave_channel(&sdev->slave);
@@ -848,7 +852,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
int i;
- for (i = 0; i < NR_MAX_VCHANS; i++) {
+ for (i = 0; i < sdev->cfg->nr_max_vchans; i++) {
struct sun6i_vchan *vchan = &sdev->vchans[i];
list_del(&vchan->vc.chan.device_node);
@@ -856,8 +860,48 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
}
}
+/*
+ * For A31:
+ *
+ * There are 16 physical channels that can work in parallel.
+ *
+ * However we have 30 different endpoints for our requests.
+ *
+ * Since the channels are able to handle only a unidirectional
+ * transfer, we need to allocate more virtual channels so that
+ * everyone can grab one channel.
+ *
+ * Some devices can't work in both directions (mostly because it
+ * wouldn't make sense), so we have a bit fewer virtual channels than
+ * two channels per endpoint.
+ */
+
+static struct sun6i_dma_config sun6i_a31_dma_cfg = {
+ .nr_max_channels = 16,
+ .nr_max_requests = 30,
+ .nr_max_vchans = 53,
+};
+
+/*
+ * The A23 only has 8 physical channels, a maximum DRQ port id of 24,
+ * and a total of 37 usable source and destination endpoints.
+ */
+
+static struct sun6i_dma_config sun8i_a23_dma_cfg = {
+ .nr_max_channels = 8,
+ .nr_max_requests = 24,
+ .nr_max_vchans = 37,
+};
+
+static struct of_device_id sun6i_dma_match[] = {
+ { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
+ { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
+ { /* sentinel */ }
+};
+
static int sun6i_dma_probe(struct platform_device *pdev)
{
+ const struct of_device_id *device;
struct sun6i_dma_dev *sdc;
struct resource *res;
int ret, i;
@@ -866,6 +910,11 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc)
return -ENOMEM;
+ device = of_match_device(sun6i_dma_match, &pdev->dev);
+ if (!device)
+ return -ENODEV;
+ sdc->cfg = device->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sdc->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sdc->base))
@@ -912,31 +961,30 @@ static int sun6i_dma_probe(struct platform_device *pdev)
sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
sdc->slave.device_control = sun6i_dma_control;
- sdc->slave.chancnt = NR_MAX_VCHANS;
sdc->slave.copy_align = 4;
sdc->slave.dev = &pdev->dev;
- sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
+ sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
sizeof(struct sun6i_pchan), GFP_KERNEL);
if (!sdc->pchans)
return -ENOMEM;
- sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
+ sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans,
sizeof(struct sun6i_vchan), GFP_KERNEL);
if (!sdc->vchans)
return -ENOMEM;
tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
- for (i = 0; i < NR_MAX_CHANNELS; i++) {
+ for (i = 0; i < sdc->cfg->nr_max_channels; i++) {
struct sun6i_pchan *pchan = &sdc->pchans[i];
pchan->idx = i;
pchan->base = sdc->base + 0x100 + i * 0x40;
}
- for (i = 0; i < NR_MAX_VCHANS; i++) {
+ for (i = 0; i < sdc->cfg->nr_max_vchans; i++) {
struct sun6i_vchan *vchan = &sdc->vchans[i];
INIT_LIST_HEAD(&vchan->node);
@@ -976,6 +1024,15 @@ static int sun6i_dma_probe(struct platform_device *pdev)
goto err_dma_unregister;
}
+ /*
+ * The sun8i variant requires us to toggle a DMA gating register,
+ * as seen in Allwinner's SDK. This register is not documented
+ * in the A23 user manual.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun8i-a23-dma"))
+ writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
+
return 0;
err_dma_unregister:
@@ -1008,11 +1065,6 @@ static int sun6i_dma_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id sun6i_dma_match[] = {
- { .compatible = "allwinner,sun6i-a31-dma" },
- { /* sentinel */ }
-};
-
static struct platform_driver sun6i_dma_driver = {
.probe = sun6i_dma_probe,
.remove = sun6i_dma_remove,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 1c867d0303db..d8450c3f35f0 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1597,7 +1597,6 @@ static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
static struct platform_driver tegra_dmac_driver = {
.driver = {
.name = "tegra-apbdma",
- .owner = THIS_MODULE,
.pm = &tegra_dma_dev_pm_ops,
.of_match_table = tegra_dma_of_match,
},
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 4506a7b4f972..2407ccf1a64b 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -783,7 +783,6 @@ static int td_remove(struct platform_device *pdev)
static struct platform_driver td_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = td_probe,
.remove = td_remove,
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index a6e64767186e..4a3a8f3137b3 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -942,6 +942,9 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
if (!xt->numf || !xt->sgl[0].size)
return NULL;
+ if (xt->frame_size != 1)
+ return NULL;
+
/* Allocate a transaction descriptor. */
desc = xilinx_vdma_alloc_tx_descriptor(chan);
if (!desc)
@@ -960,7 +963,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
hw = &segment->hw;
hw->vsize = xt->numf;
hw->hsize = xt->sgl[0].size;
- hw->stride = xt->sgl[0].icg <<
+ hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
hw->stride |= chan->config.frm_dly <<
XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
@@ -971,9 +974,11 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
hw->buf_addr = xt->src_start;
/* Link the previous next descriptor to current */
- prev = list_last_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
+ if (!list_empty(&desc->segments)) {
+ prev = list_last_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ prev->hw.next_desc = segment->phys;
+ }
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
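The stride fix above follows from dmaengine's interleaved-template semantics: sgl[0].icg is the gap after each chunk, not the full line pitch, so the stride programmed into the VDMA must be size + icg. A minimal helper illustrating the arithmetic; xt_row_pitch is illustrative, not part of the driver:

#include <linux/dmaengine.h>

/* Byte distance between the start of row r and row r + 1 of an
 * interleaved template: chunk size plus the inter-chunk gap. */
static inline size_t xt_row_pitch(const struct dma_interleaved_template *xt)
{
	return xt->sgl[0].size + xt->sgl[0].icg;
}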
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 3d42043fec51..8c6363cdd208 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -487,7 +487,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
* it. It should be fixed as of version 1.9, but to be on the safe side
* we'll leave the limitation below for the 2.2.x tree.
*/
- if (!strncmp((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI", 20)) {
+ if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI")) {
drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
/* This value will be visible in the /proc/ide/hdx/settings */
drive->pc_delay = IDEFLOPPY_PC_DELAY;
@@ -498,7 +498,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
* Guess what? The IOMEGA Clik! drive also needs the above fix. It makes
* nasty clicking noises without it, so please don't remove this.
*/
- if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
+ if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA Clik!")) {
blk_queue_max_hw_sectors(drive->queue, 64);
drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
/* IOMEGA Clik! drives do not support lock/unlock commands */
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 2ce6268a2734..e29b02ca9e91 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -101,8 +101,7 @@ void ide_device_put(ide_drive_t *drive)
struct device *host_dev = drive->hwif->host->dev[0];
struct module *module = host_dev ? host_dev->driver->owner : NULL;
- if (module)
- module_put(module);
+ module_put(module);
#endif
put_device(&drive->gendev);
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6dbfbc209491..30f0e61341c5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -144,13 +144,26 @@ config OMAP_IOMMU
select IOMMU_API
config OMAP_IOMMU_DEBUG
- tristate "Export OMAP IOMMU internals in DebugFS"
- depends on OMAP_IOMMU && DEBUG_FS
- help
- Select this to see extensive information about
- the internal state of OMAP IOMMU in debugfs.
+ bool "Export OMAP IOMMU internals in DebugFS"
+ depends on OMAP_IOMMU && DEBUG_FS
+ ---help---
+ Select this to see extensive information about
+ the internal state of OMAP IOMMU in debugfs.
+
+ Say N unless you know you need this.
- Say N unless you know you need this.
+config ROCKCHIP_IOMMU
+ bool "Rockchip IOMMU Support"
+ depends on ARM
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMUs found on Rockchip rk32xx SoCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+ Say Y here if you are using a Rockchip SoC that includes an IOMMU
+ device.
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 16edef74b8ee..7b976f294a69 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
-obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2d84c9edf3b8..b205f76d7129 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return true;
case IOMMU_CAP_INTR_REMAP:
return (irq_remapping_enabled == 1);
+ case IOMMU_CAP_NOEXEC:
+ return false;
}
return false;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 9c0d6e290097..bea878f8e7d3 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state(struct pasid_state *pasid_state)
{
- if (atomic_dec_and_test(&pasid_state->count)) {
- put_device_state(pasid_state->device_state);
+ if (atomic_dec_and_test(&pasid_state->count))
wake_up(&pasid_state->wq);
- }
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
@@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)
prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
- if (atomic_dec_and_test(&pasid_state->count))
- put_device_state(pasid_state->device_state);
- else
+ if (!atomic_dec_and_test(&pasid_state->count))
schedule();
finish_wait(&pasid_state->wq, &wait);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e393ae01b5d2..b8aac1389a96 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -404,9 +404,16 @@ struct arm_smmu_cfg {
#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
+enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
+};
+
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct arm_smmu_cfg cfg;
+ enum arm_smmu_domain_stage stage;
spinlock_t lock;
};
@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
- if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+ /*
+ * Mapping the requested stage onto what we support is surprisingly
+ * complicated, mainly because the spec allows S1+S2 SMMUs without
+ * support for nested translation. That means we end up with the
+ * following table:
+ *
+ * Requested Supported Actual
+ * S1 N S1
+ * S1 S1+S2 S1
+ * S1 S2 S2
+ * S1 S1 S1
+ * N N N
+ * N S1+S2 S2
+ * N S2 S2
+ * N S1 S1
+ *
+ * Note that you can't actually request stage-2 mappings.
+ */
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+ start = smmu->num_s2_context_banks;
+ break;
+ case ARM_SMMU_DOMAIN_NESTED:
/*
* We will likely want to change this if/when KVM gets
* involved.
*/
- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
- start = smmu->num_s2_context_banks;
- } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
- start = smmu->num_s2_context_banks;
- } else {
+ case ARM_SMMU_DOMAIN_S2:
cfg->cbar = CBAR_TYPE_S2_TRANS;
start = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
}
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long pfn, int prot, int stage)
{
pte_t *pte, *start;
- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
+ pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
pteval |= ARM_SMMU_PTE_MEMATTR_NC;
}
+ if (prot & IOMMU_NOEXEC)
+ pteval |= ARM_SMMU_PTE_XN;
+
/* If no access, create a faulting entry to avoid TLB fills */
- if (prot & IOMMU_EXEC)
- pteval &= ~ARM_SMMU_PTE_XN;
- else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE;
pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
return true;
case IOMMU_CAP_INTR_REMAP:
return true; /* MSIs are just memory writes */
+ case IOMMU_CAP_NOEXEC:
+ return true;
default:
return false;
}
@@ -1644,21 +1681,57 @@ static void arm_smmu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
+static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu)
+ return -EPERM;
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
static const struct iommu_ops arm_smmu_ops = {
- .capable = arm_smmu_capable,
- .domain_init = arm_smmu_domain_init,
- .domain_destroy = arm_smmu_domain_destroy,
- .attach_dev = arm_smmu_attach_dev,
- .detach_dev = arm_smmu_detach_dev,
- .map = arm_smmu_map,
- .unmap = arm_smmu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
- .pgsize_bitmap = (SECTION_SIZE |
- ARM_SMMU_PTE_CONT_SIZE |
- PAGE_SIZE),
+ .capable = arm_smmu_capable,
+ .domain_init = arm_smmu_domain_init,
+ .domain_destroy = arm_smmu_domain_destroy,
+ .attach_dev = arm_smmu_attach_dev,
+ .detach_dev = arm_smmu_detach_dev,
+ .map = arm_smmu_map,
+ .unmap = arm_smmu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .add_device = arm_smmu_add_device,
+ .remove_device = arm_smmu_remove_device,
+ .domain_get_attr = arm_smmu_domain_get_attr,
+ .domain_set_attr = arm_smmu_domain_set_attr,
+ .pgsize_bitmap = (SECTION_SIZE |
+ ARM_SMMU_PTE_CONT_SIZE |
+ PAGE_SIZE),
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -2073,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void)
{
+ struct device_node *np;
int ret;
+ /*
+ * Play nice with systems that don't have an ARM SMMU by checking that
+ * an ARM SMMU exists in the system before proceeding with the driver
+ * and IOMMU bus operation registration.
+ */
+ np = of_find_matching_node(NULL, arm_smmu_of_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
ret = platform_driver_register(&arm_smmu_driver);
if (ret)
return ret;
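With the domain_get_attr/domain_set_attr handlers added above, a caller can request nested translation through the generic IOMMU attribute API; note that set_attr returns -EPERM once the domain is live, so the attribute must be set before any device is attached. A hedged usage sketch, assuming <linux/iommu.h> and <linux/platform_device.h>:

static struct iommu_domain *alloc_nested_domain(void)
{
	int nesting = 1;
	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);

	if (!domain)
		return NULL;
	/* Must precede attach_dev(); afterwards the SMMU driver rejects it. */
	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting))
		pr_warn("nesting not supported, staying with stage-1\n");
	return domain;
}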
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c5c61cabd6e3..9847613085e1 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -44,6 +44,14 @@
#include "irq_remapping.h"
+typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
+struct dmar_res_callback {
+ dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
+ void *arg[ACPI_DMAR_TYPE_RESERVED];
+ bool ignore_unhandled;
+ bool print_entry;
+};
+
/*
* Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
+static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
-static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
+static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
* add INCLUDE_ALL at the tail, so scan the list will find it at
@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = {
.priority = INT_MIN,
};
+static struct dmar_drhd_unit *
+dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
+ if (dmaru->segment == drhd->segment &&
+ dmaru->reg_base_addr == drhd->address)
+ return dmaru;
+
+ return NULL;
+}
+
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
* present in the platform
*/
-static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header)
+static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru;
int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *)header;
- dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
+ dmaru = dmar_find_dmaru(drhd);
+ if (dmaru)
+ goto out;
+
+ dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
if (!dmaru)
return -ENOMEM;
- dmaru->hdr = header;
+ /*
+ * If header is allocated from slab by ACPI _DSM method, we need to
+ * copy the content because the memory buffer will be freed on return.
+ */
+ dmaru->hdr = (void *)(dmaru + 1);
+ memcpy(dmaru->hdr, header, header->length);
dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return ret;
}
dmar_register_drhd_unit(dmaru);
+
+out:
+ if (arg)
+ (*(int *)arg)++;
+
return 0;
}
@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
kfree(dmaru);
}
-static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
+static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
+ void *arg)
{
struct acpi_dmar_andd *andd = (void *)header;
@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
}
#ifdef CONFIG_ACPI_NUMA
-static int __init
-dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_rhsa *rhsa;
struct dmar_drhd_unit *drhd;
@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
return 0;
}
+#else
+#define dmar_parse_one_rhsa dmar_res_noop
#endif
static void __init
@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void)
return (ACPI_SUCCESS(status) ? 1 : 0);
}
+static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
+ size_t len, struct dmar_res_callback *cb)
+{
+ int ret = 0;
+ struct acpi_dmar_header *iter, *next;
+ struct acpi_dmar_header *end = ((void *)start) + len;
+
+ for (iter = start; iter < end && ret == 0; iter = next) {
+ next = (void *)iter + iter->length;
+ if (iter->length == 0) {
+ /* Avoid looping forever on bad ACPI tables */
+ pr_debug(FW_BUG "Invalid 0-length structure\n");
+ break;
+ } else if (next > end) {
+ /* Avoid passing table end */
+ pr_warn(FW_BUG "record passes table end\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (cb->print_entry)
+ dmar_table_print_dmar_entry(iter);
+
+ if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
+ /* continue for forward compatibility */
+ pr_debug("Unknown DMAR structure type %d\n",
+ iter->type);
+ } else if (cb->cb[iter->type]) {
+ ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
+ } else if (!cb->ignore_unhandled) {
+ pr_warn("No handler for DMAR structure type %d\n",
+ iter->type);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
+ struct dmar_res_callback *cb)
+{
+ return dmar_walk_remapping_entries((void *)(dmar + 1),
+ dmar->header.length - sizeof(*dmar), cb);
+}
+
/**
* parse_dmar_table - parses the DMA reporting table
*/
@@ -510,9 +593,18 @@ static int __init
parse_dmar_table(void)
{
struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
int ret = 0;
int drhd_count = 0;
+ struct dmar_res_callback cb = {
+ .print_entry = true,
+ .ignore_unhandled = true,
+ .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
+ .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
+ .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
+ .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
+ .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
+ .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
+ };
/*
* Do it again, earlier dmar_tbl mapping could be mapped with
@@ -536,51 +628,10 @@ parse_dmar_table(void)
}
pr_info("Host address width %d\n", dmar->width + 1);
-
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- pr_warn("Invalid 0-length structure\n");
- ret = -EINVAL;
- break;
- }
-
- dmar_table_print_dmar_entry(entry_header);
-
- switch (entry_header->type) {
- case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd_count++;
- ret = dmar_parse_one_drhd(entry_header);
- break;
- case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- ret = dmar_parse_one_rmrr(entry_header);
- break;
- case ACPI_DMAR_TYPE_ROOT_ATS:
- ret = dmar_parse_one_atsr(entry_header);
- break;
- case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
-#ifdef CONFIG_ACPI_NUMA
- ret = dmar_parse_one_rhsa(entry_header);
-#endif
- break;
- case ACPI_DMAR_TYPE_NAMESPACE:
- ret = dmar_parse_one_andd(entry_header);
- break;
- default:
- pr_warn("Unknown DMAR structure type %d\n",
- entry_header->type);
- ret = 0; /* for forward compatibility */
- break;
- }
- if (ret)
- break;
-
- entry_header = ((void *)entry_header + entry_header->length);
- }
- if (drhd_count == 0)
+ ret = dmar_walk_dmar_table(dmar, &cb);
+ if (ret == 0 && drhd_count == 0)
pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
+
return ret;
}
@@ -778,76 +829,68 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}
-static int __init check_zero_address(void)
+static int __ref
+dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
- struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
struct acpi_dmar_hardware_unit *drhd;
+ void __iomem *addr;
+ u64 cap, ecap;
- dmar = (struct acpi_table_dmar *)dmar_tbl;
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
-
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- pr_warn("Invalid 0-length structure\n");
- return 0;
- }
+ drhd = (void *)entry;
+ if (!drhd->address) {
+ warn_invalid_dmar(0, "");
+ return -EINVAL;
+ }
- if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
- void __iomem *addr;
- u64 cap, ecap;
+ if (arg)
+ addr = ioremap(drhd->address, VTD_PAGE_SIZE);
+ else
+ addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+ if (!addr) {
+ pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
+ return -EINVAL;
+ }
- drhd = (void *)entry_header;
- if (!drhd->address) {
- warn_invalid_dmar(0, "");
- goto failed;
- }
+ cap = dmar_readq(addr + DMAR_CAP_REG);
+ ecap = dmar_readq(addr + DMAR_ECAP_REG);
- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
- if (!addr ) {
- printk("IOMMU: can't validate: %llx\n", drhd->address);
- goto failed;
- }
- cap = dmar_readq(addr + DMAR_CAP_REG);
- ecap = dmar_readq(addr + DMAR_ECAP_REG);
- early_iounmap(addr, VTD_PAGE_SIZE);
- if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->address,
- " returns all ones");
- goto failed;
- }
- }
+ if (arg)
+ iounmap(addr);
+ else
+ early_iounmap(addr, VTD_PAGE_SIZE);
- entry_header = ((void *)entry_header + entry_header->length);
+ if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
+ warn_invalid_dmar(drhd->address, " returns all ones");
+ return -EINVAL;
}
- return 1;
-failed:
return 0;
}
int __init detect_intel_iommu(void)
{
int ret;
+ struct dmar_res_callback validate_drhd_cb = {
+ .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
+ .ignore_unhandled = true,
+ };
down_write(&dmar_global_lock);
ret = dmar_table_detect();
if (ret)
- ret = check_zero_address();
- {
- if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
- iommu_detected = 1;
- /* Make sure ACS will be enabled */
- pci_request_acs();
- }
+ ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
+ &validate_drhd_cb);
+ if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ iommu_detected = 1;
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+ }
#ifdef CONFIG_X86
- if (ret)
- x86_init.iommu.iommu_init = intel_iommu_init;
+ if (ret)
+ x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- }
+
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
up_write(&dmar_global_lock);
@@ -931,11 +974,32 @@ out:
return err;
}
+static int dmar_alloc_seq_id(struct intel_iommu *iommu)
+{
+ iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
+ DMAR_UNITS_SUPPORTED);
+ if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
+ iommu->seq_id = -1;
+ } else {
+ set_bit(iommu->seq_id, dmar_seq_ids);
+ sprintf(iommu->name, "dmar%d", iommu->seq_id);
+ }
+
+ return iommu->seq_id;
+}
+
+static void dmar_free_seq_id(struct intel_iommu *iommu)
+{
+ if (iommu->seq_id >= 0) {
+ clear_bit(iommu->seq_id, dmar_seq_ids);
+ iommu->seq_id = -1;
+ }
+}
+
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
- static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
int err;
@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;
- iommu->seq_id = iommu_allocated++;
- sprintf (iommu->name, "dmar%d", iommu->seq_id);
+ if (dmar_alloc_seq_id(iommu) < 0) {
+ pr_err("IOMMU: failed to allocate seq_id\n");
+ err = -ENOSPC;
+ goto error;
+ }
err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
pr_err("IOMMU: failed to map %s\n", iommu->name);
- goto error;
+ goto error_free_seq_id;
}
err = -EINVAL;
@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
- err_unmap:
+err_unmap:
unmap_iommu(iommu);
- error:
+error_free_seq_id:
+ dmar_free_seq_id(iommu);
+error:
kfree(iommu);
return err;
}
@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
+ dmar_free_seq_id(iommu);
kfree(iommu);
}
@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void)
return dmar->flags & 0x1;
}
+/* Check whether DMAR units are in use */
+static inline bool dmar_in_use(void)
+{
+ return irq_remapping_enabled || intel_iommu_enabled;
+}
+
static int __init dmar_free_unused_resources(void)
{
struct dmar_drhd_unit *dmaru, *dmaru_n;
- /* DMAR units are in use */
- if (irq_remapping_enabled || intel_iommu_enabled)
+ if (dmar_in_use())
return 0;
if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void)
late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
+
+/*
+ * DMAR Hotplug Support
+ * For more details, please refer to Intel(R) Virtualization Technology
+ * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
+ * "Remapping Hardware Unit Hot Plug".
+ */
+static u8 dmar_hp_uuid[] = {
+ /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
+ /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
+};
+
+/*
+ * Currently there's only one revision and BIOS will not check the revision id,
+ * so use 0 for safety.
+ */
+#define DMAR_DSM_REV_ID 0
+#define DMAR_DSM_FUNC_DRHD 1
+#define DMAR_DSM_FUNC_ATSR 2
+#define DMAR_DSM_FUNC_RHSA 3
+
+static inline bool dmar_detect_dsm(acpi_handle handle, int func)
+{
+ return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
+}
+
+static int dmar_walk_dsm_resource(acpi_handle handle, int func,
+ dmar_res_handler_t handler, void *arg)
+{
+ int ret = -ENODEV;
+ union acpi_object *obj;
+ struct acpi_dmar_header *start;
+ struct dmar_res_callback callback;
+ static int res_type[] = {
+ [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
+ [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
+ [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
+ };
+
+ if (!dmar_detect_dsm(handle, func))
+ return 0;
+
+ obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
+ func, NULL, ACPI_TYPE_BUFFER);
+ if (!obj)
+ return -ENODEV;
+
+ memset(&callback, 0, sizeof(callback));
+ callback.cb[res_type[func]] = handler;
+ callback.arg[res_type[func]] = arg;
+ start = (struct acpi_dmar_header *)obj->buffer.pointer;
+ ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
+
+ ACPI_FREE(obj);
+
+ return ret;
+}
+
+static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ int ret;
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (!dmaru)
+ return -ENODEV;
+
+ ret = dmar_ir_hotplug(dmaru, true);
+ if (ret == 0)
+ ret = dmar_iommu_hotplug(dmaru, true);
+
+ return ret;
+}
+
+static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ int i, ret;
+ struct device *dev;
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (!dmaru)
+ return 0;
+
+ /*
+ * All PCI devices managed by this unit should have been destroyed.
+ */
+ if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
+ for_each_active_dev_scope(dmaru->devices,
+ dmaru->devices_cnt, i, dev)
+ return -EBUSY;
+
+ ret = dmar_ir_hotplug(dmaru, false);
+ if (ret == 0)
+ ret = dmar_iommu_hotplug(dmaru, false);
+
+ return ret;
+}
+
+static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (dmaru) {
+ list_del_rcu(&dmaru->list);
+ synchronize_rcu();
+ dmar_free_drhd(dmaru);
+ }
+
+ return 0;
+}
+
+static int dmar_hotplug_insert(acpi_handle handle)
+{
+ int ret;
+ int drhd_count = 0;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_validate_one_drhd, (void *)1);
+ if (ret)
+ goto out;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_parse_one_drhd, (void *)&drhd_count);
+ if (ret == 0 && drhd_count == 0) {
+ pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
+ goto out;
+ } else if (ret) {
+ goto release_drhd;
+ }
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
+ &dmar_parse_one_rhsa, NULL);
+ if (ret)
+ goto release_drhd;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_parse_one_atsr, NULL);
+ if (ret)
+ goto release_atsr;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_add_drhd, NULL);
+ if (!ret)
+ return 0;
+
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_remove_drhd, NULL);
+release_atsr:
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_release_one_atsr, NULL);
+release_drhd:
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_release_drhd, NULL);
+out:
+ return ret;
+}
+
+static int dmar_hotplug_remove(acpi_handle handle)
+{
+ int ret;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_check_one_atsr, NULL);
+ if (ret)
+ return ret;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_remove_drhd, NULL);
+ if (ret == 0) {
+ WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_release_one_atsr, NULL));
+ WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_release_drhd, NULL));
+ } else {
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_add_drhd, NULL);
+ }
+
+ return ret;
+}
+
+static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
+ void *context, void **retval)
+{
+ acpi_handle *phdl = retval;
+
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ *phdl = handle;
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+static int dmar_device_hotplug(acpi_handle handle, bool insert)
+{
+ int ret;
+ acpi_handle tmp = NULL;
+ acpi_status status;
+
+ if (!dmar_in_use())
+ return 0;
+
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ tmp = handle;
+ } else {
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX,
+ dmar_get_dsm_handle,
+ NULL, NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("Failed to locate _DSM method.\n");
+ return -ENXIO;
+ }
+ }
+ if (tmp == NULL)
+ return 0;
+
+ down_write(&dmar_global_lock);
+ if (insert)
+ ret = dmar_hotplug_insert(tmp);
+ else
+ ret = dmar_hotplug_remove(tmp);
+ up_write(&dmar_global_lock);
+
+ return ret;
+}
+
+int dmar_device_add(acpi_handle handle)
+{
+ return dmar_device_hotplug(handle, true);
+}
+
+int dmar_device_remove(acpi_handle handle)
+{
+ return dmar_device_hotplug(handle, false);
+}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 02cd26a17fe0..1232336b960e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
+ root->val &= ~VTD_PAGE_MASK;
root->val |= value & VTD_PAGE_MASK;
}
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
static inline void context_set_address_root(struct context_entry *context,
unsigned long value)
{
+ context->lo &= ~VTD_PAGE_MASK;
context->lo |= value & VTD_PAGE_MASK;
}
@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
-/* define the limit of IOMMUs supported in each domain */
-#ifdef CONFIG_X86
-# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define IOMMU_UNITS_SUPPORTED 64
-#endif
-
struct dmar_domain {
int id; /* domain id */
int nid; /* node id */
- DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+ DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses */
struct list_head devices; /* all devices' list */
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
- if (!root)
+ if (!root) {
+ pr_err("IOMMU: allocating root entry for %s failed\n",
+ iommu->name);
return -ENOMEM;
+ }
__iommu_flush_cache(iommu, root, ROOT_SIZE);
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return 0;
}
-static void free_dmar_iommu(struct intel_iommu *iommu)
+static void disable_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
+}
- kfree(iommu->domains);
- kfree(iommu->domain_ids);
- iommu->domains = NULL;
- iommu->domain_ids = NULL;
+static void free_dmar_iommu(struct intel_iommu *iommu)
+{
+ if ((iommu->domains) && (iommu->domain_ids)) {
+ kfree(iommu->domains);
+ kfree(iommu->domain_ids);
+ iommu->domains = NULL;
+ iommu->domain_ids = NULL;
+ }
g_iommus[iommu->seq_id] = NULL;
@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
{
struct dma_pte *first_pte = NULL, *pte = NULL;
phys_addr_t uninitialized_var(pteval);
- unsigned long sg_res;
+ unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
- if (sg)
- sg_res = 0;
- else {
- sg_res = nr_pages + 1;
+ if (!sg) {
+ sg_res = nr_pages;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
+static void intel_iommu_init_qi(struct intel_iommu *iommu)
+{
+ /*
+ * Start from a sane iommu hardware state.
+ * If the queued invalidation is already initialized by us
+ * (for example, while enabling interrupt-remapping) then
+ * we got the things already rolling from a sane state.
+ */
+ if (!iommu->qi) {
+ /*
+ * Clear any previous faults.
+ */
+ dmar_fault(-1, iommu);
+ /*
+ * Disable queued invalidation if supported and already enabled
+ * before OS handover.
+ */
+ dmar_disable_qi(iommu);
+ }
+
+ if (dmar_enable_qi(iommu)) {
+ /*
+ * Queued Invalidate not enabled, use Register Based Invalidate
+ */
+ iommu->flush.flush_context = __iommu_flush_context;
+ iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+ pr_info("IOMMU: %s using Register based invalidation\n",
+ iommu->name);
+ } else {
+ iommu->flush.flush_context = qi_flush_context;
+ iommu->flush.flush_iotlb = qi_flush_iotlb;
+ pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+ }
+}
+
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -2728,14 +2764,18 @@ static int __init init_dmars(void)
* threaded kernel __init code path all other access are read
* only
*/
- if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
g_num_of_iommus++;
continue;
}
printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
- IOMMU_UNITS_SUPPORTED);
+ DMAR_UNITS_SUPPORTED);
}
+ /* Preallocate enough resources for IOMMU hot-addition */
+ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
+ g_num_of_iommus = DMAR_UNITS_SUPPORTED;
+
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL);
if (!g_iommus) {
@@ -2764,58 +2804,14 @@ static int __init init_dmars(void)
* among all IOMMU's. Need to Split it later.
*/
ret = iommu_alloc_root_entry(iommu);
- if (ret) {
- printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+ if (ret)
goto free_iommu;
- }
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
}
- /*
- * Start from the sane iommu hardware state.
- */
- for_each_active_iommu(iommu, drhd) {
- /*
- * If the queued invalidation is already initialized by us
- * (for example, while enabling interrupt-remapping) then
- * we got the things already rolling from a sane state.
- */
- if (iommu->qi)
- continue;
-
- /*
- * Clear any previous faults.
- */
- dmar_fault(-1, iommu);
- /*
- * Disable queued invalidation if supported and already enabled
- * before OS handover.
- */
- dmar_disable_qi(iommu);
- }
-
- for_each_active_iommu(iommu, drhd) {
- if (dmar_enable_qi(iommu)) {
- /*
- * Queued Invalidate not enabled, use Register Based
- * Invalidate
- */
- iommu->flush.flush_context = __iommu_flush_context;
- iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- } else {
- iommu->flush.flush_context = qi_flush_context;
- iommu->flush.flush_iotlb = qi_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- }
- }
+ for_each_active_iommu(iommu, drhd)
+ intel_iommu_init_qi(iommu);
if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2901,8 +2897,10 @@ static int __init init_dmars(void)
return 0;
free_iommu:
- for_each_active_iommu(iommu, drhd)
+ for_each_active_iommu(iommu, drhd) {
+ disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
+ }
kfree(deferred_flush);
free_g_iommus:
kfree(g_iommus);
@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
-int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
return 0;
}
-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
+{
+ struct dmar_atsr_unit *atsru;
+ struct acpi_dmar_atsr *tmp;
+
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+ tmp = (struct acpi_dmar_atsr *)atsru->hdr;
+ if (atsr->segment != tmp->segment)
+ continue;
+ if (atsr->header.length != tmp->header.length)
+ continue;
+ if (memcmp(atsr, tmp, atsr->header.length) == 0)
+ return atsru;
+ }
+
+ return NULL;
+}
+
+int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
+ if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
+ return 0;
+
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ atsru = dmar_find_atsr(atsr);
+ if (atsru)
+ return 0;
+
+ atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
if (!atsru)
return -ENOMEM;
- atsru->hdr = hdr;
+ /*
+ * If memory is allocated from slab by ACPI _DSM method, we need to
+ * copy the memory content because the memory buffer will be freed
+ * on return.
+ */
+ atsru->hdr = (void *)(atsru + 1);
+ memcpy(atsru->hdr, hdr, hdr->length);
atsru->include_all = atsr->flags & 0x1;
if (!atsru->include_all) {
atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
kfree(atsru);
}
+int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = dmar_find_atsr(atsr);
+ if (atsru) {
+ list_del_rcu(&atsru->list);
+ synchronize_rcu();
+ intel_iommu_free_atsr(atsru);
+ }
+
+ return 0;
+}
+
+int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+ int i;
+ struct device *dev;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = dmar_find_atsr(atsr);
+ if (!atsru)
+ return 0;
+
+ if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+ for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+ i, dev)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+{
+ int sp, ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (g_iommus[iommu->seq_id])
+ return 0;
+
+ if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
+ pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+ if (!ecap_sc_support(iommu->ecap) &&
+ domain_update_iommu_snooping(iommu)) {
+ pr_warn("IOMMU: %s doesn't support snooping.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+ sp = domain_update_iommu_superpage(iommu) - 1;
+ if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
+ pr_warn("IOMMU: %s doesn't support large page.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+
+ /*
+ * Disable translation if already enabled prior to OS handover.
+ */
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+
+ g_iommus[iommu->seq_id] = iommu;
+ ret = iommu_init_domains(iommu);
+ if (ret == 0)
+ ret = iommu_alloc_root_entry(iommu);
+ if (ret)
+ goto out;
+
+ if (dmaru->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(iommu);
+ return 0;
+ }
+
+ intel_iommu_init_qi(iommu);
+ iommu_flush_write_buffer(iommu);
+ ret = dmar_set_interrupt(iommu);
+ if (ret)
+ goto disable_iommu;
+
+ iommu_set_root_entry(iommu);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ iommu_enable_translation(iommu);
+
+ if (si_domain) {
+ ret = iommu_attach_domain(si_domain, iommu);
+ if (ret < 0 || si_domain->id != ret)
+ goto disable_iommu;
+ domain_attach_iommu(si_domain, iommu);
+ }
+
+ iommu_disable_protect_mem_regions(iommu);
+ return 0;
+
+disable_iommu:
+ disable_dmar_iommu(iommu);
+out:
+ free_dmar_iommu(iommu);
+ return ret;
+}
+
+int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (!intel_iommu_enabled)
+ return 0;
+ if (iommu == NULL)
+ return -EINVAL;
+
+ if (insert) {
+ ret = intel_iommu_add(dmaru);
+ } else {
+ disable_dmar_iommu(iommu);
+ free_dmar_iommu(iommu);
+ }
+
+ return ret;
+}
+
static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 7c80661b35c1..27541d440849 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -36,7 +36,6 @@ struct hpet_scope {
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
-static int ir_ioapic_num, ir_hpet_num;
/*
* Lock ordering:
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
int i;
for (i = 0; i < MAX_HPET_TBS; i++)
- if (ir_hpet[i].id == hpet_id)
+ if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
return ir_hpet[i].iommu;
return NULL;
}
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
int i;
for (i = 0; i < MAX_IO_APICS; i++)
- if (ir_ioapic[i].id == apic)
+ if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
return ir_ioapic[i].iommu;
return NULL;
}
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) {
- if (ir_ioapic[i].id == apic) {
+ if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) {
- if (ir_hpet[i].id == id) {
+ if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break;
}
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-
-static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
+static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;
- ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
- GFP_ATOMIC);
+ if (iommu->ir_table)
+ return 0;
- if (!iommu->ir_table)
+ ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+ if (!ir_table)
return -ENOMEM;
pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
- kfree(iommu->ir_table);
- return -ENOMEM;
+ goto out_free_table;
}
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
- __free_pages(pages, INTR_REMAP_PAGE_ORDER);
- kfree(ir_table);
- return -ENOMEM;
+ goto out_free_pages;
}
ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
-
- iommu_set_irq_remapping(iommu, mode);
+ iommu->ir_table = ir_table;
return 0;
+
+out_free_pages:
+ __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+out_free_table:
+ kfree(ir_table);
+ return -ENOMEM;
+}
+
+static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
+{
+ if (iommu && iommu->ir_table) {
+ free_pages((unsigned long)iommu->ir_table->base,
+ INTR_REMAP_PAGE_ORDER);
+ kfree(iommu->ir_table->bitmap);
+ kfree(iommu->ir_table);
+ iommu->ir_table = NULL;
+ }
}
/*
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
if (!ecap_ir_support(iommu->ecap))
continue;
- if (intel_setup_irq_remapping(iommu, eim))
+ if (intel_setup_irq_remapping(iommu))
goto error;
+ iommu_set_irq_remapping(iommu, eim);
setup = 1;
}
@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void)
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error:
- /*
- * handle error condition gracefully here!
- */
+ for_each_iommu(iommu, drhd)
+ if (ecap_ir_support(iommu->ecap)) {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ }
if (x2apic_present)
pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
@@ -699,12 +714,13 @@ error:
return -1;
}
-static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
+static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu,
+ struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
- int count;
+ int count, free = -1;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
PCI_SECONDARY_BUS);
path++;
}
- ir_hpet[ir_hpet_num].bus = bus;
- ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
- ir_hpet[ir_hpet_num].iommu = iommu;
- ir_hpet[ir_hpet_num].id = scope->enumeration_id;
- ir_hpet_num++;
+
+ for (count = 0; count < MAX_HPET_TBS; count++) {
+ if (ir_hpet[count].iommu == iommu &&
+ ir_hpet[count].id == scope->enumeration_id)
+ return 0;
+ else if (ir_hpet[count].iommu == NULL && free == -1)
+ free = count;
+ }
+ if (free == -1) {
+ pr_warn("Exceeded Max HPET blocks\n");
+ return -ENOSPC;
+ }
+
+ ir_hpet[free].iommu = iommu;
+ ir_hpet[free].id = scope->enumeration_id;
+ ir_hpet[free].bus = bus;
+ ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
+ pr_info("HPET id %d under DRHD base 0x%Lx\n",
+ scope->enumeration_id, drhd->address);
+
+ return 0;
}
-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
+static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu,
+ struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
- int count;
+ int count, free = -1;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
path++;
}
- ir_ioapic[ir_ioapic_num].bus = bus;
- ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
- ir_ioapic[ir_ioapic_num].iommu = iommu;
- ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
- ir_ioapic_num++;
+ for (count = 0; count < MAX_IO_APICS; count++) {
+ if (ir_ioapic[count].iommu == iommu &&
+ ir_ioapic[count].id == scope->enumeration_id)
+ return 0;
+ else if (ir_ioapic[count].iommu == NULL && free == -1)
+ free = count;
+ }
+ if (free == -1) {
+ pr_warn("Exceeded Max IO APICS\n");
+ return -ENOSPC;
+ }
+
+ ir_ioapic[free].bus = bus;
+ ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
+ ir_ioapic[free].iommu = iommu;
+ ir_ioapic[free].id = scope->enumeration_id;
+ pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
+ scope->enumeration_id, drhd->address, iommu->seq_id);
+
+ return 0;
}
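
Both scope parsers now share the same table-scan idiom: walk the fixed-size array once, returning early if this (iommu, id) pair is already registered, while remembering the first empty slot in case it is not. A self-contained sketch of that scan, with simplified types and hypothetical values:

	#include <stdio.h>

	#define MAX_SLOTS 4

	struct slot { void *owner; int id; };

	static struct slot table[MAX_SLOTS];

	/* Returns 0 if (owner, id) is present or was inserted, -1 if full. */
	static int register_slot(void *owner, int id)
	{
		int i, free = -1;

		for (i = 0; i < MAX_SLOTS; i++) {
			if (table[i].owner == owner && table[i].id == id)
				return 0;		/* already registered */
			else if (table[i].owner == NULL && free == -1)
				free = i;		/* remember first hole */
		}
		if (free == -1)
			return -1;			/* table exhausted */

		table[free].owner = owner;
		table[free].id = id;
		return 0;
	}

	int main(void)
	{
		int dummy;

		printf("first:  %d\n", register_slot(&dummy, 7));  /* inserted */
		printf("repeat: %d\n", register_slot(&dummy, 7));  /* dedup hit */
		return 0;
	}
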
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
+ int ret = 0;
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_device_scope *scope;
void *start, *end;
drhd = (struct acpi_dmar_hardware_unit *)header;
-
start = (void *)(drhd + 1);
end = ((void *)drhd) + header->length;
- while (start < end) {
+ while (start < end && ret == 0) {
scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- if (ir_ioapic_num == MAX_IO_APICS) {
- printk(KERN_WARNING "Exceeded Max IO APICS\n");
- return -1;
- }
-
- printk(KERN_INFO "IOAPIC id %d under DRHD base "
- " 0x%Lx IOMMU %d\n", scope->enumeration_id,
- drhd->address, iommu->seq_id);
+ if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
+ ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
+ else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
+ ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
+ start += scope->length;
+ }
- ir_parse_one_ioapic_scope(scope, iommu);
- } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
- if (ir_hpet_num == MAX_HPET_TBS) {
- printk(KERN_WARNING "Exceeded Max HPET blocks\n");
- return -1;
- }
+ return ret;
+}
- printk(KERN_INFO "HPET id %d under DRHD base"
- " 0x%Lx\n", scope->enumeration_id,
- drhd->address);
+static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
+{
+ int i;
- ir_parse_one_hpet_scope(scope, iommu);
- }
- start += scope->length;
- }
+ for (i = 0; i < MAX_HPET_TBS; i++)
+ if (ir_hpet[i].iommu == iommu)
+ ir_hpet[i].iommu = NULL;
- return 0;
+ for (i = 0; i < MAX_IO_APICS; i++)
+ if (ir_ioapic[i].iommu == iommu)
+ ir_ioapic[i].iommu = NULL;
}
/*
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = {
.msi_setup_irq = intel_msi_setup_irq,
.alloc_hpet_msi = intel_alloc_hpet_msi,
};
+
+/*
+ * Support of Interrupt Remapping Unit Hotplug
+ */
+static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
+{
+ int ret;
+ int eim = x2apic_enabled();
+
+ if (eim && !ecap_eim_support(iommu->ecap)) {
+ pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
+ iommu->reg_phys, iommu->ecap);
+ return -ENODEV;
+ }
+
+ if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
+ pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
+ iommu->reg_phys);
+ return -ENODEV;
+ }
+
+ /* TODO: check all IOAPICs are covered by IOMMU */
+
+ /* Setup Interrupt-remapping now. */
+ ret = intel_setup_irq_remapping(iommu);
+ if (ret) {
+ pr_err("DRHD %Lx: failed to allocate resource\n",
+ iommu->reg_phys);
+ ir_remove_ioapic_hpet_scope(iommu);
+ return ret;
+ }
+
+ if (!iommu->qi) {
+ /* Clear previous faults. */
+ dmar_fault(-1, iommu);
+ iommu_disable_irq_remapping(iommu);
+ dmar_disable_qi(iommu);
+ }
+
+ /* Enable queued invalidation */
+ ret = dmar_enable_qi(iommu);
+ if (!ret) {
+ iommu_set_irq_remapping(iommu, eim);
+ } else {
+ pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
+ iommu->reg_phys, iommu->ecap, ret);
+ intel_teardown_irq_remapping(iommu);
+ ir_remove_ioapic_hpet_scope(iommu);
+ }
+
+ return ret;
+}
+
+int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (!irq_remapping_enabled)
+ return 0;
+ if (iommu == NULL)
+ return -EINVAL;
+ if (!ecap_ir_support(iommu->ecap))
+ return 0;
+
+ if (insert) {
+ if (!iommu->ir_table)
+ ret = dmar_ir_add(dmaru, iommu);
+ } else {
+ if (iommu->ir_table) {
+ if (!bitmap_empty(iommu->ir_table->bitmap,
+ INTR_REMAP_TABLE_ENTRIES)) {
+ ret = -EBUSY;
+ } else {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ ir_remove_ioapic_hpet_scope(iommu);
+ }
+ }
+ }
+
+ return ret;
+}
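
Note the removal branch of dmar_ir_hotplug(): an IR unit is only torn down when its IRTE allocation bitmap is empty, meaning no interrupt is still routed through it; otherwise the function returns -EBUSY and the hot-remove is refused. A userspace sketch of the emptiness test over a bitmap stored as an array of unsigned longs, mirroring what bitmap_empty() checks (assuming, for brevity, a size that is a multiple of the word width):

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define ENTRIES 128
	#define WORDS   (ENTRIES / (sizeof(unsigned long) * CHAR_BIT))

	static bool bitmap_is_empty(const unsigned long *map, unsigned int words)
	{
		unsigned int i;

		for (i = 0; i < words; i++)
			if (map[i])
				return false;	/* an entry is still in use */
		return true;
	}

	int main(void)
	{
		unsigned long irte_map[WORDS] = { 0 };

		printf("idle: %d\n", bitmap_is_empty(irte_map, WORDS));  /* 1 */
		irte_map[0] |= 1UL << 5;	/* one IRTE allocated */
		printf("busy: %d\n", bitmap_is_empty(irte_map, WORDS));  /* 0 */
		return 0;
	}
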
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 02e4313e937c..1bd63352ab17 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
{
struct scatterlist *s;
size_t mapped = 0;
- unsigned int i;
+ unsigned int i, min_pagesz;
int ret;
- for_each_sg(sg, s, nents, i) {
- phys_addr_t phys = page_to_phys(sg_page(s));
+ if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+ return 0;
- /* We are mapping on page boundarys, so offset must be 0 */
- if (s->offset)
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+ /*
+ * We are mapping on IOMMU page boundaries, so offset within
+ * the page must be 0. However, the IOMMU may support pages
+ * smaller than PAGE_SIZE, so s->offset may still represent
+ * an offset of that boundary within the CPU page.
+ */
+ if (!IS_ALIGNED(s->offset, min_pagesz))
goto out_err;
ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
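
The key change in default_iommu_map_sg() is that the old blanket `s->offset` rejection is relaxed to an alignment check against the smallest page size the IOMMU supports, recovered from the lowest set bit of pgsize_bitmap (which the function first checks is nonzero). A standalone sketch of that derivation, using __builtin_ctzl in place of the kernel's __ffs:

	#include <stdbool.h>
	#include <stdio.h>

	static bool offset_ok(unsigned long pgsize_bitmap, unsigned long offset)
	{
		/* smallest supported page size = lowest set bit of the bitmap */
		unsigned long min_pagesz = 1UL << __builtin_ctzl(pgsize_bitmap);

		return (offset & (min_pagesz - 1)) == 0;	/* IS_ALIGNED() */
	}

	int main(void)
	{
		/* illustrative: an IOMMU supporting 4K/64K/1M/16M pages */
		unsigned long bitmap = (1UL << 12) | (1UL << 16) |
				       (1UL << 20) | (1UL << 24);

		printf("offset 0x1000: %d\n", offset_ok(bitmap, 0x1000)); /* 1 */
		printf("offset 0x0200: %d\n", offset_ok(bitmap, 0x0200)); /* 0 */
		return 0;
	}
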
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index e509c58eee92..99effbb17191 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_name(&pdev->dev), mmu);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
- return irq;
+ return ret;
}
ipmmu_device_reset(mmu);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 1c7b78ecf3e3..e1b05379ca0e 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -73,8 +73,7 @@ fail:
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
- if (drvdata->clk)
- clk_disable(drvdata->clk);
+ clk_disable(drvdata->clk);
clk_disable(drvdata->pclk);
}
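
The rationale for dropping the `if (drvdata->clk)` guards here and below: clk_disable() in the common clock framework already bails out early on a NULL (or error) clock, so the caller-side checks were redundant. A minimal userspace analogue of that NULL-tolerant contract:

	#include <stddef.h>
	#include <stdio.h>

	struct clk { int enable_count; };

	/* Callee tolerates NULL, as the kernel's clk_disable() does. */
	static void clk_disable_like(struct clk *clk)
	{
		if (clk == NULL)
			return;
		clk->enable_count--;
	}

	int main(void)
	{
		struct clk pclk = { .enable_count = 1 };

		clk_disable_like(NULL);		/* safe no-op, no caller check */
		clk_disable_like(&pclk);
		printf("pclk count: %d\n", pclk.enable_count);	/* 0 */
		return 0;
	}
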
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 61def7cb5263..b6d01f97e537 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
struct clk *iommu_clk;
struct clk *iommu_pclk;
struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
+ struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
void __iomem *regs_base;
int ret, irq, par;
@@ -224,8 +224,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
- if (iommu_clk)
- clk_disable(iommu_clk);
+ clk_disable(iommu_clk);
clk_disable(iommu_pclk);
@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev)
static int msm_iommu_ctx_probe(struct platform_device *pdev)
{
- struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
+ struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int i, ret;
@@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev)
SET_NSCFG(drvdata->base, mid, 3);
}
- if (drvdata->clk)
- clk_disable(drvdata->clk);
+ clk_disable(drvdata->clk);
clk_disable(drvdata->pclk);
dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 531658d17333..f3d20a2039d2 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -10,45 +10,35 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/platform_device.h>
#include <linux/debugfs.h>
-#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>
#include "omap-iopgtable.h"
#include "omap-iommu.h"
-#define MAXCOLUMN 100 /* for short messages */
-
static DEFINE_MUTEX(iommu_debug_lock);
static struct dentry *iommu_debug_root;
-static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
{
- u32 ver = omap_iommu_arch_version();
- char buf[MAXCOLUMN], *p = buf;
-
- p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ return !obj->domain;
}
static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ struct omap_iommu *obj = file->private_data;
char *p, *buf;
ssize_t bytes;
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ struct omap_iommu *obj = file->private_data;
char *p, *buf;
ssize_t bytes, rest;
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
return bytes;
}
-static ssize_t debug_write_pagetable(struct file *file,
- const char __user *userbuf, size_t count, loff_t *ppos)
+static void dump_ioptable(struct seq_file *s)
{
- struct iotlb_entry e;
- struct cr_regs cr;
- int err;
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- char buf[MAXCOLUMN], *p = buf;
-
- count = min(count, sizeof(buf));
-
- mutex_lock(&iommu_debug_lock);
- if (copy_from_user(p, userbuf, count)) {
- mutex_unlock(&iommu_debug_lock);
- return -EFAULT;
- }
-
- sscanf(p, "%x %x", &cr.cam, &cr.ram);
- if (!cr.cam || !cr.ram) {
- mutex_unlock(&iommu_debug_lock);
- return -EINVAL;
- }
-
- omap_iotlb_cr_to_e(&cr, &e);
- err = omap_iopgtable_store_entry(obj, &e);
- if (err)
- dev_err(obj->dev, "%s: fail to store cr\n", __func__);
-
- mutex_unlock(&iommu_debug_lock);
- return count;
-}
-
-#define dump_ioptable_entry_one(lv, da, val) \
- ({ \
- int __err = 0; \
- ssize_t bytes; \
- const int maxcol = 22; \
- const char *str = "%d: %08x %08x\n"; \
- bytes = snprintf(p, maxcol, str, lv, da, val); \
- p += bytes; \
- len -= bytes; \
- if (len < maxcol) \
- __err = -ENOMEM; \
- __err; \
- })
-
-static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
-{
- int i;
- u32 *iopgd;
- char *p = buf;
+ int i, j;
+ u32 da;
+ u32 *iopgd, *iopte;
+ struct omap_iommu *obj = s->private;
spin_lock(&obj->page_table_lock);
iopgd = iopgd_offset(obj, 0);
for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
- int j, err;
- u32 *iopte;
- u32 da;
-
if (!*iopgd)
continue;
if (!(*iopgd & IOPGD_TABLE)) {
da = i << IOPGD_SHIFT;
-
- err = dump_ioptable_entry_one(1, da, *iopgd);
- if (err)
- goto out;
+ seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);
continue;
}
iopte = iopte_offset(iopgd, 0);
-
for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
if (!*iopte)
continue;
da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
- err = dump_ioptable_entry_one(2, da, *iopgd);
- if (err)
- goto out;
+ seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte);
}
}
-out:
- spin_unlock(&obj->page_table_lock);
- return p - buf;
+ spin_unlock(&obj->page_table_lock);
}
-static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static int debug_read_pagetable(struct seq_file *s, void *data)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- char *p, *buf;
- size_t bytes;
+ struct omap_iommu *obj = s->private;
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- p = buf;
-
- p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
- p += sprintf(p, "-----------------------------------------\n");
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
mutex_lock(&iommu_debug_lock);
- bytes = PAGE_SIZE - (p - buf);
- p += dump_ioptable(obj, p, bytes);
-
- bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ seq_printf(s, "L: %8s %8s\n", "da:", "pte:");
+ seq_puts(s, "--------------------------\n");
+ dump_ioptable(s);
mutex_unlock(&iommu_debug_lock);
- free_page((unsigned long)buf);
- return bytes;
+ return 0;
}
-#define DEBUG_FOPS(name) \
- static const struct file_operations debug_##name##_fops = { \
- .open = simple_open, \
- .read = debug_read_##name, \
- .write = debug_write_##name, \
- .llseek = generic_file_llseek, \
- };
+#define DEBUG_SEQ_FOPS_RO(name) \
+ static int debug_open_##name(struct inode *inode, struct file *file) \
+ { \
+ return single_open(file, debug_read_##name, inode->i_private); \
+ } \
+ \
+ static const struct file_operations debug_##name##_fops = { \
+ .open = debug_open_##name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
#define DEBUG_FOPS_RO(name) \
static const struct file_operations debug_##name##_fops = { \
@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
.llseek = generic_file_llseek, \
};
-DEBUG_FOPS_RO(ver);
DEBUG_FOPS_RO(regs);
DEBUG_FOPS_RO(tlb);
-DEBUG_FOPS(pagetable);
+DEBUG_SEQ_FOPS_RO(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \
{ \
struct dentry *dent; \
- dent = debugfs_create_file(#attr, mode, parent, \
- dev, &debug_##attr##_fops); \
+ dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
+ obj, &debug_##attr##_fops); \
if (!dent) \
- return -ENOMEM; \
+ goto err; \
}
-#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
-static int iommu_debug_register(struct device *dev, void *data)
+void omap_iommu_debugfs_add(struct omap_iommu *obj)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_iommu *obj = platform_get_drvdata(pdev);
- struct omap_iommu_arch_data *arch_data;
- struct dentry *d, *parent;
-
- if (!obj || !obj->dev)
- return -EINVAL;
-
- arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
- if (!arch_data)
- return -ENOMEM;
-
- arch_data->iommu_dev = obj;
+ struct dentry *d;
- dev->archdata.iommu = arch_data;
+ if (!iommu_debug_root)
+ return;
- d = debugfs_create_dir(obj->name, iommu_debug_root);
- if (!d)
- goto nomem;
- parent = d;
+ obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
+ if (!obj->debug_dir)
+ return;
- d = debugfs_create_u8("nr_tlb_entries", 400, parent,
+ d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
(u8 *)&obj->nr_tlb_entries);
if (!d)
- goto nomem;
+ return;
- DEBUG_ADD_FILE_RO(ver);
DEBUG_ADD_FILE_RO(regs);
DEBUG_ADD_FILE_RO(tlb);
- DEBUG_ADD_FILE(pagetable);
+ DEBUG_ADD_FILE_RO(pagetable);
- return 0;
+ return;
-nomem:
- kfree(arch_data);
- return -ENOMEM;
+err:
+ debugfs_remove_recursive(obj->debug_dir);
}
-static int iommu_debug_unregister(struct device *dev, void *data)
+void omap_iommu_debugfs_remove(struct omap_iommu *obj)
{
- if (!dev->archdata.iommu)
- return 0;
-
- kfree(dev->archdata.iommu);
+ if (!obj->debug_dir)
+ return;
- dev->archdata.iommu = NULL;
-
- return 0;
+ debugfs_remove_recursive(obj->debug_dir);
}
-static int __init iommu_debug_init(void)
+void __init omap_iommu_debugfs_init(void)
{
- struct dentry *d;
- int err;
-
- d = debugfs_create_dir("iommu", NULL);
- if (!d)
- return -ENOMEM;
- iommu_debug_root = d;
-
- err = omap_foreach_iommu_device(d, iommu_debug_register);
- if (err)
- goto err_out;
- return 0;
-
-err_out:
- debugfs_remove_recursive(iommu_debug_root);
- return err;
+ iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
+ if (!iommu_debug_root)
+ pr_err("can't create debugfs dir\n");
}
-module_init(iommu_debug_init)
-static void __exit iommu_debugfs_exit(void)
+void __exit omap_iommu_debugfs_exit(void)
{
- debugfs_remove_recursive(iommu_debug_root);
- omap_foreach_iommu_device(NULL, iommu_debug_unregister);
+ debugfs_remove(iommu_debug_root);
}
-module_exit(iommu_debugfs_exit)
-
-MODULE_DESCRIPTION("omap iommu: debugfs interface");
-MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
-MODULE_LICENSE("GPL v2");
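
For reference, here is what DEBUG_SEQ_FOPS_RO(pagetable) expands to — a sketch of the generated code, assuming only the kernel's standard seq_file helpers. The show callback receives the omap_iommu object via inode->i_private, which is the `obj` pointer passed to debugfs_create_file() above:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int debug_open_pagetable(struct inode *inode, struct file *file)
	{
		/* single_open() stashes i_private as s->private for the show fn */
		return single_open(file, debug_read_pagetable, inode->i_private);
	}

	static const struct file_operations debug_pagetable_fops = {
		.open    = debug_open_pagetable,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};
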
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 18003c044454..bbb7dcef02d3 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -76,53 +76,23 @@ struct iotlb_lock {
short vict;
};
-/* accommodate the difference between omap1 and omap2/3 */
-static const struct iommu_functions *arch_iommu;
-
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
- * omap_install_iommu_arch - Install archtecure specific iommu functions
- * @ops: a pointer to architecture specific iommu functions
- *
- * There are several kind of iommu algorithm(tlb, pagetable) among
- * omap series. This interface installs such an iommu algorighm.
- **/
-int omap_install_iommu_arch(const struct iommu_functions *ops)
-{
- if (arch_iommu)
- return -EBUSY;
-
- arch_iommu = ops;
- return 0;
-}
-EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
-
-/**
- * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions
- * @ops: a pointer to architecture specific iommu functions
- *
- * This interface uninstalls the iommu algorighm installed previously.
- **/
-void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
-{
- if (arch_iommu != ops)
- pr_err("%s: not your arch\n", __func__);
-
- arch_iommu = NULL;
-}
-EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
-
-/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
**/
void omap_iommu_save_ctx(struct device *dev)
{
struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ u32 *p = obj->ctx;
+ int i;
- arch_iommu->save_ctx(obj);
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ p[i] = iommu_read_reg(obj, i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ }
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
@@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
void omap_iommu_restore_ctx(struct device *dev)
{
struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ u32 *p = obj->ctx;
+ int i;
- arch_iommu->restore_ctx(obj);
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ iommu_write_reg(obj, p[i], i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ }
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
-/**
- * omap_iommu_arch_version - Return running iommu arch version
- **/
-u32 omap_iommu_arch_version(void)
+static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
- return arch_iommu->version;
+ u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+ if (on)
+ iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
+ else
+ iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
+
+ l &= ~MMU_CNTL_MASK;
+ if (on)
+ l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
+ else
+ l |= (MMU_CNTL_MMU_EN);
+
+ iommu_write_reg(obj, l, MMU_CNTL);
+}
+
+static int omap2_iommu_enable(struct omap_iommu *obj)
+{
+ u32 l, pa;
+
+ if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+ return -EINVAL;
+
+ pa = virt_to_phys(obj->iopgd);
+ if (!IS_ALIGNED(pa, SZ_16K))
+ return -EINVAL;
+
+ l = iommu_read_reg(obj, MMU_REVISION);
+ dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
+ (l >> 4) & 0xf, l & 0xf);
+
+ iommu_write_reg(obj, pa, MMU_TTB);
+
+ if (obj->has_bus_err_back)
+ iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
+
+ __iommu_set_twl(obj, true);
+
+ return 0;
+}
+
+static void omap2_iommu_disable(struct omap_iommu *obj)
+{
+ u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+ l &= ~MMU_CNTL_MASK;
+ iommu_write_reg(obj, l, MMU_CNTL);
+
+ dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
-EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
int err;
struct platform_device *pdev = to_platform_device(obj->dev);
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
-
- if (!arch_iommu)
- return -ENODEV;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->deassert_reset) {
err = pdata->deassert_reset(pdev, pdata->reset_name);
@@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj)
pm_runtime_get_sync(obj->dev);
- err = arch_iommu->enable(obj);
+ err = omap2_iommu_enable(obj);
return err;
}
@@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj)
static void iommu_disable(struct omap_iommu *obj)
{
struct platform_device *pdev = to_platform_device(obj->dev);
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
- arch_iommu->disable(obj);
+ omap2_iommu_disable(obj);
pm_runtime_put_sync(obj->dev);
@@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj)
/*
* TLB operations
*/
-void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
-{
- BUG_ON(!cr || !e);
-
- arch_iommu->cr_to_e(cr, e);
-}
-EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
-
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
if (!cr)
return -EINVAL;
- return arch_iommu->cr_valid(cr);
-}
-
-static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
- struct iotlb_entry *e)
-{
- if (!e)
- return NULL;
-
- return arch_iommu->alloc_cr(obj, e);
+ return cr->cam & MMU_CAM_V;
}
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
- return arch_iommu->cr_to_virt(cr);
+ u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
+ u32 mask = get_cam_va_mask(cr->cam & page_size);
+
+ return cr->cam & mask;
}
static u32 get_iopte_attr(struct iotlb_entry *e)
{
- return arch_iommu->get_pte_attr(e);
+ u32 attr;
+
+ attr = e->mixed << 5;
+ attr |= e->endian;
+ attr |= e->elsz >> 3;
+ attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
+ (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
+ return attr;
}
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
- return arch_iommu->fault_isr(obj, da);
+ u32 status, fault_addr;
+
+ status = iommu_read_reg(obj, MMU_IRQSTATUS);
+ status &= MMU_IRQ_MASK;
+ if (!status) {
+ *da = 0;
+ return 0;
+ }
+
+ fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
+ *da = fault_addr;
+
+ iommu_write_reg(obj, status, MMU_IRQSTATUS);
+
+ return status;
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
@@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
- arch_iommu->tlb_read_cr(obj, cr);
+ cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
+ cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
- arch_iommu->tlb_load_cr(obj, cr);
+ iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
+ iommu_write_reg(obj, cr->ram, MMU_RAM);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
iommu_write_reg(obj, 1, MMU_LD_TLB);
}
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj: target iommu
- * @cr: contents of cam and ram register
- * @buf: output buffer
- **/
-static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
- char *buf)
-{
- BUG_ON(!cr || !buf);
-
- return arch_iommu->dump_cr(obj, cr, buf);
-}
-
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
@@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
return cr;
}
+#ifdef PREFETCH_IOTLB
+static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
+ struct iotlb_entry *e)
+{
+ struct cr_regs *cr;
+
+ if (!e)
+ return NULL;
+
+ if (e->da & ~(get_cam_va_mask(e->pgsz))) {
+ dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
+ e->da);
+ return ERR_PTR(-EINVAL);
+ }
+
+ cr = kmalloc(sizeof(*cr), GFP_KERNEL);
+ if (!cr)
+ return ERR_PTR(-ENOMEM);
+
+ cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
+ cr->ram = e->pa | e->endian | e->elsz | e->mixed;
+
+ return cr;
+}
+
/**
* load_iotlb_entry - Set an iommu tlb entry
* @obj: target iommu
* @e: an iommu tlb entry info
**/
-#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
int err = 0;
@@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj)
pm_runtime_put_sync(obj->dev);
}
-#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
+
+#define pr_reg(name) \
+ do { \
+ ssize_t bytes; \
+ const char *str = "%20s: %08x\n"; \
+ const int maxcol = 32; \
+ bytes = snprintf(p, maxcol, str, __stringify(name), \
+ iommu_read_reg(obj, MMU_##name)); \
+ p += bytes; \
+ len -= bytes; \
+ if (len < maxcol) \
+ goto out; \
+ } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+ char *p = buf;
+
+ pr_reg(REVISION);
+ pr_reg(IRQSTATUS);
+ pr_reg(IRQENABLE);
+ pr_reg(WALKING_ST);
+ pr_reg(CNTL);
+ pr_reg(FAULT_AD);
+ pr_reg(TTB);
+ pr_reg(LOCK);
+ pr_reg(LD_TLB);
+ pr_reg(CAM);
+ pr_reg(RAM);
+ pr_reg(GFLUSH);
+ pr_reg(FLUSH_ENTRY);
+ pr_reg(READ_CAM);
+ pr_reg(READ_RAM);
+ pr_reg(EMU_FAULT_AD);
+out:
+ return p - buf;
+}
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
@@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
pm_runtime_get_sync(obj->dev);
- bytes = arch_iommu->dump_ctx(obj, buf, bytes);
+ bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
pm_runtime_put_sync(obj->dev);
return bytes;
}
-EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
@@ -464,6 +536,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
}
/**
+ * iotlb_dump_cr - Dump an iommu tlb entry into buf
+ * @obj: target iommu
+ * @cr: contents of cam and ram register
+ * @buf: output buffer
+ **/
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+ char *buf)
+{
+ char *p = buf;
+
+ /* FIXME: Need more detail analysis of cam/ram */
+ p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
+ (cr->cam & MMU_CAM_P) ? 1 : 0);
+
+ return p - buf;
+}
+
+/**
* omap_dump_tlb_entries - dump cr arrays to given buffer
* @obj: target iommu
* @buf: output buffer
@@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
return p - buf;
}
-EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
-
-int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
-{
- return driver_for_each_device(&omap_iommu_driver.driver,
- NULL, data, fn);
-}
-EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
-#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
+#endif /* CONFIG_OMAP_IOMMU_DEBUG */
/*
* H/W pagetable operations
@@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
* @obj: target iommu
* @e: an iommu tlb entry info
**/
-int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
+static int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
int err;
@@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
prefetch_iotlb_entry(obj, e);
return err;
}
-EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
/**
* iopgtable_lookup_entry - Lookup an iommu pte entry
@@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
u32 *iopgd, *iopte;
struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
+ struct omap_iommu_domain *omap_domain = domain->priv;
- if (!obj->refcount)
+ if (!omap_domain->iommu_dev)
return IRQ_NONE;
errs = iommu_report_fault(obj, &da);
@@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
spin_lock(&obj->iommu_lock);
- /* an iommu device can only be attached once */
- if (++obj->refcount > 1) {
- dev_err(dev, "%s: already attached!\n", obj->name);
- err = -EBUSY;
- goto err_enable;
- }
-
obj->iopgd = iopgd;
err = iommu_enable(obj);
if (err)
@@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
return obj;
err_enable:
- obj->refcount--;
spin_unlock(&obj->iommu_lock);
return ERR_PTR(err);
}
@@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
spin_lock(&obj->iommu_lock);
- if (--obj->refcount == 0)
- iommu_disable(obj);
-
+ iommu_disable(obj);
obj->iopgd = NULL;
spin_unlock(&obj->iommu_lock);
@@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
int irq;
struct omap_iommu *obj;
struct resource *res;
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *of = pdev->dev.of_node;
obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
@@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
+ omap_iommu_debugfs_add(obj);
+
dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0;
}
@@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
struct omap_iommu *obj = platform_get_drvdata(pdev);
iopgtable_clear_entry_all(obj);
+ omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev);
@@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
e->da = da;
e->pa = pa;
e->valid = MMU_CAM_V;
- /* FIXME: add OMAP1 support */
e->pgsz = pgsz;
e->endian = MMU_RAM_ENDIAN_LITTLE;
e->elsz = MMU_RAM_ELSZ_8;
@@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
omap_domain->dev = NULL;
+ oiommu->domain = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1309,6 +1385,8 @@ static int __init omap_iommu_init(void)
bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+ omap_iommu_debugfs_init();
+
return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
@@ -1319,6 +1397,8 @@ static void __exit omap_iommu_exit(void)
kmem_cache_destroy(iopte_cachep);
platform_driver_unregister(&omap_iommu_driver);
+
+ omap_iommu_debugfs_exit();
}
module_exit(omap_iommu_exit);
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 4f1b68c08c15..d736630df3c8 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -10,9 +10,8 @@
* published by the Free Software Foundation.
*/
-#if defined(CONFIG_ARCH_OMAP1)
-#error "iommu for this processor not implemented yet"
-#endif
+#ifndef _OMAP_IOMMU_H
+#define _OMAP_IOMMU_H
struct iotlb_entry {
u32 da;
@@ -30,10 +29,9 @@ struct omap_iommu {
const char *name;
void __iomem *regbase;
struct device *dev;
- void *isr_priv;
struct iommu_domain *domain;
+ struct dentry *debug_dir;
- unsigned int refcount;
spinlock_t iommu_lock; /* global for this whole object */
/*
@@ -67,34 +65,6 @@ struct cr_regs {
};
};
-/* architecture specific functions */
-struct iommu_functions {
- unsigned long version;
-
- int (*enable)(struct omap_iommu *obj);
- void (*disable)(struct omap_iommu *obj);
- void (*set_twl)(struct omap_iommu *obj, bool on);
- u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
-
- void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
- void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
-
- struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
- struct iotlb_entry *e);
- int (*cr_valid)(struct cr_regs *cr);
- u32 (*cr_to_virt)(struct cr_regs *cr);
- void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
- ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
- char *buf);
-
- u32 (*get_pte_attr)(struct iotlb_entry *e);
-
- void (*save_ctx)(struct omap_iommu *obj);
- void (*restore_ctx)(struct omap_iommu *obj);
- ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
-};
-
-#ifdef CONFIG_IOMMU_API
/**
* dev_to_omap_iommu() - retrieves an omap iommu object from a user device
* @dev: iommu client device
@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
return arch_data->iommu_dev;
}
-#endif
/*
* MMU Register offsets
@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* MMU Register bit definitions
*/
+/* IRQSTATUS & IRQENABLE */
+#define MMU_IRQ_MULTIHITFAULT (1 << 4)
+#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
+#define MMU_IRQ_EMUMISS (1 << 2)
+#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
+#define MMU_IRQ_TLBMISS (1 << 0)
+
+#define __MMU_IRQ_FAULT \
+ (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
+#define MMU_IRQ_MASK \
+ (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
+#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
+#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
+
+/* MMU_CNTL */
+#define MMU_CNTL_SHIFT 1
+#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
+#define MMU_CNTL_EML_TLB (1 << 3)
+#define MMU_CNTL_TWL_EN (1 << 2)
+#define MMU_CNTL_MMU_EN (1 << 1)
+
+/* CAM */
#define MMU_CAM_VATAG_SHIFT 12
#define MMU_CAM_VATAG_MASK \
((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_CAM_PGSZ_4K (2 << 0)
#define MMU_CAM_PGSZ_16M (3 << 0)
+/* RAM */
#define MMU_RAM_PADDR_SHIFT 12
#define MMU_RAM_PADDR_MASK \
((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
+#define get_cam_va_mask(pgsz) \
+ (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
+ ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
+ ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
+ ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
+
/*
* utilities for super page(16MB, 1MB, 64KB and 4KB)
*/
@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* global functions
*/
-extern u32 omap_iommu_arch_version(void);
-
-extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
-
-extern int
-omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
-
-extern void omap_iommu_save_ctx(struct device *dev);
-extern void omap_iommu_restore_ctx(struct device *dev);
-
-extern int omap_foreach_iommu_device(void *data,
- int (*fn)(struct device *, void *));
-
-extern int omap_install_iommu_arch(const struct iommu_functions *ops);
-extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
-
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
extern ssize_t
omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
extern size_t
omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+void omap_iommu_debugfs_init(void);
+void omap_iommu_debugfs_exit(void);
+
+void omap_iommu_debugfs_add(struct omap_iommu *obj);
+void omap_iommu_debugfs_remove(struct omap_iommu *obj);
+#else
+static inline void omap_iommu_debugfs_init(void) { }
+static inline void omap_iommu_debugfs_exit(void) { }
+
+static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { }
+static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { }
+#endif
+
/*
* register accessors
*/
@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
{
__raw_writel(val, obj->regbase + offs);
}
+
+#endif /* _OMAP_IOMMU_H */
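
The get_cam_va_mask() helper added above maps the two CAM page-size bits to the virtual-address mask for that page size; iotlb_alloc_cr() then uses `e->da & ~mask` to reject misaligned entries. A standalone sketch with the same constants (userspace, for illustration; the device address is assumed):

	#include <stdio.h>

	#define MMU_CAM_PGSZ_1M  (0 << 0)
	#define MMU_CAM_PGSZ_64K (1 << 0)
	#define MMU_CAM_PGSZ_4K  (2 << 0)
	#define MMU_CAM_PGSZ_16M (3 << 0)

	#define get_cam_va_mask(pgsz)				\
		(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 :	\
		 ((pgsz) == MMU_CAM_PGSZ_1M)  ? 0xfff00000 :	\
		 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 :	\
		 ((pgsz) == MMU_CAM_PGSZ_4K)  ? 0xfffff000 : 0)

	int main(void)
	{
		unsigned int da = 0x40212000;	/* illustrative device address */

		/* aligned for a 4K entry, misaligned for a 1M entry */
		printf("4K misalign bits: 0x%08x\n",
		       da & ~get_cam_va_mask(MMU_CAM_PGSZ_4K));	/* 0 -> ok */
		printf("1M misalign bits: 0x%08x\n",
		       da & ~get_cam_va_mask(MMU_CAM_PGSZ_1M));	/* 0x12000 */
		return 0;
	}
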
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
deleted file mode 100644
index 5e1ea3b0bf16..000000000000
--- a/drivers/iommu/omap-iommu2.c
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * omap iommu: omap2/3 architecture specific functions
- *
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
- * Paul Mundt and Toshihiro Kobayashi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/omap-iommu.h>
-#include <linux/slab.h>
-#include <linux/stringify.h>
-#include <linux/platform_data/iommu-omap.h>
-
-#include "omap-iommu.h"
-
-/*
- * omap2 architecture specific register bit definitions
- */
-#define IOMMU_ARCH_VERSION 0x00000011
-
-/* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT (1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
-#define MMU_IRQ_EMUMISS (1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
-#define MMU_IRQ_TLBMISS (1 << 0)
-
-#define __MMU_IRQ_FAULT \
- (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
-#define MMU_IRQ_MASK \
- (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
-#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
-#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
-
-/* MMU_CNTL */
-#define MMU_CNTL_SHIFT 1
-#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB (1 << 3)
-#define MMU_CNTL_TWL_EN (1 << 2)
-#define MMU_CNTL_MMU_EN (1 << 1)
-
-#define get_cam_va_mask(pgsz) \
- (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
- ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
- ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
- ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
-
-/* IOMMU errors */
-#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
-#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
-#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
-#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
-#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
-
-static void __iommu_set_twl(struct omap_iommu *obj, bool on)
-{
- u32 l = iommu_read_reg(obj, MMU_CNTL);
-
- if (on)
- iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
- else
- iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
-
- l &= ~MMU_CNTL_MASK;
- if (on)
- l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
- else
- l |= (MMU_CNTL_MMU_EN);
-
- iommu_write_reg(obj, l, MMU_CNTL);
-}
-
-
-static int omap2_iommu_enable(struct omap_iommu *obj)
-{
- u32 l, pa;
-
- if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
- return -EINVAL;
-
- pa = virt_to_phys(obj->iopgd);
- if (!IS_ALIGNED(pa, SZ_16K))
- return -EINVAL;
-
- l = iommu_read_reg(obj, MMU_REVISION);
- dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
- (l >> 4) & 0xf, l & 0xf);
-
- iommu_write_reg(obj, pa, MMU_TTB);
-
- if (obj->has_bus_err_back)
- iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
-
- __iommu_set_twl(obj, true);
-
- return 0;
-}
-
-static void omap2_iommu_disable(struct omap_iommu *obj)
-{
- u32 l = iommu_read_reg(obj, MMU_CNTL);
-
- l &= ~MMU_CNTL_MASK;
- iommu_write_reg(obj, l, MMU_CNTL);
-
- dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
-}
-
-static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
-{
- __iommu_set_twl(obj, false);
-}
-
-static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
-{
- u32 stat, da;
- u32 errs = 0;
-
- stat = iommu_read_reg(obj, MMU_IRQSTATUS);
- stat &= MMU_IRQ_MASK;
- if (!stat) {
- *ra = 0;
- return 0;
- }
-
- da = iommu_read_reg(obj, MMU_FAULT_AD);
- *ra = da;
-
- if (stat & MMU_IRQ_TLBMISS)
- errs |= OMAP_IOMMU_ERR_TLB_MISS;
- if (stat & MMU_IRQ_TRANSLATIONFAULT)
- errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
- if (stat & MMU_IRQ_EMUMISS)
- errs |= OMAP_IOMMU_ERR_EMU_MISS;
- if (stat & MMU_IRQ_TABLEWALKFAULT)
- errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
- if (stat & MMU_IRQ_MULTIHITFAULT)
- errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
- iommu_write_reg(obj, stat, MMU_IRQSTATUS);
-
- return errs;
-}
-
-static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
-{
- cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
- cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
-}
-
-static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
-{
- iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
- iommu_write_reg(obj, cr->ram, MMU_RAM);
-}
-
-static u32 omap2_cr_to_virt(struct cr_regs *cr)
-{
- u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
- u32 mask = get_cam_va_mask(cr->cam & page_size);
-
- return cr->cam & mask;
-}
-
-static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
- struct iotlb_entry *e)
-{
- struct cr_regs *cr;
-
- if (e->da & ~(get_cam_va_mask(e->pgsz))) {
- dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
- e->da);
- return ERR_PTR(-EINVAL);
- }
-
- cr = kmalloc(sizeof(*cr), GFP_KERNEL);
- if (!cr)
- return ERR_PTR(-ENOMEM);
-
- cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
- cr->ram = e->pa | e->endian | e->elsz | e->mixed;
-
- return cr;
-}
-
-static inline int omap2_cr_valid(struct cr_regs *cr)
-{
- return cr->cam & MMU_CAM_V;
-}
-
-static u32 omap2_get_pte_attr(struct iotlb_entry *e)
-{
- u32 attr;
-
- attr = e->mixed << 5;
- attr |= e->endian;
- attr |= e->elsz >> 3;
- attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
- (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
- return attr;
-}
-
-static ssize_t
-omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
-{
- char *p = buf;
-
- /* FIXME: Need more detail analysis of cam/ram */
- p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
- (cr->cam & MMU_CAM_P) ? 1 : 0);
-
- return p - buf;
-}
-
-#define pr_reg(name) \
- do { \
- ssize_t bytes; \
- const char *str = "%20s: %08x\n"; \
- const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
- iommu_read_reg(obj, MMU_##name)); \
- p += bytes; \
- len -= bytes; \
- if (len < maxcol) \
- goto out; \
- } while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
- char *p = buf;
-
- pr_reg(REVISION);
- pr_reg(IRQSTATUS);
- pr_reg(IRQENABLE);
- pr_reg(WALKING_ST);
- pr_reg(CNTL);
- pr_reg(FAULT_AD);
- pr_reg(TTB);
- pr_reg(LOCK);
- pr_reg(LD_TLB);
- pr_reg(CAM);
- pr_reg(RAM);
- pr_reg(GFLUSH);
- pr_reg(FLUSH_ENTRY);
- pr_reg(READ_CAM);
- pr_reg(READ_RAM);
- pr_reg(EMU_FAULT_AD);
-out:
- return p - buf;
-}
-
-static void omap2_iommu_save_ctx(struct omap_iommu *obj)
-{
- int i;
- u32 *p = obj->ctx;
-
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- p[i] = iommu_read_reg(obj, i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
- }
-
- BUG_ON(p[0] != IOMMU_ARCH_VERSION);
-}
-
-static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
-{
- int i;
- u32 *p = obj->ctx;
-
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- iommu_write_reg(obj, p[i], i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
- }
-
- BUG_ON(p[0] != IOMMU_ARCH_VERSION);
-}
-
-static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
-{
- e->da = cr->cam & MMU_CAM_VATAG_MASK;
- e->pa = cr->ram & MMU_RAM_PADDR_MASK;
- e->valid = cr->cam & MMU_CAM_V;
- e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
- e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
- e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
- e->mixed = cr->ram & MMU_RAM_MIXED;
-}
-
-static const struct iommu_functions omap2_iommu_ops = {
- .version = IOMMU_ARCH_VERSION,
-
- .enable = omap2_iommu_enable,
- .disable = omap2_iommu_disable,
- .set_twl = omap2_iommu_set_twl,
- .fault_isr = omap2_iommu_fault_isr,
-
- .tlb_read_cr = omap2_tlb_read_cr,
- .tlb_load_cr = omap2_tlb_load_cr,
-
- .cr_to_e = omap2_cr_to_e,
- .cr_to_virt = omap2_cr_to_virt,
- .alloc_cr = omap2_alloc_cr,
- .cr_valid = omap2_cr_valid,
- .dump_cr = omap2_dump_cr,
-
- .get_pte_attr = omap2_get_pte_attr,
-
- .save_ctx = omap2_iommu_save_ctx,
- .restore_ctx = omap2_iommu_restore_ctx,
- .dump_ctx = omap2_iommu_dump_ctx,
-};
-
-static int __init omap2_iommu_init(void)
-{
- return omap_install_iommu_arch(&omap2_iommu_ops);
-}
-module_init(omap2_iommu_init);
-
-static void __exit omap2_iommu_exit(void)
-{
- omap_uninstall_iommu_arch(&omap2_iommu_ops);
-}
-module_exit(omap2_iommu_exit);
-
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
new file mode 100644
index 000000000000..b2023af384b9
--- /dev/null
+++ b/drivers/iommu/rockchip-iommu.c
@@ -0,0 +1,1038 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/** MMU register offsets */
+#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
+#define RK_MMU_STATUS 0x04
+#define RK_MMU_COMMAND 0x08
+#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
+#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */
+#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
+#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
+#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
+#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
+#define RK_MMU_AUTO_GATING 0x24
+
+#define DTE_ADDR_DUMMY 0xCAFEBABE
+#define FORCE_RESET_TIMEOUT 100 /* ms */
+
+/* RK_MMU_STATUS fields */
+#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
+#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
+#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
+#define RK_MMU_STATUS_IDLE BIT(3)
+#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
+
+/* RK_MMU_COMMAND command values */
+#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
+#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
+#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
+#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stall re-enables paging */
+#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
+#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
+#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
+
+/* RK_MMU_INT_* register fields */
+#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
+#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
+#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
+
+#define NUM_DT_ENTRIES 1024
+#define NUM_PT_ENTRIES 1024
+
+#define SPAGE_ORDER 12
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+ /*
+ * Support mapping any size that fits in one page table:
+ * 4 KiB to 4 MiB
+ */
+#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
+
+#define IOMMU_REG_POLL_COUNT_FAST 1000
+
+struct rk_iommu_domain {
+ struct list_head iommus;
+ u32 *dt; /* page directory table */
+ spinlock_t iommus_lock; /* lock for iommus list */
+ spinlock_t dt_lock; /* lock for modifying page directory table */
+};
+
+struct rk_iommu {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+ struct list_head node; /* entry in rk_iommu_domain.iommus */
+ struct iommu_domain *domain; /* domain to which iommu is attached */
+};
+
+static inline void rk_table_flush(u32 *va, unsigned int count)
+{
+ phys_addr_t pa_start = virt_to_phys(va);
+ phys_addr_t pa_end = virt_to_phys(va + count);
+ size_t size = pa_end - pa_start;
+
+ __cpuc_flush_dcache_area(va, size);
+ outer_flush_range(pa_start, pa_end);
+}
+
+/**
+ * Inspired by _wait_for in intel_drv.h
+ * This is NOT safe for use in interrupt context.
+ *
+ * Note that it's important that we check the condition again after having
+ * timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define rk_wait_for(COND, MS) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = (COND) ? 0 : -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(50, 100); \
+ } \
+ ret__; \
+})
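
As the comment above notes, the macro re-evaluates COND once more after the deadline passes, so a poller that was preempted across the whole window still reports success if the condition did come true. A standalone analogue of that final re-check, using a hypothetical busy-wait stand-in for the kernel's jiffies/usleep_range machinery:

	#include <stdio.h>
	#include <time.h>

	/* Hypothetical stand-in: poll COND for roughly MS milliseconds. */
	#define wait_for(COND, MS) ({					\
		clock_t deadline = clock() + (MS) * CLOCKS_PER_SEC / 1000; \
		int ret = 0;						\
		while (!(COND)) {					\
			if (clock() > deadline) {			\
				ret = (COND) ? 0 : -1;	/* final re-check */ \
				break;					\
			}						\
		}							\
		ret;							\
	})

	int main(void)
	{
		int flag = 1;

		/* condition already true: returns immediately with 0 */
		printf("ret = %d\n", wait_for(flag == 1, 10));
		return 0;
	}
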
+
+/*
+ * The Rockchip rk3288 iommu uses a 2-level page table.
+ * The first level is the "Directory Table" (DT).
+ * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
+ * to a "Page Table".
+ * The second level is the 1024 Page Tables (PT).
+ * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
+ * a 4 KB page of physical memory.
+ *
+ * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
+ * Each iommu device has a MMU_DTE_ADDR register that contains the physical
+ * address of the start of the DT page.
+ *
+ * The structure of the page table is as follows:
+ *
+ * DT
+ * MMU_DTE_ADDR -> +-----+
+ * | |
+ * +-----+ PT
+ * | DTE | -> +-----+
+ * +-----+ | | Memory
+ * | | +-----+ Page
+ * | | | PTE | -> +-----+
+ * +-----+ +-----+ | |
+ * | | | |
+ * | | | |
+ * +-----+ | |
+ * | |
+ * | |
+ * +-----+
+ */
+
+/*
+ * Each DTE has a PT address and a valid bit:
+ * +---------------------+-----------+-+
+ * | PT address | Reserved |V|
+ * +---------------------+-----------+-+
+ * 31:12 - PT address (PTs always start on a 4 KB boundary)
+ * 11: 1 - Reserved
+ * 0 - 1 if PT @ PT address is valid
+ */
+#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
+#define RK_DTE_PT_VALID BIT(0)
+
+static inline phys_addr_t rk_dte_pt_address(u32 dte)
+{
+ return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
+}
+
+static inline bool rk_dte_is_pt_valid(u32 dte)
+{
+ return dte & RK_DTE_PT_VALID;
+}
+
+static u32 rk_mk_dte(u32 *pt)
+{
+ phys_addr_t pt_phys = virt_to_phys(pt);
+ return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
+}
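
rk_mk_dte() simply masks the page table's physical address to a 4 KiB boundary and sets the valid bit; rk_dte_pt_address() and rk_dte_is_pt_valid() invert that. A worked round-trip with an assumed physical address:

	#include <stdio.h>

	#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
	#define RK_DTE_PT_VALID        (1U << 0)

	int main(void)
	{
		unsigned int pt_phys = 0x12345000;   /* assumed PT address */
		unsigned int dte = (pt_phys & RK_DTE_PT_ADDRESS_MASK) |
				   RK_DTE_PT_VALID;

		printf("dte     = 0x%08x\n", dte);		  /* 0x12345001 */
		printf("pt addr = 0x%08x\n", dte & RK_DTE_PT_ADDRESS_MASK);
		printf("valid   = %u\n", dte & RK_DTE_PT_VALID); /* 1 */
		return 0;
	}
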
+
+/*
+ * Each PTE has a Page address, some flags and a valid bit:
+ * +---------------------+---+-------+-+
+ * | Page address |Rsv| Flags |V|
+ * +---------------------+---+-------+-+
+ * 31:12 - Page address (Pages always start on a 4 KB boundary)
+ * 11: 9 - Reserved
+ * 8: 1 - Flags
+ * 8 - Read allocate - allocate cache space on read misses
+ * 7 - Read cache - enable cache & prefetch of data
+ * 6 - Write buffer - enable delaying writes on their way to memory
+ * 5 - Write allocate - allocate cache space on write misses
+ * 4 - Write cache - different writes can be merged together
+ * 3 - Override cache attributes
+ * if 1, bits 4-8 control cache attributes
+ * if 0, the system bus defaults are used
+ * 2 - Writable
+ * 1 - Readable
+ * 0 - 1 if Page @ Page address is valid
+ */
+#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
+#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
+#define RK_PTE_PAGE_WRITABLE BIT(2)
+#define RK_PTE_PAGE_READABLE BIT(1)
+#define RK_PTE_PAGE_VALID BIT(0)
+
+static inline phys_addr_t rk_pte_page_address(u32 pte)
+{
+ return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
+}
+
+static inline bool rk_pte_is_page_valid(u32 pte)
+{
+ return pte & RK_PTE_PAGE_VALID;
+}
+
+/* TODO: set cache flags per prot IOMMU_CACHE */
+static u32 rk_mk_pte(phys_addr_t page, int prot)
+{
+ u32 flags = 0;
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+ page &= RK_PTE_PAGE_ADDRESS_MASK;
+ return page | flags | RK_PTE_PAGE_VALID;
+}
+
+static u32 rk_mk_pte_invalid(u32 pte)
+{
+ return pte & ~RK_PTE_PAGE_VALID;
+}
+
+/*
+ * rk3288 iova (IOMMU Virtual Address) format
+ * 31 22.21 12.11 0
+ * +-----------+-----------+-------------+
+ * | DTE index | PTE index | Page offset |
+ * +-----------+-----------+-------------+
+ * 31:22 - DTE index - index of DTE in DT
+ * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address
+ * 11: 0 - Page offset - offset into page @ PTE.page_address
+ */
+#define RK_IOVA_DTE_MASK 0xffc00000
+#define RK_IOVA_DTE_SHIFT 22
+#define RK_IOVA_PTE_MASK 0x003ff000
+#define RK_IOVA_PTE_SHIFT 12
+#define RK_IOVA_PAGE_MASK 0x00000fff
+#define RK_IOVA_PAGE_SHIFT 0
+
+static u32 rk_iova_dte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
+}
+
+static u32 rk_iova_pte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
+}
+
+static u32 rk_iova_page_offset(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
+}
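
Putting the three extractors together: for iova 0x12345678, bits 31:22 give DTE index 0x048, bits 21:12 give PTE index 0x345, and bits 11:0 give page offset 0x678. A standalone check of that decomposition using the same masks and shifts:

	#include <stdio.h>

	#define RK_IOVA_DTE_MASK  0xffc00000
	#define RK_IOVA_DTE_SHIFT 22
	#define RK_IOVA_PTE_MASK  0x003ff000
	#define RK_IOVA_PTE_SHIFT 12
	#define RK_IOVA_PAGE_MASK 0x00000fff

	int main(void)
	{
		unsigned int iova = 0x12345678;

		printf("dte index   = 0x%03x\n",
		       (iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT); /* 0x048 */
		printf("pte index   = 0x%03x\n",
		       (iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT); /* 0x345 */
		printf("page offset = 0x%03x\n",
		       iova & RK_IOVA_PAGE_MASK);			/* 0x678 */
		return 0;
	}
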
+
+static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+{
+ return readl(iommu->base + offset);
+}
+
+static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+{
+ writel(value, iommu->base + offset);
+}
+
+static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
+{
+ writel(command, iommu->base + RK_MMU_COMMAND);
+}
+
+static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
+ size_t size)
+{
+ dma_addr_t iova_end = iova + size;
+ /*
+ * TODO(djkurtz): Figure out when it is more efficient to shootdown the
+ * entire iotlb rather than iterate over individual iovas.
+ */
+ for (; iova < iova_end; iova += SPAGE_SIZE)
+ rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+}
+
+static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
+{
+ return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+}
+
+static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
+{
+ return rk_iommu_read(iommu, RK_MMU_STATUS) &
+ RK_MMU_STATUS_PAGING_ENABLED;
+}
+
+static int rk_iommu_enable_stall(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (rk_iommu_is_stall_active(iommu))
+ return 0;
+
+ /* Stall can only be enabled if paging is enabled */
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
+
+ ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_disable_stall(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (!rk_iommu_is_stall_active(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
+
+ ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_enable_paging(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
+
+ ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_disable_paging(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
+
+ ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_force_reset(struct rk_iommu *iommu)
+{
+ int ret;
+ u32 dte_addr;
+
+ /*
+ * Check if the DTE_ADDR register is working by writing DTE_ADDR_DUMMY
+ * and verifying that the upper 5 nybbles are read back.
+ */
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+
+ dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+ return -EFAULT;
+ }
+
+ rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
+
+ ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
+ FORCE_RESET_TIMEOUT);
+ if (ret)
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+
+ return ret;
+}
+
+static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+{
+ u32 dte_index, pte_index, page_offset;
+ u32 mmu_dte_addr;
+ phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
+ u32 *dte_addr;
+ u32 dte;
+ phys_addr_t pte_addr_phys = 0;
+ u32 *pte_addr = NULL;
+ u32 pte = 0;
+ phys_addr_t page_addr_phys = 0;
+ u32 page_flags = 0;
+
+ dte_index = rk_iova_dte_index(iova);
+ pte_index = rk_iova_pte_index(iova);
+ page_offset = rk_iova_page_offset(iova);
+
+ mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+
+ dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
+ dte_addr = phys_to_virt(dte_addr_phys);
+ dte = *dte_addr;
+
+ if (!rk_dte_is_pt_valid(dte))
+ goto print_it;
+
+ pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+ pte_addr = phys_to_virt(pte_addr_phys);
+ pte = *pte_addr;
+
+ if (!rk_pte_is_page_valid(pte))
+ goto print_it;
+
+ page_addr_phys = rk_pte_page_address(pte) + page_offset;
+ page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
+
+print_it:
+ dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
+ &iova, dte_index, pte_index, page_offset);
+ dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
+ &mmu_dte_addr_phys, &dte_addr_phys, dte,
+ rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
+ rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
+}
+
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+{
+ struct rk_iommu *iommu = dev_id;
+ u32 status;
+ u32 int_status;
+ dma_addr_t iova;
+
+ int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
+ if (int_status == 0)
+ return IRQ_NONE;
+
+ iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+
+ if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+ int flags;
+
+ status = rk_iommu_read(iommu, RK_MMU_STATUS);
+ flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+ IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+ dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+ &iova,
+ (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+
+ log_iova(iommu, iova);
+
+ /*
+ * Report page fault to any installed handlers.
+ * Ignore the return code, though, since we always zap cache
+ * and clear the page fault anyway.
+ */
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova,
+ flags);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
+ }
+
+ if (int_status & RK_MMU_IRQ_BUS_ERROR)
+ dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+
+ if (int_status & ~RK_MMU_IRQ_MASK)
+ dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+ int_status);
+
+ rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+
+ return IRQ_HANDLED;
+}
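/*
 * Illustrative sketch, not part of this patch: a domain owner can
 * observe the faults reported by report_iommu_fault() above by
 * installing a handler; the handler body here is hypothetical.
 */
static int example_fault_handler(struct iommu_domain *domain,
                                 struct device *dev, unsigned long iova,
                                 int flags, void *token)
{
        dev_warn(dev, "iommu fault at %lx, flags %x\n", iova, flags);
        return 0; /* the driver zaps the cache and clears the fault anyway */
}

/* caller: iommu_set_fault_handler(domain, example_fault_handler, NULL); */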
+
+static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ phys_addr_t pt_phys, phys = 0;
+ u32 dte, pte;
+ u32 *page_table;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ if (!rk_dte_is_pt_valid(dte))
+ goto out;
+
+ pt_phys = rk_dte_pt_address(dte);
+ page_table = (u32 *)phys_to_virt(pt_phys);
+ pte = page_table[rk_iova_pte_index(iova)];
+ if (!rk_pte_is_page_valid(pte))
+ goto out;
+
+ phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+out:
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ return phys;
+}
+
+static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova, size_t size)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ /* shoot down this iova range on all iommus that use this domain */
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_for_each(pos, &rk_domain->iommus) {
+ struct rk_iommu *iommu;
+ iommu = list_entry(pos, struct rk_iommu, node);
+ rk_iommu_zap_lines(iommu, iova, size);
+ }
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+}
+
+static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova)
+{
+ u32 *page_table, *dte_addr;
+ u32 dte;
+ phys_addr_t pt_phys;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
+ dte = *dte_addr;
+ if (rk_dte_is_pt_valid(dte))
+ goto done;
+
+ page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ if (!page_table)
+ return ERR_PTR(-ENOMEM);
+
+ dte = rk_mk_dte(page_table);
+ *dte_addr = dte;
+
+ rk_table_flush(page_table, NUM_PT_ENTRIES);
+ rk_table_flush(dte_addr, 1);
+
+ /*
+ * Zap the first iova covered by the newly allocated page table so the
+ * iommu evicts any stale value of the new dte from its iotlb.
+ */
+ rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
+
+done:
+ pt_phys = rk_dte_pt_address(dte);
+ return (u32 *)phys_to_virt(pt_phys);
+}
+
+static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
+ u32 *pte_addr, dma_addr_t iova, size_t size)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+ if (!rk_pte_is_page_valid(pte))
+ break;
+
+ pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+ }
+
+ rk_table_flush(pte_addr, pte_count);
+
+ return pte_count * SPAGE_SIZE;
+}
+
+static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
+ dma_addr_t iova, phys_addr_t paddr, size_t size,
+ int prot)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+ phys_addr_t page_phys;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+
+ if (rk_pte_is_page_valid(pte))
+ goto unwind;
+
+ pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+
+ paddr += SPAGE_SIZE;
+ }
+
+ rk_table_flush(pte_addr, pte_count);
+
+ return 0;
+unwind:
+ /* Unmap the range of iovas that we just mapped */
+ rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
+
+ iova += pte_count * SPAGE_SIZE;
+ page_phys = rk_pte_page_address(pte_addr[pte_count]);
+ pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
+ &iova, &page_phys, &paddr, prot);
+
+ return -EADDRINUSE;
+}
+
+static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ dma_addr_t iova = (dma_addr_t)_iova;
+ u32 *page_table, *pte_addr;
+ int ret;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So, size will always be 4096 <= size <= 4194304.
+ * Since iommu_map() guarantees that both iova and size will be
+ * aligned, we will only ever map within a single dte here.
+ */
+ page_table = rk_dte_get_page_table(rk_domain, iova);
+ if (IS_ERR(page_table)) {
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ return PTR_ERR(page_table);
+ }
+
+ pte_addr = &page_table[rk_iova_pte_index(iova)];
+ ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ return ret;
+}
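/*
 * Illustrative sketch, not part of this patch: per the comment above,
 * one rk_iommu_map() call never crosses a dte boundary. Mapping 16 KiB
 * at iova 0x10004000 fills PTEs 0x004..0x007 of the page table for dte
 * index 0x040; the physical address here is made up for the example.
 */
static void rk_map_example(struct iommu_domain *domain)
{
        iommu_map(domain, 0x10004000, 0x20000000, SZ_16K,
                  IOMMU_READ | IOMMU_WRITE);
}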
+
+static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
+ size_t size)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ dma_addr_t iova = (dma_addr_t)_iova;
+ phys_addr_t pt_phys;
+ u32 dte;
+ u32 *pte_addr;
+ size_t unmap_size;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So, size will always be 4096 <= size <= 4194304.
+ * Since iommu_unmap() guarantees that both iova and size will be
+ * aligned, we will only ever unmap within a single dte here.
+ */
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ /* Just return 0 if iova is unmapped */
+ if (!rk_dte_is_pt_valid(dte)) {
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ return 0;
+ }
+
+ pt_phys = rk_dte_pt_address(dte);
+ pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
+ unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
+
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ /* Shoot down iotlb entries for the iova range that was just unmapped */
+ rk_iommu_zap_iova(rk_domain, iova, unmap_size);
+
+ return unmap_size;
+}
+
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
+{
+ struct iommu_group *group;
+ struct device *iommu_dev;
+ struct rk_iommu *rk_iommu;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return NULL;
+ iommu_dev = iommu_group_get_iommudata(group);
+ rk_iommu = dev_get_drvdata(iommu_dev);
+ iommu_group_put(group);
+
+ return rk_iommu;
+}
+
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ int ret;
+ phys_addr_t dte_addr;
+
+ /*
+ * Allow 'virtual devices' (e.g., drm) to attach to the domain.
+ * Such a device does not belong to an iommu group.
+ */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return 0;
+
+ ret = rk_iommu_enable_stall(iommu);
+ if (ret)
+ return ret;
+
+ ret = rk_iommu_force_reset(iommu);
+ if (ret)
+ return ret;
+
+ iommu->domain = domain;
+
+ ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (ret)
+ return ret;
+
+ dte_addr = virt_to_phys(rk_domain->dt);
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
+ rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+
+ ret = rk_iommu_enable_paging(iommu);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_add_tail(&iommu->node, &rk_domain->iommus);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ dev_info(dev, "Attached to iommu domain\n");
+
+ rk_iommu_disable_stall(iommu);
+
+ return 0;
+}
+
+static void rk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+
+ /* Allow 'virtual devices' (e.g., drm) to detach from the domain */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_del_init(&iommu->node);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ /* Ignore error while disabling, just keep going */
+ rk_iommu_enable_stall(iommu);
+ rk_iommu_disable_paging(iommu);
+ rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+ rk_iommu_disable_stall(iommu);
+
+ devm_free_irq(dev, iommu->irq, iommu);
+
+ iommu->domain = NULL;
+
+ dev_info(dev, "Detached from iommu domain\n");
+}
+
+static int rk_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain;
+
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
+ if (!rk_domain)
+ return -ENOMEM;
+
+ /*
+ * rk32xx iommus use a two-level pagetable.
+ * Each level-1 (dt) and level-2 (pt) table has 1024 4-byte entries.
+ * Allocate one 4 KiB page for each table.
+ */
+ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+ if (!rk_domain->dt)
+ goto err_dt;
+
+ rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
+
+ spin_lock_init(&rk_domain->iommus_lock);
+ spin_lock_init(&rk_domain->dt_lock);
+ INIT_LIST_HEAD(&rk_domain->iommus);
+
+ domain->priv = rk_domain;
+
+ return 0;
+err_dt:
+ kfree(rk_domain);
+ return -ENOMEM;
+}
+
+static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ int i;
+
+ WARN_ON(!list_empty(&rk_domain->iommus));
+
+ for (i = 0; i < NUM_DT_ENTRIES; i++) {
+ u32 dte = rk_domain->dt[i];
+ if (rk_dte_is_pt_valid(dte)) {
+ phys_addr_t pt_phys = rk_dte_pt_address(dte);
+ u32 *page_table = phys_to_virt(pt_phys);
+ free_page((unsigned long)page_table);
+ }
+ }
+
+ free_page((unsigned long)rk_domain->dt);
+ kfree(domain->priv);
+ domain->priv = NULL;
+}
+
+static bool rk_iommu_is_dev_iommu_master(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
+ return (ret > 0);
+}
+
+static int rk_iommu_group_set_iommudata(struct iommu_group *group,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct platform_device *pd;
+ int ret;
+ struct of_phandle_args args;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
+ &args);
+ if (ret) {
+ dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
+ np->full_name, ret);
+ return ret;
+ }
+ if (args.args_count != 0) {
+ dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
+ args.np->full_name, args.args_count);
+ return -EINVAL;
+ }
+
+ pd = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!pd) {
+ dev_err(dev, "iommu %s not found\n", args.np->full_name);
+ return -EPROBE_DEFER;
+ }
+
+ /* TODO(djkurtz): handle multiple slave iommus for a single master */
+ iommu_group_set_iommudata(group, &pd->dev, NULL);
+
+ return 0;
+}
+
+static int rk_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return -ENODEV;
+
+ group = iommu_group_get(dev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ if (ret)
+ goto err_put_group;
+
+ ret = rk_iommu_group_set_iommudata(group, dev);
+ if (ret)
+ goto err_remove_device;
+
+ iommu_group_put(group);
+
+ return 0;
+
+err_remove_device:
+ iommu_group_remove_device(dev);
+err_put_group:
+ iommu_group_put(group);
+ return ret;
+}
+
+static void rk_iommu_remove_device(struct device *dev)
+{
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return;
+
+ iommu_group_remove_device(dev);
+}
+
+static const struct iommu_ops rk_iommu_ops = {
+ .domain_init = rk_iommu_domain_init,
+ .domain_destroy = rk_iommu_domain_destroy,
+ .attach_dev = rk_iommu_attach_device,
+ .detach_dev = rk_iommu_detach_device,
+ .map = rk_iommu_map,
+ .unmap = rk_iommu_unmap,
+ .add_device = rk_iommu_add_device,
+ .remove_device = rk_iommu_remove_device,
+ .iova_to_phys = rk_iommu_iova_to_phys,
+ .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
+};
+
+static int rk_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rk_iommu *iommu;
+ struct resource *res;
+
+ iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, iommu);
+ iommu->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iommu->base))
+ return PTR_ERR(iommu->base);
+
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int rk_iommu_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rk_iommu_dt_ids[] = {
+ { .compatible = "rockchip,iommu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
+#endif
+
+static struct platform_driver rk_iommu_driver = {
+ .probe = rk_iommu_probe,
+ .remove = rk_iommu_remove,
+ .driver = {
+ .name = "rk_iommu",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rk_iommu_dt_ids),
+ },
+};
+
+static int __init rk_iommu_init(void)
+{
+ int ret;
+
+ ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&rk_iommu_driver);
+}
+static void __exit rk_iommu_exit(void)
+{
+ platform_driver_unregister(&rk_iommu_driver);
+}
+
+subsys_initcall(rk_iommu_init);
+module_exit(rk_iommu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for Rockchip");
+MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
+MODULE_ALIAS("platform:rockchip-iommu");
+MODULE_LICENSE("GPL v2");
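The ops table above is what wires this driver into the generic IOMMU API once bus_set_iommu() has run. A minimal consumer-side sketch, assuming dev is a platform device that is an iommu master; the iova, size and function name are illustrative:

static int example_attach_and_map(struct device *dev, phys_addr_t paddr)
{
        struct iommu_domain *domain;
        int ret;

        domain = iommu_domain_alloc(&platform_bus_type); /* rk_iommu_domain_init() */
        if (!domain)
                return -ENOMEM;

        ret = iommu_attach_device(domain, dev); /* rk_iommu_attach_device() */
        if (ret)
                goto err_free;

        /* rk_iommu_map() fills the PTEs; the device may then DMA to this iova */
        ret = iommu_map(domain, 0x10000000, paddr, SZ_1M,
                        IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto err_detach;

        return 0;

err_detach:
        iommu_detach_device(domain, dev); /* rk_iommu_detach_device() */
err_free:
        iommu_domain_free(domain); /* rk_iommu_domain_destroy() */
        return ret;
}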
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 4f4c2a7888e5..feb29c4526f7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -684,10 +684,9 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
struct fixed_phy_status *status)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
- u32 link, duplex, pause, speed;
+ u32 duplex, pause, speed;
u32 reg;
- link = core_readl(priv, CORE_LNKSTS);
duplex = core_readl(priv, CORE_DUPSTS);
pause = core_readl(priv, CORE_PAUSESTS);
speed = core_readl(priv, CORE_SPDSTS);
@@ -701,22 +700,26 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
* which means that we need to force the link at the port override
* level to get the data to flow. We do use what the interrupt handler
* did determine before.
+ *
+ * For the other ports, we just force the link status, since this is
+ * a fixed PHY device.
*/
if (port == 7) {
status->link = priv->port_sts[port].link;
- reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7));
- reg |= SW_OVERRIDE;
- if (status->link)
- reg |= LINK_STS;
- else
- reg &= ~LINK_STS;
- core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7));
status->duplex = 1;
} else {
- status->link = !!(link & (1 << port));
+ status->link = 1;
status->duplex = !!(duplex & (1 << port));
}
+ reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ reg |= SW_OVERRIDE;
+ if (status->link)
+ reg |= LINK_STS;
+ else
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+
switch (speed) {
case SPDSTS_10:
status->speed = SPEED_10;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index b6bc318b148e..0987d2a77f9f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -66,23 +66,25 @@ static unsigned int macb_tx_ring_wrap(unsigned int index)
return index & (TX_RING_SIZE - 1);
}
-static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
+ unsigned int index)
{
- return &bp->tx_ring[macb_tx_ring_wrap(index)];
+ return &queue->tx_ring[macb_tx_ring_wrap(index)];
}
-static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
+ unsigned int index)
{
- return &bp->tx_skb[macb_tx_ring_wrap(index)];
+ return &queue->tx_skb[macb_tx_ring_wrap(index)];
}
-static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
dma_addr_t offset;
offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
- return bp->tx_ring_dma + offset;
+ return queue->tx_ring_dma + offset;
}
static unsigned int macb_rx_ring_wrap(unsigned int index)
@@ -490,38 +492,49 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
static void macb_tx_error_task(struct work_struct *work)
{
- struct macb *bp = container_of(work, struct macb, tx_error_task);
+ struct macb_queue *queue = container_of(work, struct macb_queue,
+ tx_error_task);
+ struct macb *bp = queue->bp;
struct macb_tx_skb *tx_skb;
+ struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
+ unsigned long flags;
+
+ netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
+ (unsigned int)(queue - bp->queues),
+ queue->tx_tail, queue->tx_head);
- netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
- bp->tx_tail, bp->tx_head);
+ /* Prevent the queue IRQ handlers from running: each of them may call
+ * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
+ * As explained below, we have to halt the transmission before updating
+ * TBQP registers, so we call netif_tx_stop_all_queues() to notify the
+ * networking stack that the macb/gem is halted.
+ */
+ spin_lock_irqsave(&bp->lock, flags);
/* Make sure nobody is trying to queue up new packets */
- netif_stop_queue(bp->dev);
+ netif_tx_stop_all_queues(bp->dev);
/*
* Stop transmission now
* (in case we have just queued new packets)
+ * macb/gem must be halted to write TBQP register
*/
if (macb_halt_tx(bp))
/* Just complain for now, reinitializing TX path can be good */
netdev_err(bp->dev, "BUG: halt tx timed out\n");
- /* No need for the lock here as nobody will interrupt us anymore */
-
/*
* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
*/
- for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
- struct macb_dma_desc *desc;
- u32 ctrl;
+ for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
+ u32 ctrl;
- desc = macb_tx_desc(bp, tail);
+ desc = macb_tx_desc(queue, tail);
ctrl = desc->ctrl;
- tx_skb = macb_tx_skb(bp, tail);
+ tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
if (ctrl & MACB_BIT(TX_USED)) {
@@ -529,7 +542,7 @@ static void macb_tx_error_task(struct work_struct *work)
while (!skb) {
macb_tx_unmap(bp, tx_skb);
tail++;
- tx_skb = macb_tx_skb(bp, tail);
+ tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
}
@@ -558,45 +571,56 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb);
}
+ /* Set end of TX queue */
+ desc = macb_tx_desc(queue, 0);
+ desc->addr = 0;
+ desc->ctrl = MACB_BIT(TX_USED);
+
/* Make descriptor updates visible to hardware */
wmb();
/* Reinitialize the TX desc queue */
- macb_writel(bp, TBQP, bp->tx_ring_dma);
+ queue_writel(queue, TBQP, queue->tx_ring_dma);
/* Make TX ring reflect state of hardware */
- bp->tx_head = bp->tx_tail = 0;
-
- /* Now we are ready to start transmission again */
- netif_wake_queue(bp->dev);
+ queue->tx_head = 0;
+ queue->tx_tail = 0;
/* Housework before enabling TX IRQ */
macb_writel(bp, TSR, macb_readl(bp, TSR));
- macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+ queue_writel(queue, IER, MACB_TX_INT_FLAGS);
+
+ /* Now we are ready to start transmission again */
+ netif_tx_start_all_queues(bp->dev);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+ spin_unlock_irqrestore(&bp->lock, flags);
}
-static void macb_tx_interrupt(struct macb *bp)
+static void macb_tx_interrupt(struct macb_queue *queue)
{
unsigned int tail;
unsigned int head;
u32 status;
+ struct macb *bp = queue->bp;
+ u16 queue_index = queue - bp->queues;
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(TCOMP));
+ queue_writel(queue, ISR, MACB_BIT(TCOMP));
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
(unsigned long)status);
- head = bp->tx_head;
- for (tail = bp->tx_tail; tail != head; tail++) {
+ head = queue->tx_head;
+ for (tail = queue->tx_tail; tail != head; tail++) {
struct macb_tx_skb *tx_skb;
struct sk_buff *skb;
struct macb_dma_desc *desc;
u32 ctrl;
- desc = macb_tx_desc(bp, tail);
+ desc = macb_tx_desc(queue, tail);
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -611,7 +635,7 @@ static void macb_tx_interrupt(struct macb *bp)
/* Process all buffers of the current transmitted frame */
for (;; tail++) {
- tx_skb = macb_tx_skb(bp, tail);
+ tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
/* First, update TX stats if needed */
@@ -634,11 +658,11 @@ static void macb_tx_interrupt(struct macb *bp)
}
}
- bp->tx_tail = tail;
- if (netif_queue_stopped(bp->dev)
- && CIRC_CNT(bp->tx_head, bp->tx_tail,
- TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
- netif_wake_queue(bp->dev);
+ queue->tx_tail = tail;
+ if (__netif_subqueue_stopped(bp->dev, queue_index) &&
+ CIRC_CNT(queue->tx_head, queue->tx_tail,
+ TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+ netif_wake_subqueue(bp->dev, queue_index);
}
static void gem_rx_refill(struct macb *bp)
@@ -949,11 +973,12 @@ static int macb_poll(struct napi_struct *napi, int budget)
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue = dev_id;
+ struct macb *bp = queue->bp;
+ struct net_device *dev = bp->dev;
u32 status;
- status = macb_readl(bp, ISR);
+ status = queue_readl(queue, ISR);
if (unlikely(!status))
return IRQ_NONE;
@@ -963,11 +988,13 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
while (status) {
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
- macb_writel(bp, IDR, -1);
+ queue_writel(queue, IDR, -1);
break;
}
- netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+ netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
+ (unsigned int)(queue - bp->queues),
+ (unsigned long)status);
if (status & MACB_RX_INT_FLAGS) {
/*
@@ -977,9 +1004,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* is already scheduled, so disable interrupts
* now.
*/
- macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(RCOMP));
+ queue_writel(queue, ISR, MACB_BIT(RCOMP));
if (napi_schedule_prep(&bp->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -988,17 +1015,17 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
- macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
- schedule_work(&bp->tx_error_task);
+ queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
+ schedule_work(&queue->tx_error_task);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+ queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
break;
}
if (status & MACB_BIT(TCOMP))
- macb_tx_interrupt(bp);
+ macb_tx_interrupt(queue);
/*
* Link change detection isn't possible with RMII, so we'll
@@ -1013,7 +1040,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
bp->hw_stats.macb.rx_overruns++;
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
+ queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
}
if (status & MACB_BIT(HRESP)) {
@@ -1025,10 +1052,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(HRESP));
+ queue_writel(queue, ISR, MACB_BIT(HRESP));
}
- status = macb_readl(bp, ISR);
+ status = queue_readl(queue, ISR);
}
spin_unlock(&bp->lock);
@@ -1043,10 +1070,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
*/
static void macb_poll_controller(struct net_device *dev)
{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue;
unsigned long flags;
+ unsigned int q;
local_irq_save(flags);
- macb_interrupt(dev->irq, dev);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ macb_interrupt(dev->irq, queue);
local_irq_restore(flags);
}
#endif
@@ -1058,10 +1089,11 @@ static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
}
static unsigned int macb_tx_map(struct macb *bp,
+ struct macb_queue *queue,
struct sk_buff *skb)
{
dma_addr_t mapping;
- unsigned int len, entry, i, tx_head = bp->tx_head;
+ unsigned int len, entry, i, tx_head = queue->tx_head;
struct macb_tx_skb *tx_skb = NULL;
struct macb_dma_desc *desc;
unsigned int offset, size, count = 0;
@@ -1075,7 +1107,7 @@ static unsigned int macb_tx_map(struct macb *bp,
while (len) {
size = min(len, bp->max_tx_length);
entry = macb_tx_ring_wrap(tx_head);
- tx_skb = &bp->tx_skb[entry];
+ tx_skb = &queue->tx_skb[entry];
mapping = dma_map_single(&bp->pdev->dev,
skb->data + offset,
@@ -1104,7 +1136,7 @@ static unsigned int macb_tx_map(struct macb *bp,
while (len) {
size = min(len, bp->max_tx_length);
entry = macb_tx_ring_wrap(tx_head);
- tx_skb = &bp->tx_skb[entry];
+ tx_skb = &queue->tx_skb[entry];
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
offset, size, DMA_TO_DEVICE);
@@ -1143,14 +1175,14 @@ static unsigned int macb_tx_map(struct macb *bp,
i = tx_head;
entry = macb_tx_ring_wrap(i);
ctrl = MACB_BIT(TX_USED);
- desc = &bp->tx_ring[entry];
+ desc = &queue->tx_ring[entry];
desc->ctrl = ctrl;
do {
i--;
entry = macb_tx_ring_wrap(i);
- tx_skb = &bp->tx_skb[entry];
- desc = &bp->tx_ring[entry];
+ tx_skb = &queue->tx_skb[entry];
+ desc = &queue->tx_ring[entry];
ctrl = (u32)tx_skb->size;
if (eof) {
@@ -1167,17 +1199,17 @@ static unsigned int macb_tx_map(struct macb *bp,
*/
wmb();
desc->ctrl = ctrl;
- } while (i != bp->tx_head);
+ } while (i != queue->tx_head);
- bp->tx_head = tx_head;
+ queue->tx_head = tx_head;
return count;
dma_error:
netdev_err(bp->dev, "TX DMA map failed\n");
- for (i = bp->tx_head; i != tx_head; i++) {
- tx_skb = macb_tx_skb(bp, i);
+ for (i = queue->tx_head; i != tx_head; i++) {
+ tx_skb = macb_tx_skb(queue, i);
macb_tx_unmap(bp, tx_skb);
}
@@ -1187,14 +1219,16 @@ dma_error:
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
+ struct macb_queue *queue = &bp->queues[queue_index];
unsigned long flags;
unsigned int count, nr_frags, frag_size, f;
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
- "start_xmit: len %u head %p data %p tail %p end %p\n",
- skb->len, skb->head, skb->data,
+ "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
+ queue_index, skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, 16, true);
@@ -1214,16 +1248,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
- if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) {
- netif_stop_queue(dev);
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
+ netif_stop_subqueue(dev, queue_index);
spin_unlock_irqrestore(&bp->lock, flags);
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
- bp->tx_head, bp->tx_tail);
+ queue->tx_head, queue->tx_tail);
return NETDEV_TX_BUSY;
}
/* Map socket buffer for DMA transfer */
- if (!macb_tx_map(bp, skb)) {
+ if (!macb_tx_map(bp, queue, skb)) {
dev_kfree_skb_any(skb);
goto unlock;
}
@@ -1235,8 +1269,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
- netif_stop_queue(dev);
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
+ netif_stop_subqueue(dev, queue_index);
unlock:
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1304,20 +1338,24 @@ static void macb_free_rx_buffers(struct macb *bp)
static void macb_free_consistent(struct macb *bp)
{
- if (bp->tx_skb) {
- kfree(bp->tx_skb);
- bp->tx_skb = NULL;
- }
+ struct macb_queue *queue;
+ unsigned int q;
+
bp->macbgem_ops.mog_free_rx_buffers(bp);
if (bp->rx_ring) {
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
bp->rx_ring, bp->rx_ring_dma);
bp->rx_ring = NULL;
}
- if (bp->tx_ring) {
- dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
- bp->tx_ring, bp->tx_ring_dma);
- bp->tx_ring = NULL;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ kfree(queue->tx_skb);
+ queue->tx_skb = NULL;
+ if (queue->tx_ring) {
+ dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+ queue->tx_ring, queue->tx_ring_dma);
+ queue->tx_ring = NULL;
+ }
}
}
@@ -1354,12 +1392,27 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
int size;
- size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
- bp->tx_skb = kmalloc(size, GFP_KERNEL);
- if (!bp->tx_skb)
- goto out_err;
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ size = TX_RING_BYTES;
+ queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &queue->tx_ring_dma,
+ GFP_KERNEL);
+ if (!queue->tx_ring)
+ goto out_err;
+ netdev_dbg(bp->dev,
+ "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
+ q, size, (unsigned long)queue->tx_ring_dma,
+ queue->tx_ring);
+
+ size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+ queue->tx_skb = kmalloc(size, GFP_KERNEL);
+ if (!queue->tx_skb)
+ goto out_err;
+ }
size = RX_RING_BYTES;
bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
@@ -1370,15 +1423,6 @@ static int macb_alloc_consistent(struct macb *bp)
"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
- size = TX_RING_BYTES;
- bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &bp->tx_ring_dma, GFP_KERNEL);
- if (!bp->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
-
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -1391,15 +1435,22 @@ out_err:
static void gem_init_rings(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
int i;
- for (i = 0; i < TX_RING_SIZE; i++) {
- bp->tx_ring[i].addr = 0;
- bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ queue->tx_ring[i].addr = 0;
+ queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ }
+ queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ queue->tx_head = 0;
+ queue->tx_tail = 0;
}
- bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
- bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+ bp->rx_tail = 0;
+ bp->rx_prepared_head = 0;
gem_rx_refill(bp);
}
@@ -1418,16 +1469,21 @@ static void macb_init_rings(struct macb *bp)
bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
for (i = 0; i < TX_RING_SIZE; i++) {
- bp->tx_ring[i].addr = 0;
- bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ bp->queues[0].tx_ring[i].addr = 0;
+ bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ bp->queues[0].tx_head = 0;
+ bp->queues[0].tx_tail = 0;
}
- bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+ bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
- bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+ bp->rx_tail = 0;
}
static void macb_reset_hw(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
+
/*
* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
@@ -1442,8 +1498,10 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, RSR, -1);
/* Disable all interrupts */
- macb_writel(bp, IDR, -1);
- macb_readl(bp, ISR);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, IDR, -1);
+ queue_readl(queue, ISR);
+ }
}
static u32 gem_mdc_clk_div(struct macb *bp)
@@ -1540,6 +1598,9 @@ static void macb_configure_dma(struct macb *bp)
static void macb_init_hw(struct macb *bp)
{
+ struct macb_queue *queue;
+ unsigned int q;
+
u32 config;
macb_reset_hw(bp);
@@ -1565,16 +1626,18 @@ static void macb_init_hw(struct macb *bp)
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, bp->rx_ring_dma);
- macb_writel(bp, TBQP, bp->tx_ring_dma);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, TBQP, queue->tx_ring_dma);
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+ MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
/* Enable TX and RX */
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
-
- /* Enable interrupts */
- macb_writel(bp, IER, (MACB_RX_INT_FLAGS
- | MACB_TX_INT_FLAGS
- | MACB_BIT(HRESP)));
-
}
/*
@@ -1736,7 +1799,7 @@ static int macb_open(struct net_device *dev)
/* schedule a link state check */
phy_start(bp->phy_dev);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -1746,7 +1809,7 @@ static int macb_close(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
unsigned long flags;
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
napi_disable(&bp->napi);
if (bp->phy_dev)
@@ -1895,8 +1958,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
| MACB_GREGS_VERSION;
- tail = macb_tx_ring_wrap(bp->tx_tail);
- head = macb_tx_ring_wrap(bp->tx_head);
+ tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
+ head = macb_tx_ring_wrap(bp->queues[0].tx_head);
regs_buff[0] = macb_readl(bp, NCR);
regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
@@ -1909,8 +1972,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs_buff[8] = tail;
regs_buff[9] = head;
- regs_buff[10] = macb_tx_dma(bp, tail);
- regs_buff[11] = macb_tx_dma(bp, head);
+ regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
+ regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
if (macb_is_gem(bp)) {
regs_buff[12] = gem_readl(bp, USRIO);
@@ -2061,16 +2124,44 @@ static void macb_configure_caps(struct macb *bp)
netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
}
+static void macb_probe_queues(void __iomem *mem,
+ unsigned int *queue_mask,
+ unsigned int *num_queues)
+{
+ unsigned int hw_q;
+ u32 mid;
+
+ *queue_mask = 0x1;
+ *num_queues = 1;
+
+ /* is it macb or gem? */
+ mid = __raw_readl(mem + MACB_MID);
+ if (MACB_BFEXT(IDNUM, mid) != 0x2)
+ return;
+
+ /* bit 0 is never set, but queue 0 always exists */
+ *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff;
+ *queue_mask |= 0x1;
+
+ for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
+ if (*queue_mask & (1 << hw_q))
+ (*num_queues)++;
+}
+
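/*
 * Illustrative sketch, not part of this patch: a GEM whose DCFG6
 * reports hw queues 1 and 2 (DCFG6 & 0xff == 0x06) probes as
 * queue_mask == 0x07 and num_queues == 3, since queue 0 is always
 * present; a plain macb (IDNUM != 0x2) probes as 0x01 and 1.
 */
static void example_probe_queues(void __iomem *mem)
{
        unsigned int queue_mask, num_queues;

        macb_probe_queues(mem, &queue_mask, &num_queues);
}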
static int __init macb_probe(struct platform_device *pdev)
{
struct macb_platform_data *pdata;
struct resource *regs;
struct net_device *dev;
struct macb *bp;
+ struct macb_queue *queue;
struct phy_device *phydev;
u32 config;
int err = -ENXIO;
const char *mac;
+ void __iomem *mem;
+ unsigned int hw_q, queue_mask, q, num_queues, q_irq = 0;
+ struct clk *pclk, *hclk, *tx_clk;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -2078,72 +2169,112 @@ static int __init macb_probe(struct platform_device *pdev)
goto err_out;
}
- err = -ENOMEM;
- dev = alloc_etherdev(sizeof(*bp));
- if (!dev)
- goto err_out;
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- bp = netdev_priv(dev);
- bp->pdev = pdev;
- bp->dev = dev;
-
- spin_lock_init(&bp->lock);
- INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
-
- bp->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(bp->pclk)) {
- err = PTR_ERR(bp->pclk);
+ pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(pclk)) {
+ err = PTR_ERR(pclk);
dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
- goto err_out_free_dev;
+ goto err_out;
}
- bp->hclk = devm_clk_get(&pdev->dev, "hclk");
- if (IS_ERR(bp->hclk)) {
- err = PTR_ERR(bp->hclk);
+ hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(hclk)) {
+ err = PTR_ERR(hclk);
dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
- goto err_out_free_dev;
+ goto err_out;
}
- bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- err = clk_prepare_enable(bp->pclk);
+ err = clk_prepare_enable(pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
- goto err_out_free_dev;
+ goto err_out;
}
- err = clk_prepare_enable(bp->hclk);
+ err = clk_prepare_enable(hclk);
if (err) {
dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
goto err_out_disable_pclk;
}
- if (!IS_ERR(bp->tx_clk)) {
- err = clk_prepare_enable(bp->tx_clk);
+ if (!IS_ERR(tx_clk)) {
+ err = clk_prepare_enable(tx_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n",
- err);
+ err);
goto err_out_disable_hclk;
}
}
- bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
- if (!bp->regs) {
+ err = -ENOMEM;
+ mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!mem) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
- err = -ENOMEM;
goto err_out_disable_clocks;
}
- dev->irq = platform_get_irq(pdev, 0);
- err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0,
- dev->name, dev);
- if (err) {
- dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
- dev->irq, err);
+ macb_probe_queues(mem, &queue_mask, &num_queues);
+ dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
+ if (!dev)
goto err_out_disable_clocks;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ bp = netdev_priv(dev);
+ bp->pdev = pdev;
+ bp->dev = dev;
+ bp->regs = mem;
+ bp->num_queues = num_queues;
+ bp->pclk = pclk;
+ bp->hclk = hclk;
+ bp->tx_clk = tx_clk;
+
+ spin_lock_init(&bp->lock);
+
+ /* Set the queue register mapping once and for all: queue0 has a special
+ * register mapping, and we do not want to test the queue index and then
+ * compute the corresponding register offset at run time.
+ */
+ for (hw_q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
+ if (!(queue_mask & (1 << hw_q)))
+ continue;
+
+ queue = &bp->queues[q_irq];
+ queue->bp = bp;
+ if (hw_q) {
+ queue->ISR = GEM_ISR(hw_q - 1);
+ queue->IER = GEM_IER(hw_q - 1);
+ queue->IDR = GEM_IDR(hw_q - 1);
+ queue->IMR = GEM_IMR(hw_q - 1);
+ queue->TBQP = GEM_TBQP(hw_q - 1);
+ } else {
+ /* queue0 uses legacy registers */
+ queue->ISR = MACB_ISR;
+ queue->IER = MACB_IER;
+ queue->IDR = MACB_IDR;
+ queue->IMR = MACB_IMR;
+ queue->TBQP = MACB_TBQP;
+ }
+
+ /* Get the irq: here we use the Linux queue index, not the hardware
+ * queue index. The queue irq definitions in the device tree must be
+ * packed, i.e. without the optional gaps that may exist in the
+ * hardware queue mask.
+ */
+ queue->irq = platform_get_irq(pdev, q_irq);
+ err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
+ 0, dev->name, queue);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to request IRQ %d (error %d)\n",
+ queue->irq, err);
+ goto err_out_free_irq;
+ }
+
+ INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
+ q_irq++;
}
+ dev->irq = bp->queues[0].irq;
dev->netdev_ops = &macb_netdev_ops;
netif_napi_add(dev, &bp->napi, macb_poll, 64);
@@ -2219,7 +2350,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_disable_clocks;
+ goto err_out_free_irq;
}
err = macb_mii_init(bp);
@@ -2242,15 +2373,17 @@ static int __init macb_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
+err_out_free_irq:
+ for (q = 0, queue = bp->queues; q < q_irq; ++q, ++queue)
+ devm_free_irq(&pdev->dev, queue->irq, queue);
+ free_netdev(dev);
err_out_disable_clocks:
- if (!IS_ERR(bp->tx_clk))
- clk_disable_unprepare(bp->tx_clk);
+ if (!IS_ERR(tx_clk))
+ clk_disable_unprepare(tx_clk);
err_out_disable_hclk:
- clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(hclk);
err_out_disable_pclk:
- clk_disable_unprepare(bp->pclk);
-err_out_free_dev:
- free_netdev(dev);
+ clk_disable_unprepare(pclk);
err_out:
return err;
}
@@ -2259,6 +2392,8 @@ static int __exit macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
+ struct macb_queue *queue;
+ unsigned int q;
dev = platform_get_drvdata(pdev);
@@ -2270,11 +2405,14 @@ static int __exit macb_remove(struct platform_device *pdev)
kfree(bp->mii_bus->irq);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
+ queue = bp->queues;
+ for (q = 0; q < bp->num_queues; ++q, ++queue)
+ devm_free_irq(&pdev->dev, queue->irq, queue);
+ free_netdev(dev);
if (!IS_ERR(bp->tx_clk))
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
- free_netdev(dev);
}
return 0;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 517c09d72c4a..084191b6fad2 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -12,6 +12,7 @@
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 1
+#define MACB_MAX_QUEUES 8
/* MACB register offsets */
#define MACB_NCR 0x0000
@@ -89,6 +90,13 @@
#define GEM_DCFG6 0x0294
#define GEM_DCFG7 0x0298
+#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
+#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
+#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
+#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
+#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
+#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0
#define MACB_LB_SIZE 1
@@ -376,6 +384,10 @@
__raw_readl((port)->regs + GEM_##reg)
#define gem_writel(port, reg, value) \
__raw_writel((value), (port)->regs + GEM_##reg)
+#define queue_readl(queue, reg) \
+ __raw_readl((queue)->bp->regs + (queue)->reg)
+#define queue_writel(queue, reg, value) \
+ __raw_writel((value), (queue)->bp->regs + (queue)->reg)
/*
* Conditional GEM/MACB macros. These perform the operation to the correct
@@ -597,6 +609,23 @@ struct macb_config {
unsigned int dma_burst_length;
};
+struct macb_queue {
+ struct macb *bp;
+ int irq;
+
+ unsigned int ISR;
+ unsigned int IER;
+ unsigned int IDR;
+ unsigned int IMR;
+ unsigned int TBQP;
+
+ unsigned int tx_head, tx_tail;
+ struct macb_dma_desc *tx_ring;
+ struct macb_tx_skb *tx_skb;
+ dma_addr_t tx_ring_dma;
+ struct work_struct tx_error_task;
+};
+
struct macb {
void __iomem *regs;
@@ -607,9 +636,8 @@ struct macb {
void *rx_buffers;
size_t rx_buffer_size;
- unsigned int tx_head, tx_tail;
- struct macb_dma_desc *tx_ring;
- struct macb_tx_skb *tx_skb;
+ unsigned int num_queues;
+ struct macb_queue queues[MACB_MAX_QUEUES];
spinlock_t lock;
struct platform_device *pdev;
@@ -618,7 +646,6 @@ struct macb {
struct clk *tx_clk;
struct net_device *dev;
struct napi_struct napi;
- struct work_struct tx_error_task;
struct net_device_stats stats;
union {
struct macb_stats macb;
@@ -626,7 +653,6 @@ struct macb {
} hw_stats;
dma_addr_t rx_ring_dma;
- dma_addr_t tx_ring_dma;
dma_addr_t rx_buffers_dma;
struct macb_or_gem_ops macbgem_ops;
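The queue_readl()/queue_writel() macros above work because probe stores a per-queue register offset once. A short worked sketch of that mapping, with hypothetical queue pointers q0 and q2 for hw queues 0 and 2:

struct macb_queue *q0, *q2; /* hypothetical, set up as in macb_probe() */
u32 status;

q0->ISR = MACB_ISR;       /* queue 0 keeps the legacy register */
q2->ISR = GEM_ISR(2 - 1); /* 0x0400 + (1 << 2) == 0x0404 */

status = queue_readl(q2, ISR); /* __raw_readl(q2->bp->regs + 0x0404) */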
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index a18d33fdb271..5ab5c3133acd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -392,7 +392,7 @@ struct port_info {
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
s8 mdio_addr;
- u8 port_type;
+ enum fw_port_type port_type;
u8 mod_type;
u8 port_id;
u8 tx_chan;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 973dbb7938c3..ccf3436024bc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2325,7 +2325,7 @@ static int identify_port(struct net_device *dev,
return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
-static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
+static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
{
unsigned int v = 0;
@@ -2354,14 +2354,20 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
SUPPORTED_10000baseKX4_Full;
else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
+ type == FW_PORT_TYPE_FIBER_XAUI ||
+ type == FW_PORT_TYPE_SFP ||
+ type == FW_PORT_TYPE_QSFP_10G ||
+ type == FW_PORT_TYPE_QSA) {
v |= SUPPORTED_FIBRE;
if (caps & FW_PORT_CAP_SPEED_1G)
v |= SUPPORTED_1000baseT_Full;
if (caps & FW_PORT_CAP_SPEED_10G)
v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_BP40_BA)
+ } else if (type == FW_PORT_TYPE_BP40_BA ||
+ type == FW_PORT_TYPE_QSFP) {
v |= SUPPORTED_40000baseSR4_Full;
+ v |= SUPPORTED_FIBRE;
+ }
if (caps & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
@@ -2396,6 +2402,7 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->port = PORT_FIBRE;
else if (p->port_type == FW_PORT_TYPE_SFP ||
p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSA ||
p->port_type == FW_PORT_TYPE_QSFP) {
if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
p->mod_type == FW_PORT_MOD_TYPE_SR ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index beaf80a6214b..291b6f219708 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -560,6 +560,7 @@ enum fw_flowc_mnem {
FW_FLOWC_MNEM_RCVNXT,
FW_FLOWC_MNEM_SNDBUF,
FW_FLOWC_MNEM_MSS,
+ FW_FLOWC_MNEM_TXDATAPLEN_MAX,
};
struct fw_flowc_mnemval {
@@ -2470,6 +2471,7 @@ enum fw_port_type {
FW_PORT_TYPE_BP4_AP,
FW_PORT_TYPE_QSFP_10G,
FW_PORT_TYPE_QSFP,
+ FW_PORT_TYPE_QSA,
FW_PORT_TYPE_BP40_BA,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 316c5e5a92ad..7ec2d74f94d3 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -4,7 +4,7 @@
config DM9000
tristate "DM9000 support"
- depends on ARM || BLACKFIN || MIPS || COLDFIRE
+ depends on ARM || BLACKFIN || MIPS || COLDFIRE || NIOS2
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index ee1ecb146df7..eb088b129bc7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -615,14 +615,14 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD))
+ if (!rx_desc->d.staterr)
break;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
- * RXD_STATUS_DD bit is set
+ * descriptor has been written back
*/
- rmb();
+ dma_rmb();
/* retrieve a buffer from the ring */
skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2e526d4904a6..ff59897a9463 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6910,14 +6910,14 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ if (!rx_desc->wb.upper.status_error)
break;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
+ * descriptor has been written back
*/
- rmb();
+ dma_rmb();
/* retrieve a buffer from the ring */
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 798b05556e1b..2ed2c7de2304 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2009,15 +2009,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
- if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+ if (!rx_desc->wb.upper.status_error)
break;
- /*
- * This memory barrier is needed to keep us from reading
+ /* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
+ * descriptor has been written back
*/
- rmb();
+ dma_rmb();
/* retrieve a buffer from the ring */
skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 4a1be34d7214..44ce7d88f554 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1364,8 +1364,8 @@ err_out_free_rx_resources:
jme_free_rx_resources(jme);
out_enable_tasklet:
tasklet_enable(&jme->txclean_task);
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_enable(&jme->rxclean_task);
+ tasklet_enable(&jme->rxempty_task);
out:
atomic_inc(&jme->link_changing);
}
@@ -2408,8 +2408,8 @@ static inline void jme_resume_rx(struct jme_adapter *jme)
if (test_bit(JME_FLAG_POLL, &jme->flags)) {
JME_NAPI_ENABLE(jme);
} else {
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_enable(&jme->rxclean_task);
+ tasklet_enable(&jme->rxempty_task);
}
dpi->cur = PCC_P1;
dpi->attempt = PCC_P1;
@@ -3290,8 +3290,8 @@ jme_suspend(struct device *dev)
}
tasklet_enable(&jme->txclean_task);
- tasklet_hi_enable(&jme->rxclean_task);
- tasklet_hi_enable(&jme->rxempty_task);
+ tasklet_enable(&jme->rxclean_task);
+ tasklet_enable(&jme->rxempty_task);
jme_powersave_phy(jme);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3dad7e884952..14a1c5cec3a5 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5919,7 +5919,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
-static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
@@ -5954,6 +5954,24 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_enable(tp, false);
}
+static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ static const struct ephy_info e_info_8168g_1[] = {
+ { 0x00, 0x0000, 0x0008 },
+ { 0x0c, 0x37d0, 0x0820 },
+ { 0x1e, 0x0000, 0x0001 },
+ { 0x19, 0x8000, 0x0000 }
+ };
+
+ rtl_hw_start_8168g(tp);
+
+ /* disable aspm and clock request before accessing ephy */
+ RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
+ RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+}
+
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -5964,7 +5982,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{ 0x1e, 0xffff, 0x20eb }
};
- rtl_hw_start_8168g_1(tp);
+ rtl_hw_start_8168g(tp);
/* disable aspm and clock request before accessing the ephy */
RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
@@ -5983,7 +6001,7 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{ 0x1e, 0x0000, 0x2000 }
};
- rtl_hw_start_8168g_1(tp);
+ rtl_hw_start_8168g(tp);
/* disable aspm and clock request before accessing the ephy */
RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
@@ -6605,6 +6623,9 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+ /* Force memory writes to complete before releasing descriptor */
+ dma_wmb();
+
desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
@@ -6612,7 +6633,6 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
u32 rx_buf_sz)
{
desc->addr = cpu_to_le64(mapping);
- wmb();
rtl8169_mark_to_asic(desc, rx_buf_sz);
}
@@ -7073,16 +7093,18 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
- wmb();
+ /* Force memory writes to complete before releasing descriptor */
+ dma_wmb();
/* Anti gcc 2.95.3 bugware (sic) */
status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
txd->opts1 = cpu_to_le32(status);
- tp->cur_tx += frags + 1;
-
+ /* Force all memory writes to complete before notifying device */
wmb();
+ tp->cur_tx += frags + 1;
+
RTL_W8(TxPoll, NPQ);
mmiowb();
@@ -7181,11 +7203,16 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
struct ring_info *tx_skb = tp->tx_skb + entry;
u32 status;
- rmb();
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the Tx descriptor until
+ * we know the status of DescOwn
+ */
+ dma_rmb();
+
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
@@ -7280,11 +7307,16 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
- rmb();
status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
-
if (status & DescOwn)
break;
+
+ /* This barrier is needed to keep us from reading
+ * any other fields out of the Rx descriptor until
+ * we know the status of DescOwn
+ */
+ dma_rmb();
+
if (unlikely(status & RxRES)) {
netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
status);
@@ -7346,7 +7378,6 @@ process_pkt:
}
release_descriptor:
desc->opts2 = 0;
- wmb();
rtl8169_mark_to_asic(desc, rx_buf_sz);
}
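Taken together, the r8169 hunks split the old catch-all barriers into two classes: dma_wmb()/dma_rmb() order descriptor-ring accesses against each other, while a full wmb() survives only where descriptor writes must also be ordered before the MMIO doorbell. A condensed sketch of the resulting transmit sequence (names and fields are illustrative, not the driver's exact code):

	static void tx_one(void __iomem *ioaddr, struct TxDesc *txd,
			   u32 opts, u32 len)
	{
		/* 1. Fill in every field except the ownership word. */
		txd->opts2 = cpu_to_le32(opts);

		/*
		 * 2. dma_wmb(): those stores must be visible to the device
		 *    before it can observe DescOwn being set.
		 */
		dma_wmb();
		txd->opts1 = cpu_to_le32(DescOwn | len);

		/*
		 * 3. wmb(): order the coherent-memory descriptor writes
		 *    before the MMIO doorbell that kicks off the DMA.
		 */
		wmb();
		writeb(NPQ, ioaddr + TxPoll);
	}

The receive side is the mirror image: check DescOwn with a plain load, dma_rmb(), then read the rest of the descriptor.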
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 753630f5d3d3..627926800ff3 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_SMSC
bool "SMC (SMSC)/Western Digital devices"
default y
depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
- BLACKFIN || MN10300 || COLDFIRE || XTENSA || PCI || PCMCIA
+ BLACKFIN || MN10300 || COLDFIRE || XTENSA || NIOS2 || PCI || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -39,7 +39,7 @@ config SMC91X
select CRC32
select MII
depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
- MN10300 || COLDFIRE || ARM64 || XTENSA)
+ MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2)
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 90c86cd3be14..45c408ef67d0 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -466,23 +466,6 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
return err;
}
-static u32 next_idx(u32 idx, struct vio_dring_state *dr)
-{
- if (++idx == dr->num_entries)
- idx = 0;
- return idx;
-}
-
-static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
-{
- if (idx == 0)
- idx = dr->num_entries - 1;
- else
- idx--;
-
- return idx;
-}
-
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
struct vio_dring_state *dr,
u32 index)
@@ -556,7 +539,8 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
int ack_start = -1, ack_end = -1;
bool send_ack = true;
- end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+ end = (end == (u32) -1) ? vio_dring_prev(dr, start)
+ : vio_dring_next(dr, end);
viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
@@ -570,7 +554,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
if (ack_start == -1)
ack_start = start;
ack_end = start;
- start = next_idx(start, dr);
+ start = vio_dring_next(dr, start);
if (ack && start != end) {
err = vnet_send_ack(port, dr, ack_start, ack_end,
VIO_DRING_ACTIVE);
@@ -584,7 +568,7 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
}
}
if (unlikely(ack_start == -1))
- ack_start = ack_end = prev_idx(start, dr);
+ ack_start = ack_end = vio_dring_prev(dr, start);
if (send_ack) {
port->napi_resume = false;
return vnet_send_ack(port, dr, ack_start, ack_end,
@@ -633,7 +617,7 @@ static int idx_is_pending(struct vio_dring_state *dr, u32 end)
found = 1;
break;
}
- idx = next_idx(idx, dr);
+ idx = vio_dring_next(dr, idx);
}
return found;
}
@@ -663,7 +647,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
/* sync for race conditions with vnet_start_xmit() and tell xmit it
* is time to send a trigger.
*/
- dr->cons = next_idx(end, dr);
+ dr->cons = vio_dring_next(dr, end);
desc = vio_dring_entry(dr, dr->cons);
if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
/* vnet_start_xmit() just populated this dring but missed
@@ -784,7 +768,7 @@ ldc_ctrl:
pkt->tag.stype = VIO_SUBTYPE_INFO;
pkt->tag.stype_env = VIO_DRING_DATA;
pkt->seq = dr->rcv_nxt;
- pkt->start_idx = next_idx(port->napi_stop_idx, dr);
+ pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
pkt->end_idx = -1;
goto napi_resume;
}
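The deleted next_idx()/prev_idx() helpers are replaced by shared vio_dring_next()/vio_dring_prev(), which sunvnet now uses everywhere it steps through the descriptor ring. Judging from the removed code, the shared helpers presumably reduce to the same modular arithmetic (a sketch; the authoritative definitions live with the sparc VIO headers):

	static inline u32 vio_dring_next(struct vio_dring_state *dr, u32 index)
	{
		if (++index == dr->num_entries)
			index = 0;
		return index;
	}

	static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
	{
		if (index == 0)
			return dr->num_entries - 1;
		return index - 1;
	}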
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 47872caa0081..3ad0e6e16c39 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -274,6 +274,7 @@ struct phy_device *fixed_phy_register(unsigned int irq,
return phy;
}
+EXPORT_SYMBOL_GPL(fixed_phy_register);
static int __init fixed_mdio_bus_init(void)
{
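With the symbol exported, modular callers can create a fixed-link PHY directly instead of relying on built-in users. A hedged usage sketch, with the link parameters chosen purely for illustration:

	#include <linux/phy.h>
	#include <linux/phy_fixed.h>

	/* Illustrative: register an always-up 100 Mb/s full-duplex link. */
	static struct phy_device *example_fixed_link(struct device_node *np)
	{
		struct fixed_phy_status status = {
			.link	= 1,
			.speed	= 100,
			.duplex	= 1,
		};

		return fixed_phy_register(PHY_POLL, &status, np);
	}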
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index ff8a027a4afb..d2ab06048169 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -93,6 +93,7 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
for (i = 0; i < sinfo->nskt; i++)
soc_pcmcia_remove_one(&sinfo->skt[i]);
+ clk_put(sinfo->clk);
kfree(sinfo);
return 0;
}
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 65b02c3e14ce..7bae7e549d8b 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -145,6 +145,12 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
return -ENOMEM;
s->soc.nr = ops->first + i;
+ s->soc.clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(s->soc.clk)) {
+ ret = PTR_ERR(s->soc.clk);
+ kfree(s);
+ return ret;
+ }
soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
s->dev = dev;
if (s->soc.nr) {
@@ -220,6 +226,7 @@ static int pcmcia_remove(struct sa1111_dev *dev)
for (; s; s = next) {
next = s->next;
soc_pcmcia_remove_one(&s->soc);
+ clk_put(s->soc.clk);
kfree(s);
}
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 54d3089d157b..cf6de2c2b329 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -135,14 +135,16 @@ sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
static int
sa1100_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
{
- return sa1100_pcmcia_set_mecr(skt, cpufreq_get(0));
+ unsigned long clk = clk_get_rate(skt->clk);
+
+ return sa1100_pcmcia_set_mecr(skt, clk / 1000);
}
static int
sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
{
struct soc_pcmcia_timing timing;
- unsigned int clock = cpufreq_get(0);
+ unsigned int clock = clk_get_rate(skt->clk);
unsigned long mecr = MECR;
char *p = buf;
@@ -218,6 +220,11 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
struct skt_dev_info *sinfo;
struct soc_pcmcia_socket *skt;
int i, ret = 0;
+ struct clk *clk;
+
+ clk = clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
sa11xx_drv_pcmcia_ops(ops);
@@ -226,12 +233,14 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
return -ENOMEM;
sinfo->nskt = nr;
+ sinfo->clk = clk;
/* Initialize processor specific parameters */
for (i = 0; i < nr; i++) {
skt = &sinfo->skt[i];
skt->nr = first + i;
+ skt->clk = clk;
soc_pcmcia_init_one(skt, ops, dev);
ret = sa11xx_drv_pcmcia_add_one(skt);
@@ -242,6 +251,7 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
if (ret) {
while (--i >= 0)
soc_pcmcia_remove_one(&sinfo->skt[i]);
+ clk_put(clk);
kfree(sinfo);
} else {
dev_set_drvdata(dev, sinfo);
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index a2bc6ee1702e..933f4657515b 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -120,6 +120,8 @@ static void __soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt,
if (skt->ops->hw_shutdown)
skt->ops->hw_shutdown(skt);
+
+ clk_disable_unprepare(skt->clk);
}
static void soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
@@ -131,6 +133,8 @@ static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret = 0, i;
+ clk_prepare_enable(skt->clk);
+
if (skt->ops->hw_init) {
ret = skt->ops->hw_init(skt);
if (ret)
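Across the four PCMCIA files the clock handling follows the standard clk API pairing: clk_get() when the socket is created, clk_prepare_enable()/clk_disable_unprepare() bracketing hardware init and shutdown, and clk_put() on every remove and error path. Reduced to a skeleton (names are illustrative):

	static int example_add(struct device *dev, struct soc_pcmcia_socket *skt)
	{
		skt->clk = clk_get(dev, NULL);		/* take a reference */
		if (IS_ERR(skt->clk))
			return PTR_ERR(skt->clk);
		return 0;
	}

	static int example_hw_init(struct soc_pcmcia_socket *skt)
	{
		return clk_prepare_enable(skt->clk);	/* clock on */
	}

	static void example_hw_shutdown(struct soc_pcmcia_socket *skt)
	{
		clk_disable_unprepare(skt->clk);	/* clock off */
	}

	static void example_remove(struct soc_pcmcia_socket *skt)
	{
		clk_put(skt->clk);			/* drop the reference */
	}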
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 69fbfc89efb6..a83d2ceded83 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -75,6 +75,7 @@ typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
+static inline int send_tx_flowc_wr(struct cxgbi_sock *);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
.name = DRV_MODULE_NAME,
@@ -157,12 +158,6 @@ static struct scsi_transport_template *cxgb4i_stt;
#define RCV_BUFSIZ_MASK 0x3FFU
#define MAX_IMM_TX_PKT_LEN 128
-static inline void set_queue(struct sk_buff *skb, unsigned int queue,
- const struct cxgbi_sock *csk)
-{
- skb->queue_mapping = queue;
-}
-
static int push_tx_frames(struct cxgbi_sock *, int);
/*
@@ -172,10 +167,14 @@ static int push_tx_frames(struct cxgbi_sock *, int);
* Returns true if a packet can be sent as an offload WR with immediate
* data. We currently use the same limit as for Ethernet packets.
*/
-static inline int is_ofld_imm(const struct sk_buff *skb)
+static inline bool is_ofld_imm(const struct sk_buff *skb)
{
- return skb->len <= (MAX_IMM_TX_PKT_LEN -
- sizeof(struct fw_ofld_tx_data_wr));
+ int len = skb->len;
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+ len += sizeof(struct fw_ofld_tx_data_wr);
+
+ return len <= MAX_IMM_TX_PKT_LEN;
}
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
@@ -388,13 +387,19 @@ static void send_abort_req(struct cxgbi_sock *csk)
if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
return;
+
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ send_tx_flowc_wr(csk);
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+ }
+
cxgbi_sock_set_state(csk, CTP_ABORTING);
cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
cxgbi_sock_purge_write_queue(csk);
csk->cpl_abort_req = NULL;
req = (struct cpl_abort_req *)skb->head;
- set_queue(skb, CPL_PRIORITY_DATA, csk);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
req->cmd = CPL_ABORT_SEND_RST;
t4_set_arp_err_handler(skb, csk, abort_arp_failure);
INIT_TP_WR(req, csk->tid);
@@ -420,7 +425,7 @@ static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
csk, csk->state, csk->flags, csk->tid, rst_status);
csk->cpl_abort_rpl = NULL;
- set_queue(skb, CPL_PRIORITY_DATA, csk);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
INIT_TP_WR(rpl, csk->tid);
OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
rpl->cmd = rst_status;
@@ -491,20 +496,40 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
return flits + sgl_len(cnt);
}
-static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
+#define FLOWC_WR_NPARAMS_MIN 9
+static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
+{
+ int nparams, flowclen16, flowclen;
+
+ nparams = FLOWC_WR_NPARAMS_MIN;
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+ /*
+ * Return the number of 16-byte credits used by the FlowC request.
+ * Pass back the nparams and actual FlowC length if requested.
+ */
+ if (nparamsp)
+ *nparamsp = nparams;
+ if (flowclenp)
+ *flowclenp = flowclen;
+
+ return flowclen16;
+}
+
+static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
struct sk_buff *skb;
struct fw_flowc_wr *flowc;
- int flowclen, i;
+ int nparams, flowclen16, flowclen;
- flowclen = 80;
+ flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
flowc = (struct fw_flowc_wr *)skb->head;
flowc->op_to_nparams =
- htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8));
+ htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
flowc->flowid_len16 =
- htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) |
- FW_WR_FLOWID_V(csk->tid));
+ htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
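For the nine parameters used today, the new arithmetic reproduces the old hard-coded sizes exactly; each fw_flowc_mnemval is 8 bytes after an 8-byte header:

	/*
	 * Worked instance of tx_flowc_wr_credits() for nparams = 9:
	 *
	 *   flowclen   = offsetof(struct fw_flowc_wr, mnemval[9])
	 *              = 8 + 9 * 8            = 80 bytes
	 *   flowclen16 = DIV_ROUND_UP(80, 16) = 5 credits
	 *   flowclen   = 5 * 16               = 80 bytes (already aligned)
	 *
	 * i.e. the same 80 bytes and the same 5 credits that the removed
	 * constants (flowclen = 80 here, the +5 credit adjustment deleted
	 * from push_tx_frames() below) encoded by hand.
	 */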
@@ -523,12 +548,10 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
flowc->mnemval[7].val = htonl(csk->advmss);
flowc->mnemval[8].mnemonic = 0;
flowc->mnemval[8].val = 0;
- for (i = 0; i < 9; i++) {
- flowc->mnemval[i].r4[0] = 0;
- flowc->mnemval[i].r4[1] = 0;
- flowc->mnemval[i].r4[2] = 0;
- }
- set_queue(skb, CPL_PRIORITY_DATA, csk);
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ flowc->mnemval[8].val = htonl(16384);
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
@@ -537,6 +560,8 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
csk->advmss);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+
+ return flowclen16;
}
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
@@ -545,10 +570,11 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
struct fw_ofld_tx_data_wr *req;
unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
unsigned int wr_ulp_mode = 0, val;
+ bool imm = is_ofld_imm(skb);
req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
- if (is_ofld_imm(skb)) {
+ if (imm) {
req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
FW_WR_COMPL_F |
FW_WR_IMMDLEN_V(dlen));
@@ -597,16 +623,32 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
int dlen = skb->len;
int len = skb->len;
unsigned int credits_needed;
+ int flowclen16 = 0;
skb_reset_transport_header(skb);
if (is_ofld_imm(skb))
- credits_needed = DIV_ROUND_UP(dlen +
- sizeof(struct fw_ofld_tx_data_wr), 16);
+ credits_needed = DIV_ROUND_UP(dlen, 16);
else
- credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
- + sizeof(struct fw_ofld_tx_data_wr),
+ credits_needed = DIV_ROUND_UP(
+ 8 * calc_tx_flits_ofld(skb),
+ 16);
+
+ if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+ credits_needed += DIV_ROUND_UP(
+ sizeof(struct fw_ofld_tx_data_wr),
16);
+ /*
+ * Assumes the initial credit allocation is large enough to
+ * cover the fw_flowc_wr plus the largest possible first payload.
+ */
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ flowclen16 = send_tx_flowc_wr(csk);
+ csk->wr_cred -= flowclen16;
+ csk->wr_una_cred += flowclen16;
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+ }
+
if (csk->wr_cred < credits_needed) {
log_debug(1 << CXGBI_DBG_PDU_TX,
"csk 0x%p, skb %u/%u, wr %d < %u.\n",
@@ -615,8 +657,8 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
break;
}
__skb_unlink(skb, &csk->write_queue);
- set_queue(skb, CPL_PRIORITY_DATA, csk);
- skb->csum = credits_needed;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+ skb->csum = credits_needed + flowclen16;
csk->wr_cred -= credits_needed;
csk->wr_una_cred += credits_needed;
cxgbi_sock_enqueue_wr(csk, skb);
@@ -627,12 +669,6 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
csk->wr_cred, csk->wr_una_cred);
if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
- if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
- send_tx_flowc_wr(csk);
- skb->csum += 5;
- csk->wr_cred -= 5;
- csk->wr_una_cred += 5;
- }
len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
make_tx_data_wr(csk, skb, dlen, len, credits_needed,
req_completion);
@@ -807,6 +843,13 @@ static void csk_act_open_retry_timer(unsigned long data)
}
+static inline bool is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
struct cxgbi_sock *csk;
@@ -828,7 +871,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
"csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
atid, tid, status, csk, csk->state, csk->flags);
- if (status == CPL_ERR_RTX_NEG_ADVICE)
+ if (is_neg_adv(status))
goto rel_skb;
module_put(THIS_MODULE);
@@ -934,8 +977,7 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
(&csk->saddr), (&csk->daddr),
csk, csk->state, csk->flags, csk->tid, req->status);
- if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
- req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+ if (is_neg_adv(req->status))
goto rel_skb;
cxgbi_sock_get(csk);
@@ -989,6 +1031,27 @@ rel_skb:
__kfree_skb(skb);
}
+static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbi_sock *csk;
+ struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
+ unsigned int tid = GET_TID(cpl);
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (!csk) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ } else {
+ /* not expecting this, reset the connection. */
+ pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
+ spin_lock_bh(&csk->lock);
+ send_abort_req(csk);
+ spin_unlock_bh(&csk->lock);
+ }
+ __kfree_skb(skb);
+}
+
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
struct cxgbi_sock *csk;
@@ -1408,6 +1471,7 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
[CPL_RX_DATA_DDP] = do_rx_data_ddp,
[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+ [CPL_RX_DATA] = do_rx_data,
};
int cxgb4i_ofld_init(struct cxgbi_device *cdev)
@@ -1485,7 +1549,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
return -ENOMEM;
}
req = (struct ulp_mem_io *)skb->head;
- set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
idata = (struct ulptx_idata *)(req + 1);
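set_wr_txq() is the helper cxgb4 already exports for this purpose; unlike the deleted set_queue() stub, it encodes both the priority and the per-port queue into the skb's queue mapping, so work requests actually land on the intended TX queue. Its shape in cxgb4.h is roughly (a sketch; verify against the header):

	static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
	{
		skb_set_queue_mapping(skb, (queue << 1) | prio);
	}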
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 7da59c38a69e..eb58afcfb73b 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2294,10 +2294,12 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
return err;
}
- kfree_skb(skb);
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
+
+ kfree_skb(skb);
+
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
return err;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 2c7cb1c0c453..aba1af720df6 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -317,8 +317,8 @@ static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
}
-static inline int cxgbi_skcb_test_flag(struct sk_buff *skb,
- enum cxgbi_skcb_flags flag)
+static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
+ enum cxgbi_skcb_flags flag)
{
return test_bit(flag, &(cxgbi_skcb_flags(skb)));
}
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 9690216d38ff..c0abe276ee55 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -178,6 +178,7 @@
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <video/sa1100fb.h>
@@ -416,9 +417,9 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->transp.offset);
#ifdef CONFIG_CPU_FREQ
- dev_dbg(fbi->dev, "dma period = %d ps, clock = %d kHz\n",
+ dev_dbg(fbi->dev, "dma period = %d ps, clock = %ld kHz\n",
sa1100fb_display_dma_period(var),
- cpufreq_get(smp_processor_id()));
+ clk_get_rate(fbi->clk) / 1000);
#endif
return 0;
@@ -592,9 +593,10 @@ static struct fb_ops sa1100fb_ops = {
* Calculate the PCD value from the clock rate (in picoseconds).
* We take account of the PPCR clock setting.
*/
-static inline unsigned int get_pcd(unsigned int pixclock, unsigned int cpuclock)
+static inline unsigned int get_pcd(struct sa1100fb_info *fbi,
+ unsigned int pixclock)
{
- unsigned int pcd = cpuclock / 100;
+ unsigned int pcd = clk_get_rate(fbi->clk) / 100 / 1000;
pcd *= pixclock;
pcd /= 10000000;
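The units in get_pcd() are easy to lose: clk_get_rate() returns Hz, var->pixclock is in picoseconds, and the two divisions keep the intermediate product inside 32 bits. A worked instance with illustrative numbers:

	/*
	 * Assume clk_get_rate() = 206000000 Hz and pixclock = 171521 ps
	 * (both illustrative):
	 *
	 *   pcd  = 206000000 / 100 / 1000 = 2060        (100 kHz units)
	 *   pcd *= 171521                 = 353333260
	 *   pcd /= 10000000               = 35
	 *
	 * i.e. pcd ~= clock_Hz * pixclock_in_seconds: the number of LCD
	 * clock cycles per pixel, computed without 32-bit overflow.
	 */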
@@ -673,7 +675,7 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
LCCR2_BegFrmDel(var->upper_margin) +
LCCR2_EndFrmDel(var->lower_margin);
- pcd = get_pcd(var->pixclock, cpufreq_get(0));
+ pcd = get_pcd(fbi, var->pixclock);
new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->inf->lccr3 |
(var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) |
(var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL);
@@ -787,6 +789,9 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
fbi->palette_cpu[0] &= 0xcfff;
fbi->palette_cpu[0] |= palette_pbs(&fbi->fb.var);
+ /* enable LCD controller clock */
+ clk_prepare_enable(fbi->clk);
+
/* Sequence from 11.7.10 */
writel_relaxed(fbi->reg_lccr3, fbi->base + LCCR3);
writel_relaxed(fbi->reg_lccr2, fbi->base + LCCR2);
@@ -831,6 +836,9 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
schedule_timeout(20 * HZ / 1000);
remove_wait_queue(&fbi->ctrlr_wait, &wait);
+
+ /* disable LCD controller clock */
+ clk_disable_unprepare(fbi->clk);
}
/*
@@ -1009,7 +1017,6 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
void *data)
{
struct sa1100fb_info *fbi = TO_INF(nb, freq_transition);
- struct cpufreq_freqs *f = data;
u_int pcd;
switch (val) {
@@ -1018,7 +1025,7 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
break;
case CPUFREQ_POSTCHANGE:
- pcd = get_pcd(fbi->fb.var.pixclock, f->new);
+ pcd = get_pcd(fbi, fbi->fb.var.pixclock);
fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd);
set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE);
break;
@@ -1225,6 +1232,13 @@ static int sa1100fb_probe(struct platform_device *pdev)
if (!fbi)
goto failed;
+ fbi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(fbi->clk)) {
+ ret = PTR_ERR(fbi->clk);
+ fbi->clk = NULL;
+ goto failed;
+ }
+
fbi->base = ioremap(res->start, resource_size(res));
if (!fbi->base)
goto failed;
@@ -1277,6 +1291,8 @@ static int sa1100fb_probe(struct platform_device *pdev)
failed:
if (fbi)
iounmap(fbi->base);
+ if (fbi && fbi->clk)
+ clk_put(fbi->clk);
kfree(fbi);
release_mem_region(res->start, resource_size(res));
return ret;
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index fc5d4292fad6..0139d13377a5 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -68,6 +68,7 @@ struct sa1100fb_info {
#endif
const struct sa1100fb_mach_info *inf;
+ struct clk *clk;
};
#define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member)