author		Keshavamurthy, Anil S <anil.s.keshavamurthy@intel.com>	2007-10-21 16:41:41 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-22 08:13:18 -0700
commit		10e5247f40f3bf7508a0ed2848c9cae37bddf4bc (patch)
tree		adca606f00ebcbdbdc5c474f012105d7e59152f6 /include/acpi
parent		89910cccb8fec0c1140d33a743e72a712efd4f05 (diff)
Intel IOMMU: DMAR detection and parsing logic
This patch supports the upcoming Intel IOMMU hardware, a.k.a. Intel(R) Virtualization Technology for Directed I/O Architecture. The hardware spec can be found at http://www.intel.com/technology/virtualization/index.htm

FAQ! (questions from akpm, answers from ak)

> So... what's all this code for?
>
> I assume that the intent here is to speed things up under Xen, etc?

Yes in some cases, but not this code. That would be the Xen version of this code, which could potentially assign whole devices to guests. I expect this to be useful only in some special cases, though, because most hardware is not virtualizable and you typically want a separate instance for each guest. At some point KVM might implement this too; it would likely use this code for that.

> Do we have any benchmark results to help us to decide whether a merge would
> be justified?

The main advantage of doing it in the normal kernel is not performance, but more safety. Broken devices won't be able to corrupt memory by doing random DMA. Unfortunately that doesn't work for graphics yet; for that, user space interfaces for the X server are needed.

There are some potential performance benefits too:

- When a device cannot address the complete address range, an IOMMU can remap its memory instead of bounce buffering. Remapping is likely cheaper than copying.

- The IOMMU can merge sg lists into a single virtual block. This could potentially speed up SG IO when the device is slow at walking SG lists. [I long ago benchmarked a 5% gain on some block benchmark with an old MPT Fusion, but it probably depends a lot on the HBA.]

And you get better driver debugging, because unexpected memory accesses from the devices will cause a trappable event.

> Does it slow anything down?

It adds more overhead to each IO, so yes.

This patch:

Add support for early detection and parsing of DMARs (DMA Remapping units) reported to the OS via ACPI tables.

DMA remapping (DMAR) device support enables independent address translations for Direct Memory Access (DMA) from devices. These DMA remapping devices are reported via ACPI tables and include the PCI device scope covered by each DMA remapping device.

For detailed information on the "Intel(R) Virtualization Technology for Directed I/O Architecture" specification, please see http://www.intel.com/technology/virtualization/index.htm

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Greg KH <greg@kroah.com>
Cc: Len Brown <lenb@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
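For reference, the parsing this patch introduces boils down to: locate the ACPI table whose signature is "DMAR", read its fixed header (host address width plus the new flags byte), then walk the variable-length remapping structures by their type/length pairs. The snippet below is a minimal userspace-style sketch of that walk against local mirrors of the structures from the diff; parse_dmar and the mirrored type names are illustrative only, not the kernel's actual implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Local, packed mirrors of the ACPI layouts touched by this patch.
 * They exist only so the sketch is self-contained.
 */
struct acpi_table_header { char signature[4]; uint32_t length; uint8_t rest[28]; } __attribute__((packed));
struct acpi_table_dmar   { struct acpi_table_header header; uint8_t width; uint8_t flags; uint8_t reserved[10]; } __attribute__((packed));
struct acpi_dmar_header  { uint16_t type; uint16_t length; } __attribute__((packed));

enum { DMAR_TYPE_HARDWARE_UNIT = 0, DMAR_TYPE_RESERVED_MEMORY = 1, DMAR_TYPE_ATSR = 2 };

/* Walk every remapping-structure subtable in a raw DMAR table blob. */
void parse_dmar(const uint8_t *blob)
{
	const struct acpi_table_dmar *dmar = (const void *)blob;
	const uint8_t *p   = blob + sizeof(*dmar);	/* subtables follow the fixed header */
	const uint8_t *end = blob + dmar->header.length;

	/* The width field stores the host address width minus one. */
	printf("host address width: %u bits\n", (unsigned)(dmar->width + 1));

	while (p + sizeof(struct acpi_dmar_header) <= end) {
		const struct acpi_dmar_header *sub = (const void *)p;

		if (sub->length < sizeof(*sub) || p + sub->length > end)
			break;			/* malformed subtable, stop walking */

		switch (sub->type) {
		case DMAR_TYPE_HARDWARE_UNIT:
			printf("DRHD subtable, %u bytes\n", (unsigned)sub->length);
			break;
		case DMAR_TYPE_RESERVED_MEMORY:
			printf("RMRR subtable, %u bytes\n", (unsigned)sub->length);
			break;
		case DMAR_TYPE_ATSR:
			printf("ATSR subtable, %u bytes\n", (unsigned)sub->length);
			break;
		default:
			printf("reserved subtable type %u\n", (unsigned)sub->type);
			break;
		}
		p += sub->length;		/* length covers the whole subtable */
	}
}

In the kernel the same walk runs during early boot against the firmware-provided table; the sketch only shows the pointer arithmetic implied by the type/length encoding.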
Diffstat (limited to 'include/acpi')
-rw-r--r--	include/acpi/actbl1.h	27
1 file changed, 20 insertions, 7 deletions
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 4e5d3ca53a8e..a1b1b2ee3e51 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -257,7 +257,8 @@ struct acpi_table_dbgp {
 struct acpi_table_dmar {
 	struct acpi_table_header header;	/* Common ACPI table header */
 	u8 width;		/* Host Address Width */
-	u8 reserved[11];
+	u8 flags;
+	u8 reserved[10];
 };
 
 /* DMAR subtable header */
@@ -265,8 +266,6 @@ struct acpi_table_dmar {
 struct acpi_dmar_header {
 	u16 type;
 	u16 length;
-	u8 flags;
-	u8 reserved[3];
 };
 
 /* Values for subtable type in struct acpi_dmar_header */
@@ -274,13 +273,15 @@ struct acpi_dmar_header {
 enum acpi_dmar_type {
 	ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
 	ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
-	ACPI_DMAR_TYPE_RESERVED = 2	/* 2 and greater are reserved */
+	ACPI_DMAR_TYPE_ATSR = 2,
+	ACPI_DMAR_TYPE_RESERVED = 3	/* 3 and greater are reserved */
 };
 
 struct acpi_dmar_device_scope {
 	u8 entry_type;
 	u8 length;
-	u8 segment;
+	u16 reserved;
+	u8 enumeration_id;
 	u8 bus;
 };
@@ -290,7 +291,14 @@ enum acpi_dmar_scope_type {
 	ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
 	ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
 	ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
-	ACPI_DMAR_SCOPE_TYPE_RESERVED = 3	/* 3 and greater are reserved */
+	ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
+	ACPI_DMAR_SCOPE_TYPE_HPET = 4,
+	ACPI_DMAR_SCOPE_TYPE_RESERVED = 5	/* 5 and greater are reserved */
+};
+
+struct acpi_dmar_pci_path {
+	u8 dev;
+	u8 fn;
 };
 
 /*
@@ -301,6 +309,9 @@ enum acpi_dmar_scope_type {
 struct acpi_dmar_hardware_unit {
 	struct acpi_dmar_header header;
+	u8 flags;
+	u8 reserved;
+	u16 segment;
 	u64 address;		/* Register Base Address */
 };
@@ -312,7 +323,9 @@ struct acpi_dmar_hardware_unit {
 struct acpi_dmar_reserved_memory {
 	struct acpi_dmar_header header;
-	u64 address;		/* 4_k aligned base address */
+	u16 reserved;
+	u16 segment;
+	u64 base_address;	/* 4_k aligned base address */
 	u64 end_address;	/* 4_k aligned limit address */
 };