author	Matt Domsch <Matt_Domsch@dell.com>	2006-09-29 15:23:23 -0500
committer	Greg Kroah-Hartman <gregkh@suse.de>	2006-10-18 11:36:12 -0700
commit	6b4b78fed47e7380dfe9280b154e8b9bfcd4c86c (patch)
tree	9fbf5135f4ba87fc68681bcd8996d509cac6f9e9 /drivers/pci
parent	49c61cca2b6591a28ffa4abb73c718091f569746 (diff)
PCI: optionally sort device lists breadth-first
Problem:
New Dell PowerEdge servers have 2 embedded ethernet ports, which are labeled NIC1 and NIC2 on the chassis, in the BIOS setup screens, and in the printed documentation. Assuming no other add-in ethernet ports in the system, Linux 2.4 kernels name these eth0 and eth1 respectively. Many people have come to expect this naming. Linux 2.6 kernels name these eth1 and eth0 respectively (backwards from expectations). I also have reports that various Sun and HP servers have similar behavior.

Root cause:
Linux 2.4 kernels walk the pci_devices list, which happens to be sorted in breadth-first order (or pcbios_find_device order on i386, which most often is breadth-first also). 2.6 kernels have both the pci_devices list and the pci_bus_type.klist_devices list; the latter is what is walked at driver load time to match the pci_id tables, and this klist happens to be in depth-first order.

On systems where, for physical routing reasons, NIC1 appears on a lower bus number than NIC2, but NIC2's bridge is discovered first in the depth-first ordering, NIC2 will be discovered before NIC1. If the list were sorted breadth-first, NIC1 would be discovered before NIC2.

A PowerEdge 1955 system has the following topology, which easily exhibits the difference between depth-first and breadth-first device lists:

-[0000:00]-+-00.0  Intel Corporation 5000P Chipset Memory Controller Hub
           +-02.0-[0000:03-08]--+-00.0-[0000:04-07]--+-00.0-[0000:05-06]----00.0-[0000:06]----00.0  Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet
           |                    (labeled NIC2, 2.4 kernel name eth1, 2.6 kernel name eth0)
           +-1c.0-[0000:01-02]----00.0-[0000:02]----00.0  Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet
                                (labeled NIC1, 2.4 kernel name eth0, 2.6 kernel name eth1)

Other factors, such as device driver load order and the presence of PCI slots at various points in the bus hierarchy, further complicate this problem; I'm not trying to solve those here, just restore the device order, and thus the basic behavior, that 2.4 kernels had.

Solution:
The solution can come in multiple steps.

Suggested fix #1: kernel
The patch below optionally sorts the two device lists into breadth-first ordering to maintain compatibility with 2.4 kernels. It adds two new command line options:

    pci=bfsort
    pci=nobfsort

to force the sort order, or not, as you wish. It also adds DMI checks for the specific Dell systems which exhibit "backwards" ordering, to make them "right".

Suggested fix #2: udev rules from userland
Many people also have the expectation that embedded NICs are always discovered before add-in NICs (which this patch does not try to do). Using the PCI IRQ Routing Table provided by the system BIOS, it's easy to determine which PCI devices are embedded, or, if add-in, which PCI slot they're in. I'm working on a tool that would let udev name ethernet devices in ascending embedded, slot 1 .. slot N order, subsorted by PCI bus/dev/fn breadth-first. It will also be usable independently of udev, for those distributions that don't use udev in their installers.

Suggested fix #3: system board routing rules
One can constrain the system board layout to put NIC1 ahead of NIC2 regardless of breadth-first or depth-first discovery order. This adds a significant level of complexity to board routing and may not be possible in all instances (witness the above systems from several major manufacturers). I don't want to encourage this particular train of thought too far, at the expense of not doing #1 or #2 above.

Feedback appreciated.
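For reference, here is a rough sketch of how suggested fix #1's pci=bfsort / pci=nobfsort options and the DMI checks could be wired up in arch code. It is an illustration only, not code from this commit (the hunk below touches just drivers/pci/probe.c): the state enum, pci_parse_bfsort(), set_bf_sort(), and the DMI table contents are assumptions.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/dmi.h>

/* Illustrative sketch only -- not part of the hunk below. */
enum pci_bf_sort_state {
        pci_bf_sort_default,
        pci_force_nobf,         /* user passed pci=nobfsort */
        pci_force_bf,           /* user passed pci=bfsort */
        pci_dmi_bf,             /* enabled by a DMI match */
};
static enum pci_bf_sort_state pci_bf_sort = pci_bf_sort_default;

/* Called from the arch's "pci=" option parser with one option token. */
static void __init pci_parse_bfsort(const char *str)
{
        if (!strcmp(str, "bfsort"))
                pci_bf_sort = pci_force_bf;
        else if (!strcmp(str, "nobfsort"))
                pci_bf_sort = pci_force_nobf;
}

/* DMI callback: turn bfsort on unless the user already decided. */
static int __init set_bf_sort(struct dmi_system_id *d)
{
        if (pci_bf_sort == pci_bf_sort_default) {
                pci_bf_sort = pci_dmi_bf;
                printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n",
                       d->ident);
        }
        return 0;
}

/* Passed to dmi_check_system() early in PCI init; entries are examples. */
static struct dmi_system_id __initdata pciprobe_dmi_table[] = {
        {
                .callback = set_bf_sort,
                .ident = "Dell PowerEdge 1955",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
                },
        },
        {}
};

The intent is that an explicit pci=bfsort or pci=nobfsort always wins, while a DMI match only changes the default.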
Patch tested on a Dell PowerEdge 1955 blade with 2.6.18.

You'll also note I took some liberty here: the sort temporarily breaks the klist abstraction to simplify and speed up the algorithm. I think that's both safe and appropriate in this instance.

Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
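For contrast with the klist liberty taken here: the abstraction-respecting way to walk a bus klist goes through the iterator API rather than touching k_list or k_lock directly. A minimal sketch (the helper name is made up for illustration):

#include <linux/device.h>
#include <linux/klist.h>

/*
 * "By the book" traversal of a bus's device klist.  klist_next()
 * takes and drops a node reference on each step -- exactly the
 * overhead the in-place sort in this patch avoids by holding
 * k_lock and reordering the raw list.
 */
static void example_walk_bus_devices(struct bus_type *bus)
{
        struct klist_iter i;
        struct klist_node *n;
        struct device *dev;

        klist_iter_init(&bus->klist_devices, &i);
        while ((n = klist_next(&i))) {
                dev = container_of(n, struct device, knode_bus);
                /* inspect dev here; no direct list surgery */
        }
        klist_iter_exit(&i);
}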
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/probe.c	92
1 file changed, 92 insertions(+), 0 deletions(-)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index a3b0a5eb5054..e159d6604494 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1067,3 +1067,95 @@ EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL(pci_scan_single_device);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
#endif
+
+static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b)
+{
+        if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
+        else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
+
+        if      (a->bus->number < b->bus->number) return -1;
+        else if (a->bus->number > b->bus->number) return  1;
+
+        if      (a->devfn < b->devfn) return -1;
+        else if (a->devfn > b->devfn) return  1;
+
+        return 0;
+}
+
+/*
+ * Yes, this forcibly breaks the klist abstraction temporarily.  It
+ * just wants to sort the klist, not change reference counts and
+ * take/drop locks rapidly in the process.  It does all this while
+ * holding the lock for the list, so objects can't otherwise be
+ * added/removed while we're swizzling.
+ */
+static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list)
+{
+        struct list_head *pos;
+        struct klist_node *n;
+        struct device *dev;
+        struct pci_dev *b;
+
+        list_for_each(pos, list) {
+                n = container_of(pos, struct klist_node, n_node);
+                dev = container_of(n, struct device, knode_bus);
+                b = to_pci_dev(dev);
+                if (pci_sort_bf_cmp(a, b) <= 0) {
+                        list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node);
+                        return;
+                }
+        }
+        list_move_tail(&a->dev.knode_bus.n_node, list);
+}
+
+static void __init pci_sort_breadthfirst_klist(void)
+{
+        LIST_HEAD(sorted_devices);
+        struct list_head *pos, *tmp;
+        struct klist_node *n;
+        struct device *dev;
+        struct pci_dev *pdev;
+
+        spin_lock(&pci_bus_type.klist_devices.k_lock);
+        list_for_each_safe(pos, tmp, &pci_bus_type.klist_devices.k_list) {
+                n = container_of(pos, struct klist_node, n_node);
+                dev = container_of(n, struct device, knode_bus);
+                pdev = to_pci_dev(dev);
+                pci_insertion_sort_klist(pdev, &sorted_devices);
+        }
+        list_splice(&sorted_devices, &pci_bus_type.klist_devices.k_list);
+        spin_unlock(&pci_bus_type.klist_devices.k_lock);
+}
+
+static void __init pci_insertion_sort_devices(struct pci_dev *a, struct list_head *list)
+{
+        struct pci_dev *b;
+
+        list_for_each_entry(b, list, global_list) {
+                if (pci_sort_bf_cmp(a, b) <= 0) {
+                        list_move_tail(&a->global_list, &b->global_list);
+                        return;
+                }
+        }
+        list_move_tail(&a->global_list, list);
+}
+
+static void __init pci_sort_breadthfirst_devices(void)
+{
+        LIST_HEAD(sorted_devices);
+        struct pci_dev *dev, *tmp;
+
+        down_write(&pci_bus_sem);
+        list_for_each_entry_safe(dev, tmp, &pci_devices, global_list) {
+                pci_insertion_sort_devices(dev, &sorted_devices);
+        }
+        list_splice(&sorted_devices, &pci_devices);
+        up_write(&pci_bus_sem);
+}
+
+void __init pci_sort_breadthfirst(void)
+{
+        pci_sort_breadthfirst_devices();
+        pci_sort_breadthfirst_klist();
+}
+
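Note that this hunk only adds the sort helpers; nothing in probe.c calls them. The call site would live in arch init code, gated on the bfsort decision. A hedged sketch, reusing the illustrative pci_bf_sort state from above (not part of this diff):

/* Illustrative call site; the real hook-up belongs in arch code. */
static int __init example_pcibios_init(void)
{
        /* ... bus enumeration has already run at this point ... */

        if (pci_bf_sort == pci_force_bf || pci_bf_sort == pci_dmi_bf)
                pci_sort_breadthfirst();

        return 0;
}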