Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile           |   4
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c        |  44
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S  |  10
-rw-r--r--  arch/powerpc/kernel/cputable.c         |   4
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c      |  11
-rw-r--r--  arch/powerpc/kernel/eeh.c              | 176
-rw-r--r--  arch/powerpc/kernel/eeh_cache.c        |  25
-rw-r--r--  arch/powerpc/kernel/eeh_dev.c          |  14
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c       |  22
-rw-r--r--  arch/powerpc/kernel/eeh_pe.c           | 129
-rw-r--r--  arch/powerpc/kernel/entry_64.S         |  24
-rw-r--r--  arch/powerpc/kernel/idle_power7.S      |   1
-rw-r--r--  arch/powerpc/kernel/mce_power.c        |  53
-rw-r--r--  arch/powerpc/kernel/nvram_64.c         | 677
-rw-r--r--  arch/powerpc/kernel/of_platform.c      |   2
-rw-r--r--  arch/powerpc/kernel/pci-common.c       |  57
-rw-r--r--  arch/powerpc/kernel/pci-hotplug.c      |   9
-rw-r--r--  arch/powerpc/kernel/pci_dn.c           | 309
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c      |   9
-rw-r--r--  arch/powerpc/kernel/process.c          |   9
-rw-r--r--  arch/powerpc/kernel/prom_init.c        |   4
-rw-r--r--  arch/powerpc/kernel/rtas.c             |  30
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c         |  49
-rw-r--r--  arch/powerpc/kernel/setup_64.c         |  20
-rw-r--r--  arch/powerpc/kernel/syscalls.c         |  17
-rw-r--r--  arch/powerpc/kernel/systbl.S           |   2
-rw-r--r--  arch/powerpc/kernel/systbl_chk.c       |   2
-rw-r--r--  arch/powerpc/kernel/tm.S               |   8
-rw-r--r--  arch/powerpc/kernel/udbg.c             |   2
-rw-r--r--  arch/powerpc/kernel/vector.S           |  24
30 files changed, 1439 insertions, 308 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 502cf69b6c89..c1ebbdaac28f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -33,7 +33,8 @@ obj-y := cputable.o ptrace.o syscalls.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o dma.o \
- misc_$(CONFIG_WORD_SIZE).o vdso32/
+ misc_$(CONFIG_WORD_SIZE).o vdso32/ \
+ of_platform.o prom_parse.o
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
@@ -47,7 +48,6 @@ obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
-obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index ae77b7e59889..c641983bbdd6 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -61,12 +61,22 @@ struct cache_type_info {
};
/* These are used to index the cache_type_info array. */
-#define CACHE_TYPE_UNIFIED 0
-#define CACHE_TYPE_INSTRUCTION 1
-#define CACHE_TYPE_DATA 2
+#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
+#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
+#define CACHE_TYPE_INSTRUCTION 2
+#define CACHE_TYPE_DATA 3
static const struct cache_type_info cache_type_info[] = {
{
+ /* Embedded systems that use cache-size, cache-block-size,
+ * etc. for the Unified (typically L2) cache. */
+ .name = "Unified",
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ },
+ {
/* PowerPC Processor binding says the [di]-cache-*
* must be equal on unified caches, so just use
* d-cache properties. */
@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
{
struct cache *iter;
- if (cache->type == CACHE_TYPE_UNIFIED)
+ if (cache->type == CACHE_TYPE_UNIFIED ||
+ cache->type == CACHE_TYPE_UNIFIED_D)
return cache;
list_for_each_entry(iter, &cache_list, list)
@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
return of_get_property(np, "cache-unified", NULL);
}
-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
- int level)
+/*
+ * Unified caches can have two different sets of tags. Most embedded
+ * use cache-size, etc. for the unified cache size, but open firmware systems
+ * use d-cache-size, etc. Check on initialization for which type we have, and
+ * return the appropriate structure type. Assume it's embedded if it isn't
+ * open firmware. If it's yet a 3rd type, then there will be missing entries
+ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
+ * to be extended further.
+ */
+static int cache_is_unified_d(const struct device_node *np)
{
- struct cache *cache;
+ return of_get_property(np,
+ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
+ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
+}
+/*
+ */
+static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
+{
pr_debug("creating L%d ucache for %s\n", level, node->full_name);
- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
-
- return cache;
+ return new_cache(cache_is_unified_d(node), level, node);
}
static struct cache *cache_do_one_devnode_split(struct device_node *node,
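Illustrative sketch (not part of the patch): the decision that the new cache_is_unified_d() makes is a plain property lookup. of_get_property() and the two property names come from the hunk above; the helper name below is made up for illustration.

static int pick_unified_cache_type(const struct device_node *np)
{
	/* Open Firmware systems publish d-cache-size even for unified caches */
	if (of_get_property(np, "d-cache-size", NULL))
		return CACHE_TYPE_UNIFIED_D;

	/* embedded systems describe the unified cache with plain cache-size */
	return CACHE_TYPE_UNIFIED;
}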
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 46733535cc0b..9c9b7411b28b 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -137,15 +137,11 @@ __init_HFSCR:
/*
* Clear the TLB using the specified IS form of tlbiel instruction
* (invalidate by congruence class). P7 has 128 CCs., P8 has 512.
- *
- * r3 = IS field
*/
__init_tlb_power7:
- li r3,0xc00 /* IS field = 0b11 */
-_GLOBAL(__flush_tlb_power7)
li r6,128
mtctr r6
- mr r7,r3 /* IS field */
+ li r7,0xc00 /* IS field = 0b11 */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
@@ -154,11 +150,9 @@ _GLOBAL(__flush_tlb_power7)
1: blr
__init_tlb_power8:
- li r3,0xc00 /* IS field = 0b11 */
-_GLOBAL(__flush_tlb_power8)
li r6,512
mtctr r6
- mr r7,r3 /* IS field */
+ li r7,0xc00 /* IS field = 0b11 */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
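For readers decoding the magic numbers in the two loops above, here is a sketch of the RB operand layout they imply. The bit positions are an assumption inferred from the 0xc00 initial value and the 0x1000 stride; the EX_* names are placeholders, not kernel symbols.

#define EX_TLBIEL_IS_SET	0xc00	/* IS = 0b11 in bits 11:10, invalidate by congruence class */
#define EX_TLBIEL_SET_SHIFT	12	/* set index lives at bit 12 and up, hence the 0x1000 stride */

static inline unsigned long ex_tlbiel_rb(unsigned int set)
{
	/* P7 iterates set = 0..127, P8 iterates set = 0..511 */
	return EX_TLBIEL_IS_SET | ((unsigned long)set << EX_TLBIEL_SET_SHIFT);
}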
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f83046878336..60262fdf35ba 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -71,8 +71,8 @@ extern void __restore_cpu_power7(void);
extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power8(void);
extern void __restore_cpu_a2(void);
-extern void __flush_tlb_power7(unsigned long inval_selector);
-extern void __flush_tlb_power8(unsigned long inval_selector);
+extern void __flush_tlb_power7(unsigned int action);
+extern void __flush_tlb_power8(unsigned int action);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 735979764cd4..6e8d764ce47b 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -116,16 +116,13 @@ void __init swiotlb_detect_4g(void)
}
}
-static int __init swiotlb_late_init(void)
+static int __init check_swiotlb_enabled(void)
{
- if (ppc_swiotlb_enable) {
+ if (ppc_swiotlb_enable)
swiotlb_print_info();
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- } else {
+ else
swiotlb_free();
- }
return 0;
}
-subsys_initcall(swiotlb_late_init);
+subsys_initcall(check_swiotlb_enabled);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 3b2252e7731b..a4c62eb0ee48 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -164,30 +164,34 @@ __setup("eeh=", eeh_setup);
*/
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
- struct device_node *dn = eeh_dev_to_of_node(edev);
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
u32 cfg;
int cap, i;
int n = 0, l = 0;
char buffer[128];
- n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
- pr_warn("EEH: of node=%s\n", dn->full_name);
+ n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
+ edev->phb->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
+ pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
+ edev->phb->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
- eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg);
+ eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
pr_warn("EEH: PCI device/vendor: %08x\n", cfg);
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg);
+ eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg);
n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);
/* Gather bridge-specific registers */
if (edev->mode & EEH_DEV_BRIDGE) {
- eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg);
+ eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
pr_warn("EEH: Bridge secondary status: %04x\n", cfg);
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg);
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
pr_warn("EEH: Bridge control: %04x\n", cfg);
}
@@ -195,11 +199,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
/* Dump out the PCI-X command and status regs */
cap = edev->pcix_cap;
if (cap) {
- eeh_ops->read_config(dn, cap, 4, &cfg);
+ eeh_ops->read_config(pdn, cap, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
pr_warn("EEH: PCI-X cmd: %08x\n", cfg);
- eeh_ops->read_config(dn, cap+4, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
pr_warn("EEH: PCI-X status: %08x\n", cfg);
}
@@ -211,7 +215,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
pr_warn("EEH: PCI-E capabilities and status follow:\n");
for (i=0; i<=8; i++) {
- eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
@@ -238,7 +242,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
pr_warn("EEH: PCI-E AER capability register set follows:\n");
for (i=0; i<=13; i++) {
- eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
@@ -414,11 +418,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
int ret;
int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
unsigned long flags;
- struct device_node *dn;
+ struct pci_dn *pdn;
struct pci_dev *dev;
struct eeh_pe *pe, *parent_pe, *phb_pe;
int rc = 0;
- const char *location;
+ const char *location = NULL;
eeh_stats.total_mmio_ffs++;
@@ -429,15 +433,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_stats.no_dn++;
return 0;
}
- dn = eeh_dev_to_of_node(edev);
dev = eeh_dev_to_pci_dev(edev);
pe = eeh_dev_to_pe(edev);
/* Access to IO BARs might get this far and still not want checking. */
if (!pe) {
eeh_stats.ignored_check++;
- pr_debug("EEH: Ignored check for %s %s\n",
- eeh_pci_name(dev), dn->full_name);
+ pr_debug("EEH: Ignored check for %s\n",
+ eeh_pci_name(dev));
return 0;
}
@@ -473,10 +476,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
if (pe->state & EEH_PE_ISOLATED) {
pe->check_count++;
if (pe->check_count % EEH_MAX_FAILS == 0) {
- location = of_get_property(dn, "ibm,loc-code", NULL);
+ pdn = eeh_dev_to_pdn(edev);
+ if (pdn->node)
+ location = of_get_property(pdn->node, "ibm,loc-code", NULL);
printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
- pe->check_count, location,
+ pe->check_count,
+ location ? location : "unknown",
eeh_driver_name(dev), eeh_pci_name(dev));
printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
eeh_driver_name(dev));
@@ -667,6 +673,55 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
return rc;
}
+static void *eeh_disable_and_save_dev_state(void *data, void *userdata)
+{
+ struct eeh_dev *edev = data;
+ struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
+ struct pci_dev *dev = userdata;
+
+ /*
+ * The caller should have disabled and saved the
+ * state for the specified device
+ */
+ if (!pdev || pdev == dev)
+ return NULL;
+
+ /* Ensure we have D0 power state */
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* Save device state */
+ pci_save_state(pdev);
+
+ /*
+ * Disable device to avoid any DMA traffic and
+ * interrupt from the device
+ */
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ return NULL;
+}
+
+static void *eeh_restore_dev_state(void *data, void *userdata)
+{
+ struct eeh_dev *edev = data;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+ struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
+ struct pci_dev *dev = userdata;
+
+ if (!pdev)
+ return NULL;
+
+ /* Apply customization from firmware */
+ if (pdn && eeh_ops->restore_config)
+ eeh_ops->restore_config(pdn);
+
+ /* The caller should restore state for the specified device */
+ if (pdev != dev)
+ pci_save_state(pdev);
+
+ return NULL;
+}
+
/**
* pcibios_set_pcie_slot_reset - Set PCI-E reset state
* @dev: pci device struct
@@ -689,13 +744,19 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
switch (state) {
case pcie_deassert_reset:
eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
+ eeh_unfreeze_pe(pe, false);
eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+ eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
break;
case pcie_hot_reset:
+ eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
+ eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_HOT);
break;
case pcie_warm_reset:
+ eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
+ eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
break;
@@ -815,15 +876,15 @@ out:
*/
void eeh_save_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn;
int i;
- struct device_node *dn;
- if (!edev)
+ pdn = eeh_dev_to_pdn(edev);
+ if (!pdn)
return;
- dn = eeh_dev_to_of_node(edev);
for (i = 0; i < 16; i++)
- eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
+ eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]);
/*
* For PCI bridges including root port, we need enable bus
@@ -914,7 +975,7 @@ static struct notifier_block eeh_reboot_nb = {
int eeh_init(void)
{
struct pci_controller *hose, *tmp;
- struct device_node *phb;
+ struct pci_dn *pdn;
static int cnt = 0;
int ret = 0;
@@ -949,20 +1010,9 @@ int eeh_init(void)
return ret;
/* Enable EEH for all adapters */
- if (eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb = hose->dn;
- traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
- }
- } else if (eeh_has_flag(EEH_PROBE_MODE_DEV)) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node)
- pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL);
- } else {
- pr_warn("%s: Invalid probe mode %x",
- __func__, eeh_subsystem_flags);
- return -EINVAL;
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ pdn = hose->pci_data;
+ traverse_pci_dn(pdn, eeh_ops->probe, NULL);
}
/*
@@ -987,8 +1037,8 @@ int eeh_init(void)
core_initcall_sync(eeh_init);
/**
- * eeh_add_device_early - Enable EEH for the indicated device_node
- * @dn: device node for which to set up EEH
+ * eeh_add_device_early - Enable EEH for the indicated device node
+ * @pdn: PCI device node for which to set up EEH
*
* This routine must be used to perform EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
@@ -998,44 +1048,41 @@ core_initcall_sync(eeh_init);
* on the CEC architecture, type of the device, on earlier boot
* command-line arguments & etc.
*/
-void eeh_add_device_early(struct device_node *dn)
+void eeh_add_device_early(struct pci_dn *pdn)
{
struct pci_controller *phb;
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
- /*
- * If we're doing EEH probe based on PCI device, we
- * would delay the probe until late stage because
- * the PCI device isn't available this moment.
- */
- if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
+ if (!edev || !eeh_enabled())
return;
- if (!of_node_to_eeh_dev(dn))
- return;
- phb = of_node_to_eeh_dev(dn)->phb;
-
/* USB Bus children of PCI devices will not have BUID's */
- if (NULL == phb || 0 == phb->buid)
+ phb = edev->phb;
+ if (NULL == phb ||
+ (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid))
return;
- eeh_ops->of_probe(dn, NULL);
+ eeh_ops->probe(pdn, NULL);
}
/**
* eeh_add_device_tree_early - Enable EEH for the indicated device
- * @dn: device node
+ * @pdn: PCI device node
*
* This routine must be used to perform EEH initialization for the
* indicated PCI device that was added after system boot (e.g.
* hotplug, dlpar).
*/
-void eeh_add_device_tree_early(struct device_node *dn)
+void eeh_add_device_tree_early(struct pci_dn *pdn)
{
- struct device_node *sib;
+ struct pci_dn *n;
- for_each_child_of_node(dn, sib)
- eeh_add_device_tree_early(sib);
- eeh_add_device_early(dn);
+ if (!pdn)
+ return;
+
+ list_for_each_entry(n, &pdn->child_list, list)
+ eeh_add_device_tree_early(n);
+ eeh_add_device_early(pdn);
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
@@ -1048,7 +1095,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
*/
void eeh_add_device_late(struct pci_dev *dev)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
if (!dev || !eeh_enabled())
@@ -1056,8 +1103,8 @@ void eeh_add_device_late(struct pci_dev *dev)
pr_debug("EEH: Adding device %s\n", pci_name(dev));
- dn = pci_device_to_OF_node(dev);
- edev = of_node_to_eeh_dev(dn);
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ edev = pdn_to_eeh_dev(pdn);
if (edev->pdev == dev) {
pr_debug("EEH: Already referenced !\n");
return;
@@ -1089,13 +1136,6 @@ void eeh_add_device_late(struct pci_dev *dev)
edev->pdev = dev;
dev->dev.archdata.edev = edev;
- /*
- * We have to do the EEH probe here because the PCI device
- * hasn't been created yet in the early stage.
- */
- if (eeh_has_flag(EEH_PROBE_MODE_DEV))
- eeh_ops->dev_probe(dev, NULL);
-
eeh_addr_cache_insert_dev(dev);
}
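The eeh.c changes above all follow one conversion: eeh_ops callbacks that used to take a struct device_node * now take a struct pci_dn *. A sketch of the affected members, with argument order taken from the call sites in this patch (the return types, and the full definition in asm/eeh.h, are assumptions):

struct eeh_ops_sketch {
	int   (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val);
	int   (*write_config)(struct pci_dn *pdn, int where, int size, u32 val);
	void *(*probe)(struct pci_dn *pdn, void *data);
	int   (*restore_config)(struct pci_dn *pdn);
};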
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 07d8a2423a61..eeabeabea49c 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -171,30 +171,27 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
int i;
- dn = pci_device_to_OF_node(dev);
- if (!dn) {
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ if (!pdn) {
pr_warn("PCI: no pci dn found for dev=%s\n",
pci_name(dev));
return;
}
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(pdn);
if (!edev) {
- pr_warn("PCI: no EEH dev found for dn=%s\n",
- dn->full_name);
+ pr_warn("PCI: no EEH dev found for %s\n",
+ pci_name(dev));
return;
}
/* Skip any devices for which EEH is not enabled. */
if (!edev->pe) {
-#ifdef DEBUG
- pr_info("PCI: skip building address cache for=%s - %s\n",
- pci_name(dev), dn->full_name);
-#endif
+ dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
return;
}
@@ -282,18 +279,18 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
*/
void eeh_addr_cache_build(void)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
struct pci_dev *dev = NULL;
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
for_each_pci_dev(dev) {
- dn = pci_device_to_OF_node(dev);
- if (!dn)
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ if (!pdn)
continue;
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(pdn);
if (!edev)
continue;
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index e5274ee9a75f..aabba94ff9cb 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -43,13 +43,13 @@
/**
* eeh_dev_init - Create EEH device according to OF node
- * @dn: device node
+ * @pdn: PCI device node
* @data: PHB
*
* It will create EEH device according to the given OF node. The function
 * might be called by PCI emulation, DR, PHB hotplug.
*/
-void *eeh_dev_init(struct device_node *dn, void *data)
+void *eeh_dev_init(struct pci_dn *pdn, void *data)
{
struct pci_controller *phb = data;
struct eeh_dev *edev;
@@ -63,8 +63,8 @@ void *eeh_dev_init(struct device_node *dn, void *data)
}
/* Associate EEH device with OF node */
- PCI_DN(dn)->edev = edev;
- edev->dn = dn;
+ pdn->edev = edev;
+ edev->pdn = pdn;
edev->phb = phb;
INIT_LIST_HEAD(&edev->list);
@@ -80,16 +80,16 @@ void *eeh_dev_init(struct device_node *dn, void *data)
*/
void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
{
- struct device_node *dn = phb->dn;
+ struct pci_dn *root = phb->pci_data;
/* EEH PE for PHB */
eeh_phb_pe_create(phb);
/* EEH device for PHB */
- eeh_dev_init(dn, phb);
+ eeh_dev_init(root, phb);
/* EEH devices for children OF nodes */
- traverse_pci_devices(dn, eeh_dev_init, phb);
+ traverse_pci_dn(root, eeh_dev_init, phb);
}
/**
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d099540c0f56..24768ff3cb73 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -83,28 +83,6 @@ static inline void eeh_pcid_put(struct pci_dev *pdev)
module_put(pdev->driver->driver.owner);
}
-#if 0
-static void print_device_node_tree(struct pci_dn *pdn, int dent)
-{
- int i;
- struct device_node *pc;
-
- if (!pdn)
- return;
- for (i = 0; i < dent; i++)
- printk(" ");
- printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
- pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
- pdn->eeh_pe_config_addr, pdn->node->full_name);
- dent += 3;
- pc = pdn->node->child;
- while (pc) {
- print_device_node_tree(PCI_DN(pc), dent);
- pc = pc->sibling;
- }
-}
-#endif
-
/**
* eeh_disable_irq - Disable interrupt for the recovering device
* @dev: PCI device
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 1e4946c36f9e..35f0b62259bb 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -291,27 +291,25 @@ struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
*/
static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
{
- struct device_node *dn;
struct eeh_dev *parent;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
/*
* It might have the case for the indirect parent
* EEH device already having associated PE, but
* the direct parent EEH device doesn't have yet.
*/
- dn = edev->dn->parent;
- while (dn) {
+ pdn = pdn ? pdn->parent : NULL;
+ while (pdn) {
/* We're poking out of PCI territory */
- if (!PCI_DN(dn)) return NULL;
-
- parent = of_node_to_eeh_dev(dn);
- /* We're poking out of PCI territory */
- if (!parent) return NULL;
+ parent = pdn_to_eeh_dev(pdn);
+ if (!parent)
+ return NULL;
if (parent->pe)
return parent->pe;
- dn = dn->parent;
+ pdn = pdn->parent;
}
return NULL;
@@ -330,6 +328,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
struct eeh_pe *pe, *parent;
+ /* Check if the PE number is valid */
+ if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
+ pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n",
+ __func__, edev->config_addr, edev->phb->global_number);
+ return -EINVAL;
+ }
+
/*
* Search the PE has been existing or not according
* to the PE address. If that has been existing, the
@@ -338,21 +343,18 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
*/
pe = eeh_pe_get(edev);
if (pe && !(pe->type & EEH_PE_INVALID)) {
- if (!edev->pe_config_addr) {
- pr_err("%s: PE with addr 0x%x already exists\n",
- __func__, edev->config_addr);
- return -EEXIST;
- }
-
/* Mark the PE as type of PCI bus */
pe->type = EEH_PE_BUS;
edev->pe = pe;
/* Put the edev to PE */
list_add_tail(&edev->list, &pe->edevs);
- pr_debug("EEH: Add %s to Bus PE#%x\n",
- edev->dn->full_name, pe->addr);
-
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr);
return 0;
} else if (pe && (pe->type & EEH_PE_INVALID)) {
list_add_tail(&edev->list, &pe->edevs);
@@ -368,9 +370,14 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
parent = parent->parent;
}
- pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
- edev->dn->full_name, pe->addr, pe->parent->addr);
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device "
+ "PE#%x, Parent PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr, pe->parent->addr);
return 0;
}
@@ -409,8 +416,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
list_add_tail(&pe->child, &parent->child_list);
list_add_tail(&edev->list, &pe->edevs);
edev->pe = pe;
- pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
- edev->dn->full_name, pe->addr, pe->parent->addr);
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to "
+ "Device PE#%x, Parent PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr, pe->parent->addr);
return 0;
}
@@ -430,8 +442,11 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
int cnt;
if (!edev->pe) {
- pr_debug("%s: No PE found for EEH device %s\n",
- __func__, edev->dn->full_name);
+ pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n",
+ __func__, edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF));
return -EEXIST;
}
@@ -653,9 +668,9 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
* blocked on normal path during the stage. So we need utilize
* eeh operations, which is always permitted.
*/
-static void eeh_bridge_check_link(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_bridge_check_link(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int cap;
uint32_t val;
int timeout = 0;
@@ -675,32 +690,32 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
/* Check slot status */
cap = edev->pcie_cap;
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val);
if (!(val & PCI_EXP_SLTSTA_PDS)) {
pr_debug(" No card in the slot (0x%04x) !\n", val);
return;
}
/* Check power status if we have the capability */
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val);
if (val & PCI_EXP_SLTCAP_PCP) {
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val);
if (val & PCI_EXP_SLTCTL_PCC) {
pr_debug(" In power-off state, power it on ...\n");
val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
- eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val);
+ eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val);
msleep(2 * 1000);
}
}
/* Enable link */
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val);
val &= ~PCI_EXP_LNKCTL_LD;
- eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val);
+ eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val);
/* Check link */
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val);
if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
pr_debug(" No link reporting capability (0x%08x) \n", val);
msleep(1000);
@@ -713,7 +728,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
msleep(20);
timeout += 20;
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val);
if (val & PCI_EXP_LNKSTA_DLLLA)
break;
}
@@ -728,9 +743,9 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
-static void eeh_restore_bridge_bars(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int i;
/*
@@ -738,49 +753,49 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev,
* Bus numbers and windows: 0x18 - 0x30
*/
for (i = 4; i < 13; i++)
- eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
+ eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
/* Rom: 0x38 */
- eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]);
+ eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]);
/* Cache line & Latency timer: 0xC 0xD */
- eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
+ eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* Max latency, min grant, interrupt ping and line: 0x3C */
- eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
+ eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
- eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
/* Check the PCIe link is ready */
- eeh_bridge_check_link(edev, dn);
+ eeh_bridge_check_link(edev);
}
-static void eeh_restore_device_bars(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_restore_device_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int i;
u32 cmd;
for (i = 4; i < 10; i++)
- eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
+ eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
/* 12 == Expansion ROM Address */
- eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
+ eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]);
- eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
+ eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* max latency, min grant, interrupt pin and line */
- eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
+ eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/*
* Restore PERR & SERR bits, some devices require it,
* don't touch the other command bits
*/
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
+ eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd);
if (edev->config_space[1] & PCI_COMMAND_PARITY)
cmd |= PCI_COMMAND_PARITY;
else
@@ -789,7 +804,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
cmd |= PCI_COMMAND_SERR;
else
cmd &= ~PCI_COMMAND_SERR;
- eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd);
}
/**
@@ -804,16 +819,16 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
static void *eeh_restore_one_device_bars(void *data, void *flag)
{
struct eeh_dev *edev = (struct eeh_dev *)data;
- struct device_node *dn = eeh_dev_to_of_node(edev);
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
/* Do special restore for bridges */
if (edev->mode & EEH_DEV_BRIDGE)
- eeh_restore_bridge_bars(edev, dn);
+ eeh_restore_bridge_bars(edev);
else
- eeh_restore_device_bars(edev, dn);
+ eeh_restore_device_bars(edev);
- if (eeh_ops->restore_config)
- eeh_ops->restore_config(dn);
+ if (eeh_ops->restore_config && pdn)
+ eeh_ops->restore_config(pdn);
return NULL;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d180caf2d6de..8ca9434c40e6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -34,6 +34,7 @@
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
+#include <asm/tm.h>
/*
* System calls.
@@ -145,6 +146,24 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
andi. r11,r10,_TIF_SYSCALL_DOTRACE
bne syscall_dotrace
.Lsyscall_dotrace_cont:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ b 1f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+ extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ beq+ 1f
+
+ /* Doom the transaction and don't perform the syscall: */
+ mfmsr r11
+ li r12, 1
+ rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r11, 0
+ li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+ TABORT(R11)
+
+ b .Lsyscall_exit
+1:
+#endif
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
@@ -356,6 +375,11 @@ _GLOBAL(ppc64_swapcontext)
bl sys_swapcontext
b .Lsyscall_exit
+_GLOBAL(ppc_switch_endian)
+ bl save_nvgprs
+ bl sys_switch_endian
+ b .Lsyscall_exit
+
_GLOBAL(ret_from_fork)
bl schedule_tail
REST_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 05adc8bbdef8..eeaa0d5f69d5 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -94,6 +94,7 @@ _GLOBAL(power7_powersave_common)
beq 1f
addi r1,r1,INT_FRAME_SIZE
ld r0,16(r1)
+ li r3,0 /* Return 0 (no nap) */
mtlr r0
blr
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index b6f123ab90ed..2c647b1e62e4 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -28,6 +28,55 @@
#include <asm/mce.h>
#include <asm/machdep.h>
+static void flush_tlb_206(unsigned int num_sets, unsigned int action)
+{
+ unsigned long rb;
+ unsigned int i;
+
+ switch (action) {
+ case TLB_INVAL_SCOPE_GLOBAL:
+ rb = TLBIEL_INVAL_SET;
+ break;
+ case TLB_INVAL_SCOPE_LPID:
+ rb = TLBIEL_INVAL_SET_LPID;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < num_sets; i++) {
+ asm volatile("tlbiel %0" : : "r" (rb));
+ rb += 1 << TLBIEL_INVAL_SET_SHIFT;
+ }
+ asm volatile("ptesync" : : : "memory");
+}
+
+/*
+ * Generic routine to flush TLB on power7. This routine is used as
+ * flush_tlb hook in cpu_spec for Power7 processor.
+ *
+ * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs.
+ * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
+ */
+void __flush_tlb_power7(unsigned int action)
+{
+ flush_tlb_206(POWER7_TLB_SETS, action);
+}
+
+/*
+ * Generic routine to flush TLB on power8. This routine is used as
+ * flush_tlb hook in cpu_spec for power8 processor.
+ *
+ * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs.
+ * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
+ */
+void __flush_tlb_power8(unsigned int action)
+{
+ flush_tlb_206(POWER8_TLB_SETS, action);
+}
+
/* flush SLBs and reload */
static void flush_and_reload_slb(void)
{
@@ -79,7 +128,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
}
if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
/* reset error bits */
dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
}
@@ -110,7 +159,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
break;
case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
handled = 1;
}
break;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 34f7c9b7cd96..1e703f8ebad4 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -26,6 +26,9 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/kmsg_dump.h>
+#include <linux/pstore.h>
+#include <linux/zlib.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
@@ -54,6 +57,680 @@ struct nvram_partition {
static LIST_HEAD(nvram_partitions);
+#ifdef CONFIG_PPC_PSERIES
+struct nvram_os_partition rtas_log_partition = {
+ .name = "ibm,rtas-log",
+ .req_size = 2079,
+ .min_size = 1055,
+ .index = -1,
+ .os_partition = true
+};
+#endif
+
+struct nvram_os_partition oops_log_partition = {
+ .name = "lnx,oops-log",
+ .req_size = 4000,
+ .min_size = 2000,
+ .index = -1,
+ .os_partition = true
+};
+
+static const char *nvram_os_partitions[] = {
+#ifdef CONFIG_PPC_PSERIES
+ "ibm,rtas-log",
+#endif
+ "lnx,oops-log",
+ NULL
+};
+
+static void oops_to_nvram(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason);
+
+static struct kmsg_dumper nvram_kmsg_dumper = {
+ .dump = oops_to_nvram
+};
+
+/*
+ * For capturing and compressing an oops or panic report...
+
+ * big_oops_buf[] holds the uncompressed text we're capturing.
+ *
+ * oops_buf[] holds the compressed text, preceded by an oops header.
+ * oops header has u16 holding the version of oops header (to differentiate
+ * between old and new format header) followed by u16 holding the length of
+ * the compressed* text (*Or uncompressed, if compression fails.) and u64
+ * holding the timestamp. oops_buf[] gets written to NVRAM.
+ *
+ * oops_log_info points to the header. oops_data points to the compressed text.
+ *
+ * +- oops_buf
+ * | +- oops_data
+ * v v
+ * +-----------+-----------+-----------+------------------------+
+ * | version | length | timestamp | text |
+ * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
+ * +-----------+-----------+-----------+------------------------+
+ * ^
+ * +- oops_log_info
+ *
+ * We preallocate these buffers during init to avoid kmalloc during oops/panic.
+ */
+static size_t big_oops_buf_sz;
+static char *big_oops_buf, *oops_buf;
+static char *oops_data;
+static size_t oops_data_sz;
+
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
+static struct z_stream_s stream;
+
+#ifdef CONFIG_PSTORE
+#ifdef CONFIG_PPC_POWERNV
+static struct nvram_os_partition skiboot_partition = {
+ .name = "ibm,skiboot",
+ .index = -1,
+ .os_partition = false
+};
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+static struct nvram_os_partition of_config_partition = {
+ .name = "of-config",
+ .index = -1,
+ .os_partition = false
+};
+#endif
+
+static struct nvram_os_partition common_partition = {
+ .name = "common",
+ .index = -1,
+ .os_partition = false
+};
+
+static enum pstore_type_id nvram_type_ids[] = {
+ PSTORE_TYPE_DMESG,
+ PSTORE_TYPE_PPC_COMMON,
+ -1,
+ -1,
+ -1
+};
+static int read_type;
+#endif
+
+/* nvram_write_os_partition
+ *
+ * We need to buffer the error logs into nvram to ensure that we have
+ * the failure information to decode. If we have a severe error there
+ * is no way to guarantee that the OS or the machine is in a state to
+ * get back to user land and write the error to disk. For example if
+ * the SCSI device driver causes a Machine Check by writing to a bad
+ * IO address, there is no way of guaranteeing that the device driver
+ * is in any state that it would also be able to write the error data
+ * captured to disk, thus we buffer it in NVRAM for analysis on the
+ * next boot.
+ *
+ * In NVRAM the partition containing the error log buffer will look like:
+ * Header (in bytes):
+ * +-----------+----------+--------+------------+------------------+
+ * | signature | checksum | length | name | data |
+ * |0 |1 |2 3|4 15|16 length-1|
+ * +-----------+----------+--------+------------+------------------+
+ *
+ * The 'data' section would look like (in bytes):
+ * +--------------+------------+-----------------------------------+
+ * | event_logged | sequence # | error log |
+ * |0 3|4 7|8 error_log_size-1|
+ * +--------------+------------+-----------------------------------+
+ *
+ * event_logged: 0 if event has not been logged to syslog, 1 if it has
+ * sequence #: The unique sequence # for each event. (until it wraps)
+ * error log: The error log from event_scan
+ */
+int nvram_write_os_partition(struct nvram_os_partition *part,
+ char *buff, int length,
+ unsigned int err_type,
+ unsigned int error_log_cnt)
+{
+ int rc;
+ loff_t tmp_index;
+ struct err_log_info info;
+
+ if (part->index == -1)
+ return -ESPIPE;
+
+ if (length > part->size)
+ length = part->size;
+
+ info.error_type = cpu_to_be32(err_type);
+ info.seq_num = cpu_to_be32(error_log_cnt);
+
+ tmp_index = part->index;
+
+ rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info),
+ &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ rc = ppc_md.nvram_write(buff, length, &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* nvram_read_partition
+ *
+ * Reads nvram partition for at most 'length'
+ */
+int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+ int length, unsigned int *err_type,
+ unsigned int *error_log_cnt)
+{
+ int rc;
+ loff_t tmp_index;
+ struct err_log_info info;
+
+ if (part->index == -1)
+ return -1;
+
+ if (length > part->size)
+ length = part->size;
+
+ tmp_index = part->index;
+
+ if (part->os_partition) {
+ rc = ppc_md.nvram_read((char *)&info,
+ sizeof(struct err_log_info),
+ &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
+ return rc;
+ }
+ }
+
+ rc = ppc_md.nvram_read(buff, length, &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ if (part->os_partition) {
+ *error_log_cnt = be32_to_cpu(info.seq_num);
+ *err_type = be32_to_cpu(info.error_type);
+ }
+
+ return 0;
+}
+
+/* nvram_init_os_partition
+ *
+ * This sets up a partition with an "OS" signature.
+ *
+ * The general strategy is the following:
+ * 1.) If a partition with the indicated name already exists...
+ * - If it's large enough, use it.
+ * - Otherwise, recycle it and keep going.
+ * 2.) Search for a free partition that is large enough.
+ * 3.) If there's not a free partition large enough, recycle any obsolete
+ * OS partitions and try again.
+ * 4.) Will first try getting a chunk that will satisfy the requested size.
+ * 5.) If a chunk of the requested size cannot be allocated, then try finding
+ * a chunk that will satisfy the minimum needed.
+ *
+ * Returns 0 on success, else -1.
+ */
+int __init nvram_init_os_partition(struct nvram_os_partition *part)
+{
+ loff_t p;
+ int size;
+
+ /* Look for ours */
+ p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
+
+ /* Found one but too small, remove it */
+ if (p && size < part->min_size) {
+ pr_info("nvram: Found too small %s partition,"
+ " removing it...\n", part->name);
+ nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
+ p = 0;
+ }
+
+ /* Create one if we didn't find */
+ if (!p) {
+ p = nvram_create_partition(part->name, NVRAM_SIG_OS,
+ part->req_size, part->min_size);
+ if (p == -ENOSPC) {
+ pr_info("nvram: No room to create %s partition, "
+ "deleting any obsolete OS partitions...\n",
+ part->name);
+ nvram_remove_partition(NULL, NVRAM_SIG_OS,
+ nvram_os_partitions);
+ p = nvram_create_partition(part->name, NVRAM_SIG_OS,
+ part->req_size, part->min_size);
+ }
+ }
+
+ if (p <= 0) {
+ pr_err("nvram: Failed to find or create %s"
+ " partition, err %d\n", part->name, (int)p);
+ return -1;
+ }
+
+ part->index = p;
+ part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
+
+ return 0;
+}
+
+/* Derived from logfs_compress() */
+static int nvram_compress(const void *in, void *out, size_t inlen,
+ size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_deflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ if (stream.total_out >= stream.total_in)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
+/* Compress the text from big_oops_buf into oops_buf. */
+static int zip_oops(size_t text_len)
+{
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+ int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
+ oops_data_sz);
+ if (zipped_len < 0) {
+ pr_err("nvram: compression failed; returned %d\n", zipped_len);
+ pr_err("nvram: logging uncompressed oops/panic report\n");
+ return -1;
+ }
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(zipped_len);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+ return 0;
+}
+
+#ifdef CONFIG_PSTORE
+static int nvram_pstore_open(struct pstore_info *psi)
+{
+ /* Reset the iterator to start reading partitions again */
+ read_type = -1;
+ return 0;
+}
+
+/**
+ * nvram_pstore_write - pstore write callback for nvram
+ * @type: Type of message logged
+ * @reason: reason behind dump (oops/panic)
+ * @id: identifier to indicate the write performed
+ * @part: pstore writes data to registered buffer in parts,
+ * part number will indicate the same.
+ * @count: Indicates oops count
+ * @compressed: Flag to indicate the log is compressed
+ * @size: number of bytes written to the registered buffer
+ * @psi: registered pstore_info structure
+ *
+ * Called by pstore_dump() when an oops or panic report is logged in the
+ * printk buffer.
+ * Returns 0 on successful write.
+ */
+static int nvram_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part, int count,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ int rc;
+ unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
+ struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
+
+ /* part 1 has the recent messages from printk buffer */
+ if (part > 1 || (type != PSTORE_TYPE_DMESG))
+ return -1;
+
+ if (clobbering_unread_rtas_event())
+ return -1;
+
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(size);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+
+ if (compressed)
+ err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+
+ rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
+ (int) (sizeof(*oops_hdr) + size), err_type, count);
+
+ if (rc != 0)
+ return rc;
+
+ *id = part;
+ return 0;
+}
+
+/*
+ * Reads the oops/panic report, rtas, of-config and common partition.
+ * Returns the length of the data we read from each partition.
+ * Returns 0 if we've been called before.
+ */
+static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
+ int *count, struct timespec *time, char **buf,
+ bool *compressed, struct pstore_info *psi)
+{
+ struct oops_log_info *oops_hdr;
+ unsigned int err_type, id_no, size = 0;
+ struct nvram_os_partition *part = NULL;
+ char *buff = NULL;
+ int sig = 0;
+ loff_t p;
+
+ read_type++;
+
+ switch (nvram_type_ids[read_type]) {
+ case PSTORE_TYPE_DMESG:
+ part = &oops_log_partition;
+ *type = PSTORE_TYPE_DMESG;
+ break;
+ case PSTORE_TYPE_PPC_COMMON:
+ sig = NVRAM_SIG_SYS;
+ part = &common_partition;
+ *type = PSTORE_TYPE_PPC_COMMON;
+ *id = PSTORE_TYPE_PPC_COMMON;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#ifdef CONFIG_PPC_PSERIES
+ case PSTORE_TYPE_PPC_RTAS:
+ part = &rtas_log_partition;
+ *type = PSTORE_TYPE_PPC_RTAS;
+ time->tv_sec = last_rtas_event;
+ time->tv_nsec = 0;
+ break;
+ case PSTORE_TYPE_PPC_OF:
+ sig = NVRAM_SIG_OF;
+ part = &of_config_partition;
+ *type = PSTORE_TYPE_PPC_OF;
+ *id = PSTORE_TYPE_PPC_OF;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#endif
+#ifdef CONFIG_PPC_POWERNV
+ case PSTORE_TYPE_PPC_OPAL:
+ sig = NVRAM_SIG_FW;
+ part = &skiboot_partition;
+ *type = PSTORE_TYPE_PPC_OPAL;
+ *id = PSTORE_TYPE_PPC_OPAL;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#endif
+ default:
+ return 0;
+ }
+
+ if (!part->os_partition) {
+ p = nvram_find_partition(part->name, sig, &size);
+ if (p <= 0) {
+ pr_err("nvram: Failed to find partition %s, "
+ "err %d\n", part->name, (int)p);
+ return 0;
+ }
+ part->index = p;
+ part->size = size;
+ }
+
+ buff = kmalloc(part->size, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
+ kfree(buff);
+ return 0;
+ }
+
+ *count = 0;
+
+ if (part->os_partition)
+ *id = id_no;
+
+ if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
+ size_t length, hdr_size;
+
+ oops_hdr = (struct oops_log_info *)buff;
+ if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
+ /* Old format oops header had 2-byte record size */
+ hdr_size = sizeof(u16);
+ length = be16_to_cpu(oops_hdr->version);
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ } else {
+ hdr_size = sizeof(*oops_hdr);
+ length = be16_to_cpu(oops_hdr->report_length);
+ time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
+ time->tv_nsec = 0;
+ }
+ *buf = kmalloc(length, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, buff + hdr_size, length);
+ kfree(buff);
+
+ if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
+ *compressed = true;
+ else
+ *compressed = false;
+ return length;
+ }
+
+ *buf = buff;
+ return part->size;
+}
+
+static struct pstore_info nvram_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "nvram",
+ .open = nvram_pstore_open,
+ .read = nvram_pstore_read,
+ .write = nvram_pstore_write,
+};
+
+static int nvram_pstore_init(void)
+{
+ int rc = 0;
+
+ if (machine_is(pseries)) {
+ nvram_type_ids[2] = PSTORE_TYPE_PPC_RTAS;
+ nvram_type_ids[3] = PSTORE_TYPE_PPC_OF;
+ } else
+ nvram_type_ids[2] = PSTORE_TYPE_PPC_OPAL;
+
+ nvram_pstore_info.buf = oops_data;
+ nvram_pstore_info.bufsize = oops_data_sz;
+
+ spin_lock_init(&nvram_pstore_info.buf_lock);
+
+ rc = pstore_register(&nvram_pstore_info);
+ if (rc != 0)
+ pr_err("nvram: pstore_register() failed, defaults to "
+ "kmsg_dump; returned %d\n", rc);
+
+ return rc;
+}
+#else
+static int nvram_pstore_init(void)
+{
+ return -1;
+}
+#endif
+
+void __init nvram_init_oops_partition(int rtas_partition_exists)
+{
+ int rc;
+
+ rc = nvram_init_os_partition(&oops_log_partition);
+ if (rc != 0) {
+#ifdef CONFIG_PPC_PSERIES
+ if (!rtas_partition_exists) {
+ pr_err("nvram: Failed to initialize oops partition!");
+ return;
+ }
+ pr_notice("nvram: Using %s partition to log both"
+ " RTAS errors and oops/panic reports\n",
+ rtas_log_partition.name);
+ memcpy(&oops_log_partition, &rtas_log_partition,
+ sizeof(rtas_log_partition));
+#else
+ pr_err("nvram: Failed to initialize oops partition!");
+ return;
+#endif
+ }
+ oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
+ if (!oops_buf) {
+ pr_err("nvram: No memory for %s partition\n",
+ oops_log_partition.name);
+ return;
+ }
+ oops_data = oops_buf + sizeof(struct oops_log_info);
+ oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
+
+ rc = nvram_pstore_init();
+
+ if (!rc)
+ return;
+
+ /*
+ * Figure compression (preceded by elimination of each line's <n>
+ * severity prefix) will reduce the oops/panic report to at most
+ * 45% of its original size.
+ */
+ big_oops_buf_sz = (oops_data_sz * 100) / 45;
+ big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+ if (big_oops_buf) {
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(
+ WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+ if (!stream.workspace) {
+ pr_err("nvram: No memory for compression workspace; "
+ "skipping compression of %s partition data\n",
+ oops_log_partition.name);
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ }
+ } else {
+ pr_err("No memory for uncompressed %s data; "
+ "skipping compression\n", oops_log_partition.name);
+ stream.workspace = NULL;
+ }
+
+ rc = kmsg_dump_register(&nvram_kmsg_dumper);
+ if (rc != 0) {
+ pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
+ kfree(oops_buf);
+ kfree(big_oops_buf);
+ kfree(stream.workspace);
+ }
+}
+
+/*
+ * This is our kmsg_dump callback, called after an oops or panic report
+ * has been written to the printk buffer. We want to capture as much
+ * of the printk buffer as possible. First, capture as much as we can
+ * that we think will compress sufficiently to fit in the lnx,oops-log
+ * partition. If that's too much, go back and capture uncompressed text.
+ */
+static void oops_to_nvram(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+ static unsigned int oops_count = 0;
+ static bool panicking = false;
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ size_t text_len;
+ unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+ int rc = -1;
+
+ switch (reason) {
+ case KMSG_DUMP_RESTART:
+ case KMSG_DUMP_HALT:
+ case KMSG_DUMP_POWEROFF:
+ /* These are almost always orderly shutdowns. */
+ return;
+ case KMSG_DUMP_OOPS:
+ break;
+ case KMSG_DUMP_PANIC:
+ panicking = true;
+ break;
+ case KMSG_DUMP_EMERG:
+ if (panicking)
+ /* Panic report already captured. */
+ return;
+ break;
+ default:
+ pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
+ __func__, (int) reason);
+ return;
+ }
+
+ if (clobbering_unread_rtas_event())
+ return;
+
+ if (!spin_trylock_irqsave(&lock, flags))
+ return;
+
+ if (big_oops_buf) {
+ kmsg_dump_get_buffer(dumper, false,
+ big_oops_buf, big_oops_buf_sz, &text_len);
+ rc = zip_oops(text_len);
+ }
+ if (rc != 0) {
+ kmsg_dump_rewind(dumper);
+ kmsg_dump_get_buffer(dumper, false,
+ oops_data, oops_data_sz, &text_len);
+ err_type = ERR_TYPE_KERNEL_PANIC;
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(text_len);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+ }
+
+ (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
+ (int) (sizeof(*oops_hdr) + text_len), err_type,
+ ++oops_count);
+
+ spin_unlock_irqrestore(&lock, flags);
+}
+
static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
{
int size;
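The oops header that the comments above describe maps onto a small packed structure. A sketch for reference: field names and widths follow the cpu_to_be16()/cpu_to_be64() stores in this file, but the real definition is struct oops_log_info, declared elsewhere, so the exact layout here is an assumption.

struct oops_log_info_sketch {
	__be16 version;		/* OOPS_HDR_VERSION, distinguishes old/new header */
	__be16 report_length;	/* length of the (possibly compressed) text */
	__be64 timestamp;	/* seconds from ktime_get_real_seconds() */
} __packed;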
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 2f35a72642c6..b60a67d92ebd 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -72,7 +72,7 @@ static int of_pci_phb_probe(struct platform_device *dev)
/* Register devices with EEH */
if (dev->dev.of_node->child)
- eeh_add_device_tree_early(dev->dev.of_node);
+ eeh_add_device_tree_early(PCI_DN(dev->dev.of_node));
/* Scan the bus */
pcibios_scan_phb(phb);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2a525c938158..0d054068a21d 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -76,7 +76,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
phb->dn = dev;
- phb->is_dynamic = mem_init_done;
+ phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
if (dev) {
int nid = of_node_to_nid(dev);
@@ -109,8 +109,10 @@ void pcibios_free_controller(struct pci_controller *phb)
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type)
{
- if (ppc_md.pcibios_window_alignment)
- return ppc_md.pcibios_window_alignment(bus, type);
+ struct pci_controller *phb = pci_bus_to_host(bus);
+
+ if (phb->controller_ops.window_alignment)
+ return phb->controller_ops.window_alignment(bus, type);
/*
* PCI core will figure out the default
@@ -122,14 +124,26 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
- if (ppc_md.pcibios_reset_secondary_bus) {
- ppc_md.pcibios_reset_secondary_bus(dev);
+ struct pci_controller *phb = pci_bus_to_host(dev->bus);
+
+ if (phb->controller_ops.reset_secondary_bus) {
+ phb->controller_ops.reset_secondary_bus(dev);
return;
}
pci_reset_secondary_bus(dev);
}
+#ifdef CONFIG_PCI_IOV
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
+{
+ if (ppc_md.pcibios_iov_resource_alignment)
+ return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
+
+ return pci_iov_resource_size(pdev, resno);
+}
+#endif /* CONFIG_PCI_IOV */
+
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
@@ -788,6 +802,10 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
pci_name(dev));
return;
}
+
+ if (dev->is_virtfn)
+ return;
+
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
struct resource *res = dev->resource + i;
struct pci_bus_region reg;
@@ -942,6 +960,8 @@ static void pcibios_fixup_bridge(struct pci_bus *bus)
void pcibios_setup_bus_self(struct pci_bus *bus)
{
+ struct pci_controller *phb;
+
/* Fix up the bus resources for P2P bridges */
if (bus->self != NULL)
pcibios_fixup_bridge(bus);
@@ -953,12 +973,14 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
ppc_md.pcibios_fixup_bus(bus);
/* Setup bus DMA mappings */
- if (ppc_md.pci_dma_bus_setup)
- ppc_md.pci_dma_bus_setup(bus);
+ phb = pci_bus_to_host(bus);
+ if (phb->controller_ops.dma_bus_setup)
+ phb->controller_ops.dma_bus_setup(bus);
}
static void pcibios_setup_device(struct pci_dev *dev)
{
+ struct pci_controller *phb;
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
*/
@@ -969,8 +991,9 @@ static void pcibios_setup_device(struct pci_dev *dev)
set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
/* Additional platform DMA/iommu setup */
- if (ppc_md.pci_dma_dev_setup)
- ppc_md.pci_dma_dev_setup(dev);
+ phb = pci_bus_to_host(dev->bus);
+ if (phb->controller_ops.dma_dev_setup)
+ phb->controller_ops.dma_dev_setup(dev);
/* Read default IRQs and fixup if necessary */
pci_read_irq_line(dev);
@@ -986,6 +1009,12 @@ int pcibios_add_device(struct pci_dev *dev)
*/
if (dev->bus->is_added)
pcibios_setup_device(dev);
+
+#ifdef CONFIG_PCI_IOV
+ if (ppc_md.pcibios_fixup_sriov)
+ ppc_md.pcibios_fixup_sriov(dev);
+#endif /* CONFIG_PCI_IOV */
+
return 0;
}
@@ -1450,8 +1479,10 @@ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- if (ppc_md.pcibios_enable_device_hook)
- if (ppc_md.pcibios_enable_device_hook(dev))
+ struct pci_controller *phb = pci_bus_to_host(dev->bus);
+
+ if (phb->controller_ops.enable_device_hook)
+ if (!phb->controller_ops.enable_device_hook(dev))
return -EINVAL;
return pci_enable_resources(dev, mask);
@@ -1624,8 +1655,8 @@ void pcibios_scan_phb(struct pci_controller *hose)
/* Get probe mode and perform scan */
mode = PCI_PROBE_NORMAL;
- if (node && ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (node && hose->controller_ops.probe_mode)
+ mode = hose->controller_ops.probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
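
The hunks above move the PCI callbacks from the global ppc_md structure to a
per-PHB pci_controller_ops, so each host bridge carries its own hooks. Below is
a minimal sketch of how a platform might fill them in; the demo_* names are
hypothetical, the field names come from the call sites in this file, and the
return types are inferred from those call sites (note that enable_device_hook
now returns true to allow the device, where the old ppc_md hook returned
non-zero to reject it):

static int demo_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_DEVTREE;	/* scan the bus from the device tree */
}

static bool demo_enable_device_hook(struct pci_dev *dev)
{
	return true;			/* true == let pci_enable_device() proceed */
}

static const struct pci_controller_ops demo_controller_ops = {
	.probe_mode		= demo_probe_mode,
	.enable_device_hook	= demo_enable_device_hook,
};

static void demo_setup_phb(struct pci_controller *hose)
{
	/* controller_ops is embedded in the pci_controller (the call sites
	 * use '.', not '->'), so this copies the whole ops structure. */
	hose->controller_ops = demo_controller_ops;
}
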
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 5b789177aa29..7ed85a69a9c2 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -73,13 +73,16 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
{
int slotno, mode, pass, max;
struct pci_dev *dev;
+ struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);
- eeh_add_device_tree_early(dn);
+ eeh_add_device_tree_early(PCI_DN(dn));
+
+ phb = pci_bus_to_host(bus);
mode = PCI_PROBE_NORMAL;
- if (ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (phb->controller_ops.probe_mode)
+ mode = phb->controller_ops.probe_mode(bus);
if (mode == PCI_PROBE_DEVTREE) {
/* use ofdt-based probe */
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 83df3075d3df..b3b4df91b792 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -32,12 +32,237 @@
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
+/*
+ * The function is used to find the firmware data of one
+ * specific PCI device, which is attached to the indicated
+ * PCI bus. For VFs, their firmware data is linked to that
+ * of the PF's bridge. For other devices, their firmware
+ * data is linked to that of their bridge.
+ */
+static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus)
+{
+ struct pci_bus *pbus;
+ struct device_node *dn;
+ struct pci_dn *pdn;
+
+ /*
+	 * We probably have a virtual bus which doesn't
+	 * have an associated bridge.
+ */
+ pbus = bus;
+ while (pbus) {
+ if (pci_is_root_bus(pbus) || pbus->self)
+ break;
+
+ pbus = pbus->parent;
+ }
+
+ /*
+	 * Except for virtual buses, all PCI buses should
+	 * have device nodes.
+ */
+ dn = pci_bus_to_OF_node(pbus);
+ pdn = dn ? PCI_DN(dn) : NULL;
+
+ return pdn;
+}
+
+struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
+ int devfn)
+{
+ struct device_node *dn = NULL;
+ struct pci_dn *parent, *pdn;
+ struct pci_dev *pdev = NULL;
+
+ /* Fast path: fetch from PCI device */
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (pdev->devfn == devfn) {
+ if (pdev->dev.archdata.pci_data)
+ return pdev->dev.archdata.pci_data;
+
+ dn = pci_device_to_OF_node(pdev);
+ break;
+ }
+ }
+
+ /* Fast path: fetch from device node */
+ pdn = dn ? PCI_DN(dn) : NULL;
+ if (pdn)
+ return pdn;
+
+ /* Slow path: fetch from firmware data hierarchy */
+ parent = pci_bus_to_pdn(bus);
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(pdn, &parent->child_list, list) {
+ if (pdn->busno == bus->number &&
+ pdn->devfn == devfn)
+ return pdn;
+ }
+
+ return NULL;
+}
+
struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
{
- struct device_node *dn = pci_device_to_OF_node(pdev);
- if (!dn)
+ struct device_node *dn;
+ struct pci_dn *parent, *pdn;
+
+ /* Search device directly */
+ if (pdev->dev.archdata.pci_data)
+ return pdev->dev.archdata.pci_data;
+
+ /* Check device node */
+ dn = pci_device_to_OF_node(pdev);
+ pdn = dn ? PCI_DN(dn) : NULL;
+ if (pdn)
+ return pdn;
+
+ /*
+ * VFs don't have device nodes. We hook their
+ * firmware data to PF's bridge.
+ */
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(pdn, &parent->child_list, list) {
+ if (pdn->busno == pdev->bus->number &&
+ pdn->devfn == pdev->devfn)
+ return pdn;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_PCI_IOV
+static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
+ struct pci_dev *pdev,
+ int busno, int devfn)
+{
+ struct pci_dn *pdn;
+
+	/* Except for the PHB, we always have a parent */
+ if (!parent)
+ return NULL;
+
+ pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
+ if (!pdn) {
+ dev_warn(&pdev->dev, "%s: Out of memory!\n", __func__);
return NULL;
- return PCI_DN(dn);
+ }
+
+ pdn->phb = parent->phb;
+ pdn->parent = parent;
+ pdn->busno = busno;
+ pdn->devfn = devfn;
+#ifdef CONFIG_PPC_POWERNV
+ pdn->pe_number = IODA_INVALID_PE;
+#endif
+ INIT_LIST_HEAD(&pdn->child_list);
+ INIT_LIST_HEAD(&pdn->list);
+ list_add_tail(&pdn->list, &parent->child_list);
+
+ /*
+	 * If we already have the PCI device instance, bind
+	 * it to the new pci_dn now.
+ */
+ if (pdev)
+ pdev->dev.archdata.pci_data = pdn;
+
+ return pdn;
+}
+#endif
+
+struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI_IOV
+ struct pci_dn *parent, *pdn;
+ int i;
+
+ /* Only support IOV for now */
+ if (!pdev->is_physfn)
+ return pci_get_pdn(pdev);
+
+ /* Check if VFs have been populated */
+ pdn = pci_get_pdn(pdev);
+ if (!pdn || (pdn->flags & PCI_DN_FLAG_IOV_VF))
+ return NULL;
+
+ pdn->flags |= PCI_DN_FLAG_IOV_VF;
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return NULL;
+
+ for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ pdn = add_one_dev_pci_data(parent, NULL,
+ pci_iov_virtfn_bus(pdev, i),
+ pci_iov_virtfn_devfn(pdev, i));
+ if (!pdn) {
+ dev_warn(&pdev->dev, "%s: Cannot create firmware data for VF#%d\n",
+ __func__, i);
+ return NULL;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ return pci_get_pdn(pdev);
+}
+
+void remove_dev_pci_data(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI_IOV
+ struct pci_dn *parent;
+ struct pci_dn *pdn, *tmp;
+ int i;
+
+ /*
+ * VF and VF PE are created/released dynamically, so we need to
+ * bind/unbind them. Otherwise the VF and VF PE would be mismatched
+ * when re-enabling SR-IOV.
+ */
+ if (pdev->is_virtfn) {
+ pdn = pci_get_pdn(pdev);
+#ifdef CONFIG_PPC_POWERNV
+ pdn->pe_number = IODA_INVALID_PE;
+#endif
+ return;
+ }
+
+ /* Only support IOV PF for now */
+ if (!pdev->is_physfn)
+ return;
+
+ /* Check if VFs have been populated */
+ pdn = pci_get_pdn(pdev);
+ if (!pdn || !(pdn->flags & PCI_DN_FLAG_IOV_VF))
+ return;
+
+ pdn->flags &= ~PCI_DN_FLAG_IOV_VF;
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return;
+
+ /*
+	 * We might introduce a flag to pci_dn in the future
+	 * so that we can release the VFs' firmware data in
+	 * batch mode.
+ */
+ for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ list_for_each_entry_safe(pdn, tmp,
+ &parent->child_list, list) {
+ if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
+ pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
+ continue;
+
+ if (!list_empty(&pdn->list))
+ list_del(&pdn->list);
+
+ kfree(pdn);
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
}
/*
@@ -49,6 +274,7 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
struct pci_controller *phb = data;
const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
const __be32 *regs;
+ struct device_node *parent;
struct pci_dn *pdn;
pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
@@ -69,7 +295,25 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
pdn->devfn = (addr >> 8) & 0xff;
}
+ /* vendor/device IDs and class code */
+ regs = of_get_property(dn, "vendor-id", NULL);
+ pdn->vendor_id = regs ? of_read_number(regs, 1) : 0;
+ regs = of_get_property(dn, "device-id", NULL);
+ pdn->device_id = regs ? of_read_number(regs, 1) : 0;
+ regs = of_get_property(dn, "class-code", NULL);
+ pdn->class_code = regs ? of_read_number(regs, 1) : 0;
+
+ /* Extended config space */
pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
+
+ /* Attach to parent node */
+ INIT_LIST_HEAD(&pdn->child_list);
+ INIT_LIST_HEAD(&pdn->list);
+ parent = of_get_parent(dn);
+ pdn->parent = parent ? PCI_DN(parent) : NULL;
+ if (pdn->parent)
+ list_add_tail(&pdn->list, &pdn->parent->child_list);
+
return NULL;
}
@@ -131,6 +375,46 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
return NULL;
}
+static struct pci_dn *pci_dn_next_one(struct pci_dn *root,
+ struct pci_dn *pdn)
+{
+ struct list_head *next = pdn->child_list.next;
+
+ if (next != &pdn->child_list)
+ return list_entry(next, struct pci_dn, list);
+
+ while (1) {
+ if (pdn == root)
+ return NULL;
+
+ next = pdn->list.next;
+ if (next != &pdn->parent->child_list)
+ break;
+
+ pdn = pdn->parent;
+ }
+
+ return list_entry(next, struct pci_dn, list);
+}
+
+void *traverse_pci_dn(struct pci_dn *root,
+ void *(*fn)(struct pci_dn *, void *),
+ void *data)
+{
+ struct pci_dn *pdn = root;
+ void *ret;
+
+ /* Only scan the child nodes */
+ for (pdn = pci_dn_next_one(root, pdn); pdn;
+ pdn = pci_dn_next_one(root, pdn)) {
+ ret = fn(pdn, data);
+ if (ret)
+ return ret;
+ }
+
+ return NULL;
+}
+
/**
* pci_devs_phb_init_dynamic - setup pci devices under this PHB
* phb: pci-to-host bridge (top-level bridge connecting to cpu)
@@ -147,8 +431,12 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
/* PHB nodes themselves must not match */
update_dn_pci_info(dn, phb);
pdn = dn->data;
- if (pdn)
+ if (pdn) {
pdn->devfn = pdn->busno = -1;
+ pdn->vendor_id = pdn->device_id = pdn->class_code = 0;
+ pdn->phb = phb;
+ phb->pci_data = pdn;
+ }
/* Update dn->phb ptrs for new phb and children devices */
traverse_pci_devices(dn, update_dn_pci_info, phb);
@@ -171,3 +459,16 @@ void __init pci_devs_phb_init(void)
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
pci_devs_phb_init_dynamic(phb);
}
+
+static void pci_dev_pdn_setup(struct pci_dev *pdev)
+{
+ struct pci_dn *pdn;
+
+ if (pdev->dev.archdata.pci_data)
+ return;
+
+ /* Setup the fast path */
+ pdn = pci_get_pdn(pdev);
+ pdev->dev.archdata.pci_data = pdn;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup);
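
For reference, a small usage sketch of the traversal helper added above. The
count_pdn() callback and demo function are hypothetical; the signature follows
traverse_pci_dn() as introduced in this file, where returning NULL keeps the
walk going and a non-NULL value stops it and is handed back to the caller:

static void *count_pdn(struct pci_dn *pdn, void *data)
{
	int *count = data;

	(*count)++;		/* visit every pci_dn below the root */
	return NULL;
}

static int demo_count_phb_pdns(struct pci_controller *phb)
{
	int count = 0;

	/* pci_devs_phb_init_dynamic() above points phb->pci_data at the
	 * PHB's own pci_dn, which serves as the traversal root here. */
	traverse_pci_dn(phb->pci_data, count_pdn, &count);
	return count;
}
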
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index e6245e9c7d8d..42e02a2d570b 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -207,6 +207,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
{
struct device_node *node = dev->dev.of_node;
struct pci_bus *bus;
+ struct pci_controller *phb;
const __be32 *busrange, *ranges;
int len, i, mode;
struct pci_bus_region region;
@@ -286,9 +287,11 @@ void of_scan_pci_bridge(struct pci_dev *dev)
bus->number);
pr_debug(" bus name: %s\n", bus->name);
+ phb = pci_bus_to_host(bus);
+
mode = PCI_PROBE_NORMAL;
- if (ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (phb->controller_ops.probe_mode)
+ mode = phb->controller_ops.probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
@@ -305,7 +308,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
const __be32 *reg;
int reglen, devfn;
#ifdef CONFIG_EEH
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn));
#endif
pr_debug(" * %s\n", dn->full_name);
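
The EEH lookup in this hunk now starts from the pci_dn rather than the
device_node. A one-function sketch of the equivalent lookup from a pci_dev,
using the helpers added in pci_dn.c above and assuming pdn_to_eeh_dev()
tolerates a NULL pci_dn the way of_node_to_eeh_dev() tolerated a NULL node:

#ifdef CONFIG_EEH
static struct eeh_dev *demo_pdev_to_edev(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);

	return pdn_to_eeh_dev(pdn);
}
#endif
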
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b4cc7bef6b16..febb50dd5328 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1114,8 +1114,11 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
*/
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
+/*
+ * Copy architecture-specific thread state
+ */
int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+ unsigned long kthread_arg, struct task_struct *p)
{
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
@@ -1127,6 +1130,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
if (unlikely(p->flags & PF_KTHREAD)) {
+ /* kernel thread */
struct thread_info *ti = (void *)task_stack_page(p);
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
@@ -1137,11 +1141,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
clear_tsk_thread_flag(p, TIF_32BIT);
childregs->softe = 1;
#endif
- childregs->gpr[15] = arg;
+ childregs->gpr[15] = kthread_arg;
p->thread.regs = NULL; /* no user register state */
ti->flags |= _TIF_RESTOREALL;
f = ret_from_kernel_thread;
} else {
+ /* user thread */
struct pt_regs *regs = current_pt_regs();
CHECK_FULL_REGS(regs);
*childregs = *regs;
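
The copy_thread() change above is a rename plus comments: for kernel threads
the third argument is the pointer given to kernel_thread(), parked in
childregs->gpr[15] and delivered to the thread entry point as its only
argument by ret_from_kernel_thread. A hypothetical consumer, for illustration
only:

/* The void * passed to kernel_thread() below arrives here via gpr[15]. */
static int demo_thread_fn(void *kthread_arg)
{
	pr_info("demo: started with arg %p\n", kthread_arg);
	return 0;
}

/* e.g. from core code: kernel_thread(demo_thread_fn, &demo_state, CLONE_FS); */
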
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1a85d8f96739..fd1fe4c37599 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2898,7 +2898,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* Call OF "quiesce" method to shut down pending DMA's from
* devices etc...
*/
- prom_printf("Calling quiesce...\n");
+ prom_printf("Quiescing Open Firmware ...\n");
call_prom("quiesce", 0, 0);
/*
@@ -2910,7 +2910,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/* Don't print anything after quiesce under OPAL, it crashes OFW */
if (of_platform != PLATFORM_OPAL) {
- prom_printf("returning from prom_init\n");
+ prom_printf("Booting Linux via __start() ...\n");
prom_debug("->dt_header_start=0x%x\n", hdr);
}
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 21c45a2d0706..7a488c108410 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -401,7 +401,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
buf = altbuf;
} else {
buf = rtas_err_buf;
- if (mem_init_done)
+ if (slab_is_available())
buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
}
if (buf)
@@ -461,7 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
- if (mem_init_done)
+ if (slab_is_available())
kfree(buff_copy);
}
return ret;
@@ -897,7 +897,7 @@ int rtas_offline_cpus_mask(cpumask_var_t cpus)
}
EXPORT_SYMBOL(rtas_offline_cpus_mask);
-int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
+int rtas_ibm_suspend_me(u64 handle)
{
long state;
long rc;
@@ -919,13 +919,11 @@ int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
return rc;
} else if (state == H_VASI_ENABLED) {
- *vasi_return = RTAS_NOT_SUSPENDABLE;
- return 0;
+ return -EAGAIN;
} else if (state != H_VASI_SUSPENDING) {
printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
state);
- *vasi_return = -1;
- return 0;
+ return -EIO;
}
if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
@@ -972,7 +970,7 @@ out:
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
-int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
+int rtas_ibm_suspend_me(u64 handle)
{
return -ENOSYS;
}
@@ -1022,7 +1020,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
unsigned long flags;
char *buff_copy, *errbuf = NULL;
int nargs, nret, token;
- int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1054,15 +1051,18 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
if (token == ibm_suspend_me_token) {
/*
- * rtas_ibm_suspend_me assumes args are in cpu endian, or at least the
- * hcall within it requires it.
+ * rtas_ibm_suspend_me assumes the streamid handle is in cpu
+ * endian, or at least the hcall within it requires it.
*/
- int vasi_rc = 0;
+ int rc = 0;
u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
| be32_to_cpu(args.args[1]);
- rc = rtas_ibm_suspend_me(handle, &vasi_rc);
- args.rets[0] = cpu_to_be32(vasi_rc);
- if (rc)
+ rc = rtas_ibm_suspend_me(handle);
+ if (rc == -EAGAIN)
+ args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
+ else if (rc == -EIO)
+ args.rets[0] = cpu_to_be32(-1);
+ else if (rc)
return rc;
goto copy_return;
}
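
With the vasi_return output parameter gone, rtas_ibm_suspend_me() reports the
VASI state through ordinary error codes, and the ppc_rtas() hunk above maps
them back onto the RTAS return values for userspace. A minimal hypothetical
in-kernel caller following the same contract:

static int demo_try_suspend(u64 streamid)
{
	int rc = rtas_ibm_suspend_me(streamid);

	if (rc == -EAGAIN)
		pr_info("demo: partition not suspendable yet (H_VASI_ENABLED)\n");
	else if (rc == -EIO)
		pr_warn("demo: unexpected VASI state\n");

	return rc;
}
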
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index ce230da2c015..73f1934582c2 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -113,7 +113,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
ret = rtas_read_config(pdn, where, size, val);
if (*val == EEH_IO_ERROR_VALUE(size) &&
- eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
+ eeh_dev_check_failure(pdn_to_eeh_dev(pdn)))
return PCIBIOS_DEVICE_NOT_FOUND;
return ret;
@@ -277,50 +277,3 @@ int rtas_setup_phb(struct pci_controller *phb)
return 0;
}
-
-void __init find_and_init_phbs(void)
-{
- struct device_node *node;
- struct pci_controller *phb;
- struct device_node *root = of_find_node_by_path("/");
-
- for_each_child_of_node(root, node) {
- if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
- strcmp(node->type, "pciex") != 0))
- continue;
-
- phb = pcibios_alloc_controller(node);
- if (!phb)
- continue;
- rtas_setup_phb(phb);
- pci_process_bridge_OF_ranges(phb, node, 0);
- isa_bridge_find_early(phb);
- }
-
- of_node_put(root);
- pci_devs_phb_init();
-
- /*
- * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
- * in chosen.
- */
- if (of_chosen) {
- const int *prop;
-
- prop = of_get_property(of_chosen,
- "linux,pci-probe-only", NULL);
- if (prop) {
- if (*prop)
- pci_add_flags(PCI_PROBE_ONLY);
- else
- pci_clear_flags(PCI_PROBE_ONLY);
- }
-
-#ifdef CONFIG_PPC32 /* Will be made generic soon */
- prop = of_get_property(of_chosen,
- "linux,pci-assign-all-buses", NULL);
- if (prop && *prop)
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-#endif /* CONFIG_PPC32 */
- }
-}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 49f553bbb360..c69671c03c3b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -37,6 +37,7 @@
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/memory.h>
+#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/kdump.h>
@@ -779,3 +780,22 @@ unsigned long memory_block_size_bytes(void)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+ return ppc_proc_freq * watchdog_thresh;
+}
+
+/*
+ * The hardlockup detector breaks PMU event based branches and is likely
+ * to get false positives in KVM guests, so disable it by default.
+ */
+static int __init disable_hardlockup_detector(void)
+{
+ hardlockup_detector_disable();
+
+ return 0;
+}
+early_initcall(disable_hardlockup_detector);
+#endif
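
For context on hw_nmi_get_sample_period(): the generic hard lockup detector
uses this value as the sample period of a CPU-cycles perf event, so it is the
processor frequency scaled by the threshold in seconds. An illustrative helper
with made-up numbers (ppc_proc_freq itself comes from the firmware-reported
clock frequency):

static u64 demo_sample_period(u64 proc_freq_hz, int watchdog_thresh_sec)
{
	/* e.g. 3500000000ULL * 10 = 35000000000 cycles between samples */
	return proc_freq_hz * watchdog_thresh_sec;
}
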
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index b2702e87db0d..5fa92706444b 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
(u64)len_high << 32 | len_low, advice);
}
+
+long sys_switch_endian(void)
+{
+ struct thread_info *ti;
+
+ current->thread.regs->msr ^= MSR_LE;
+
+ /*
+ * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
+ * userspace. That also has the effect of restoring the non-volatile
+ * GPRs, so we saved them on the way in here.
+ */
+ ti = current_thread_info();
+ ti->flags |= _TIF_RESTOREALL;
+
+ return 0;
+}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 7ab5d434e2ee..4d6b1d3a747f 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -22,6 +22,7 @@
#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func)
#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264)
#else
#define SYSCALL(func) .long sys_##func
@@ -29,6 +30,7 @@
#define PPC_SYS(func) .long ppc_##func
#define OLDSYS(func) .long sys_##func
#define SYS32ONLY(func) .long sys_##func
+#define PPC64ONLY(func) .long sys_ni_syscall
#define SYSX(f, f3264, f32) .long f32
#endif
#define SYSCALL_SPU(func) SYSCALL(func)
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
index 238aa63ced8f..2384129f5893 100644
--- a/arch/powerpc/kernel/systbl_chk.c
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -21,9 +21,11 @@
#ifdef CONFIG_PPC64
#define OLDSYS(func) -1
#define SYS32ONLY(func) -1
+#define PPC64ONLY(func) __NR_##func
#else
#define OLDSYS(func) __NR_old##func
#define SYS32ONLY(func) __NR_##func
+#define PPC64ONLY(func) -1
#endif
#define SYSX(f, f3264, f32) -1
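
The PPC64ONLY() macro added above is consumed by the shared syscall table in
arch/powerpc/include/asm/systbl.h, which is outside this diffstat; presumably
the new entry looks like the line below, wiring the call to ppc_switch_endian
on 64-bit kernels and to sys_ni_syscall on 32-bit ones:

	PPC64ONLY(switch_endian)
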
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2a324f4cb1b9..5754b226da7e 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -152,9 +152,9 @@ _GLOBAL(tm_reclaim)
addi r7, r3, THREAD_TRANSACT_VRSTATE
SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
- mfvscr vr0
+ mfvscr v0
li r6, VRSTATE_VSCR
- stvx vr0, r7, r6
+ stvx v0, r7, r6
dont_backup_vec:
mfspr r0, SPRN_VRSAVE
std r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -359,8 +359,8 @@ _GLOBAL(__tm_recheckpoint)
addi r8, r3, THREAD_VRSTATE
li r5, VRSTATE_VSCR
- lvx vr0, r8, r5
- mtvscr vr0
+ lvx v0, r8, r5
+ mtvscr v0
REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */
dont_restore_vec:
ld r5, THREAD_VRSAVE(r3)
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index b7aa07279a63..7cc38b5b58bc 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -46,8 +46,6 @@ void __init udbg_early_init(void)
#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
/* Maple real mode debug */
udbg_init_maple_realmode();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT)
- udbg_init_debug_beat();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
udbg_init_pas_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 74f8050518d6..f5c80d567d8d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -24,8 +24,8 @@ _GLOBAL(do_load_up_transact_altivec)
stw r4,THREAD_USED_VR(r3)
li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
- lvx vr0,r10,r3
- mtvscr vr0
+ lvx v0,r10,r3
+ mtvscr v0
addi r10,r3,THREAD_TRANSACT_VRSTATE
REST_32VRS(0,r4,r10)
@@ -52,8 +52,8 @@ _GLOBAL(vec_enable)
*/
_GLOBAL(load_vr_state)
li r4,VRSTATE_VSCR
- lvx vr0,r4,r3
- mtvscr vr0
+ lvx v0,r4,r3
+ mtvscr v0
REST_32VRS(0,r4,r3)
blr
@@ -63,9 +63,9 @@ _GLOBAL(load_vr_state)
*/
_GLOBAL(store_vr_state)
SAVE_32VRS(0, r4, r3)
- mfvscr vr0
+ mfvscr v0
li r4, VRSTATE_VSCR
- stvx vr0, r4, r3
+ stvx v0, r4, r3
blr
/*
@@ -104,9 +104,9 @@ _GLOBAL(load_up_altivec)
addi r4,r4,THREAD
addi r6,r4,THREAD_VRSTATE
SAVE_32VRS(0,r5,r6)
- mfvscr vr0
+ mfvscr v0
li r10,VRSTATE_VSCR
- stvx vr0,r10,r6
+ stvx v0,r10,r6
/* Disable VMX for last_task_used_altivec */
PPC_LL r5,PT_REGS(r4)
toreal(r5)
@@ -142,8 +142,8 @@ _GLOBAL(load_up_altivec)
li r4,1
li r10,VRSTATE_VSCR
stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r6
- mtvscr vr0
+ lvx v0,r10,r6
+ mtvscr v0
REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
/* Update last_task_used_altivec to 'current' */
@@ -186,9 +186,9 @@ _GLOBAL(giveup_altivec)
addi r7,r3,THREAD_VRSTATE
2: PPC_LCMPI 0,r5,0
SAVE_32VRS(0,r4,r7)
- mfvscr vr0
+ mfvscr v0
li r4,VRSTATE_VSCR
- stvx vr0,r4,r7
+ stvx v0,r4,r7
beq 1f
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX