From 5c5a26fa9cd27ed4642f5a4ec76d9b487959e8dc Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Mon, 4 Feb 2008 23:50:25 -0800 Subject: sh: termios ioctl definitions These ports are holding up progress and now have been for months. Do the job for them. Signed-off-by: Alan Cox Cc: Paul Mundt Signed-off-by: Andrew Morton Signed-off-by: Paul Mundt --- include/asm-sh/ioctls.h | 4 ++++ include/asm-sh/termbits.h | 5 ++++- include/asm-sh/termios.h | 6 ++++-- 3 files changed, 12 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/asm-sh/ioctls.h b/include/asm-sh/ioctls.h index 35805df010a0..c212c371a4a5 100644 --- a/include/asm-sh/ioctls.h +++ b/include/asm-sh/ioctls.h @@ -78,6 +78,10 @@ #define TIOCSBRK _IO('T', 39) /* 0x5427 */ /* BSD compatibility */ #define TIOCCBRK _IO('T', 40) /* 0x5428 */ /* BSD compatibility */ #define TIOCGSID _IOR('T', 41, pid_t) /* 0x5429 */ /* Return the session ID of FD */ +#define TCGETS2 _IOR('T', 42, struct termios2) +#define TCSETS2 _IOW('T', 43, struct termios2) +#define TCSETSW2 _IOW('T', 44, struct termios2) +#define TCSETSF2 _IOW('T', 45, struct termios2) #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ diff --git a/include/asm-sh/termbits.h b/include/asm-sh/termbits.h index 7ee1b42eeab0..77db116948cf 100644 --- a/include/asm-sh/termbits.h +++ b/include/asm-sh/termbits.h @@ -140,6 +140,7 @@ struct ktermios { #define HUPCL 0002000 #define CLOCAL 0004000 #define CBAUDEX 0010000 +#define BOTHER 0010000 #define B57600 0010001 #define B115200 0010002 #define B230400 0010003 @@ -155,10 +156,12 @@ struct ktermios { #define B3000000 0010015 #define B3500000 0010016 #define B4000000 0010017 -#define CIBAUD 002003600000 /* input baud rate (not used) */ +#define CIBAUD 002003600000 /* input baud rate */ #define CMSPAR 010000000000 /* mark or space (stick) parity */ #define CRTSCTS 020000000000 /* flow control */ +#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ + /* c_lflag bits */ #define ISIG 0000001 #define ICANON 0000002 diff --git a/include/asm-sh/termios.h b/include/asm-sh/termios.h index e7c8f86ef890..0a8c793c76f2 100644 --- a/include/asm-sh/termios.h +++ b/include/asm-sh/termios.h @@ -80,8 +80,10 @@ struct termio { copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ }) -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) +#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) #endif /* __KERNEL__ */ -- cgit v1.2.3 From b9482378916abb9a1e0a2334187cdc67f2deda2c Mon Sep 17 00:00:00 2001 From: Adrian McMenamin Date: Wed, 6 Feb 2008 22:46:21 +0000 Subject: maple: fix up whitespace damage. This patch is fundamentally about fixing up the whitespace problems introduced by my previous patch (that brought the code into mainline). A second patch will follow that will fix memory leaks. The two need to be applied sequentially. 
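(An aside on the termios2 ioctls introduced in the first patch above: the point of TCGETS2/TCSETS2 plus BOTHER is to let userspace request arbitrary baud rates numerically instead of picking from the fixed Bnnnn codes. A minimal userspace sketch, not taken from the patch itself — it assumes the usual split where struct termios2 and BOTHER come from <asm/termbits.h> and the ioctl numbers from <asm/ioctls.h>, and that fd is an already-opened serial port:)

#include <sys/ioctl.h>		/* ioctl() */
#include <asm/termbits.h>	/* struct termios2, BOTHER, CBAUD */
#include <asm/ioctls.h>		/* TCGETS2/TCSETS2, as defined above */

static int set_custom_baud(int fd, int rate)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;

	tio.c_cflag &= ~CBAUD;		/* drop the legacy Bnnnn code */
	tio.c_cflag |= BOTHER;		/* "other": rate is given numerically */
	tio.c_ispeed = rate;		/* input rate, bits per second */
	tio.c_ospeed = rate;		/* output rate, bits per second */

	return ioctl(fd, TCSETS2, &tio);
}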
Signed-off-by: Adrian McMenamin Signed-off-by: Paul Mundt --- drivers/sh/maple/maple.c | 978 ++++++++++++++++++++++++----------------------- include/linux/maple.h | 101 ++--- 2 files changed, 543 insertions(+), 536 deletions(-) (limited to 'include') diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index e52a6296ca46..9c48ccc44c29 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -57,8 +57,8 @@ static int started, scanning, liststatus; static struct kmem_cache *maple_queue_cache; struct maple_device_specify { - int port; - int unit; + int port; + int unit; }; /** @@ -68,22 +68,23 @@ struct maple_device_specify { */ int maple_driver_register(struct device_driver *drv) { - if (!drv) - return -EINVAL; - drv->bus = &maple_bus_type; - return driver_register(drv); + if (!drv) + return -EINVAL; + drv->bus = &maple_bus_type; + return driver_register(drv); } + EXPORT_SYMBOL_GPL(maple_driver_register); /* set hardware registers to enable next round of dma */ static void maplebus_dma_reset(void) { - ctrl_outl(MAPLE_MAGIC, MAPLE_RESET); - /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ - ctrl_outl(1, MAPLE_TRIGTYPE); - ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED); - ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); - ctrl_outl(1, MAPLE_ENABLE); + ctrl_outl(MAPLE_MAGIC, MAPLE_RESET); + /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ + ctrl_outl(1, MAPLE_TRIGTYPE); + ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED); + ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); + ctrl_outl(1, MAPLE_ENABLE); } /** @@ -94,27 +95,28 @@ static void maplebus_dma_reset(void) * @function: the function code for the device */ void maple_getcond_callback(struct maple_device *dev, - void (*callback) (struct mapleq * mq), - unsigned long interval, unsigned long function) + void (*callback) (struct mapleq * mq), + unsigned long interval, unsigned long function) { - dev->callback = callback; - dev->interval = interval; - dev->function = cpu_to_be32(function); - dev->when = jiffies; + dev->callback = callback; + dev->interval = interval; + dev->function = cpu_to_be32(function); + dev->when = jiffies; } + EXPORT_SYMBOL_GPL(maple_getcond_callback); static int maple_dma_done(void) { - return (ctrl_inl(MAPLE_STATE) & 1) == 0; + return (ctrl_inl(MAPLE_STATE) & 1) == 0; } static void maple_release_device(struct device *dev) { - if (dev->type) { - kfree(dev->type->name); - kfree(dev->type); - } + if (dev->type) { + kfree(dev->type->name); + kfree(dev->type); + } } /** @@ -123,60 +125,61 @@ static void maple_release_device(struct device *dev) */ void maple_add_packet(struct mapleq *mq) { - mutex_lock(&maple_list_lock); - list_add(&mq->list, &maple_waitq); - mutex_unlock(&maple_list_lock); + mutex_lock(&maple_list_lock); + list_add(&mq->list, &maple_waitq); + mutex_unlock(&maple_list_lock); } + EXPORT_SYMBOL_GPL(maple_add_packet); static struct mapleq *maple_allocq(struct maple_device *dev) { - struct mapleq *mq; + struct mapleq *mq; - mq = kmalloc(sizeof(*mq), GFP_KERNEL); - if (!mq) - return NULL; + mq = kmalloc(sizeof(*mq), GFP_KERNEL); + if (!mq) + return NULL; - mq->dev = dev; - mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); - mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp); - if (!mq->recvbuf) { - kfree(mq); - return NULL; - } + mq->dev = dev; + mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); + mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp); + if (!mq->recvbuf) { + kfree(mq); + 
return NULL; + } - return mq; + return mq; } static struct maple_device *maple_alloc_dev(int port, int unit) { - struct maple_device *dev; + struct maple_device *dev; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return NULL; + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; - dev->port = port; - dev->unit = unit; - dev->mq = maple_allocq(dev); + dev->port = port; + dev->unit = unit; + dev->mq = maple_allocq(dev); - if (!dev->mq) { - kfree(dev); - return NULL; - } + if (!dev->mq) { + kfree(dev); + return NULL; + } - return dev; + return dev; } static void maple_free_dev(struct maple_device *mdev) { - if (!mdev) - return; - if (mdev->mq) { - kmem_cache_free(maple_queue_cache, mdev->mq->recvbufdcsp); - kfree(mdev->mq); - } - kfree(mdev); + if (!mdev) + return; + if (mdev->mq) { + kmem_cache_free(maple_queue_cache, mdev->mq->recvbufdcsp); + kfree(mdev->mq); + } + kfree(mdev); } /* process the command queue into a maple command block @@ -184,153 +187,153 @@ static void maple_free_dev(struct maple_device *mdev) */ static void maple_build_block(struct mapleq *mq) { - int port, unit, from, to, len; - unsigned long *lsendbuf = mq->sendbuf; + int port, unit, from, to, len; + unsigned long *lsendbuf = mq->sendbuf; - port = mq->dev->port & 3; - unit = mq->dev->unit; - len = mq->length; - from = port << 6; - to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20); + port = mq->dev->port & 3; + unit = mq->dev->unit; + len = mq->length; + from = port << 6; + to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20); - *maple_lastptr &= 0x7fffffff; - maple_lastptr = maple_sendptr; + *maple_lastptr &= 0x7fffffff; + maple_lastptr = maple_sendptr; - *maple_sendptr++ = (port << 16) | len | 0x80000000; - *maple_sendptr++ = PHYSADDR(mq->recvbuf); - *maple_sendptr++ = - mq->command | (to << 8) | (from << 16) | (len << 24); + *maple_sendptr++ = (port << 16) | len | 0x80000000; + *maple_sendptr++ = PHYSADDR(mq->recvbuf); + *maple_sendptr++ = + mq->command | (to << 8) | (from << 16) | (len << 24); - while (len-- > 0) - *maple_sendptr++ = *lsendbuf++; + while (len-- > 0) + *maple_sendptr++ = *lsendbuf++; } /* build up command queue */ static void maple_send(void) { - int i; - int maple_packets; - struct mapleq *mq, *nmq; - - if (!list_empty(&maple_sentq)) - return; - if (list_empty(&maple_waitq) || !maple_dma_done()) - return; - maple_packets = 0; - maple_sendptr = maple_lastptr = maple_sendbuf; - list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { - maple_build_block(mq); - list_move(&mq->list, &maple_sentq); - if (maple_packets++ > MAPLE_MAXPACKETS) - break; - } - if (maple_packets > 0) { - for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) - dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, - PAGE_SIZE, DMA_BIDIRECTIONAL); - } + int i; + int maple_packets; + struct mapleq *mq, *nmq; + + if (!list_empty(&maple_sentq)) + return; + if (list_empty(&maple_waitq) || !maple_dma_done()) + return; + maple_packets = 0; + maple_sendptr = maple_lastptr = maple_sendbuf; + list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { + maple_build_block(mq); + list_move(&mq->list, &maple_sentq); + if (maple_packets++ > MAPLE_MAXPACKETS) + break; + } + if (maple_packets > 0) { + for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) + dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, + PAGE_SIZE, DMA_BIDIRECTIONAL); + } } static int attach_matching_maple_driver(struct device_driver *driver, - void *devptr) + void *devptr) { - struct maple_driver *maple_drv; - struct maple_device *mdev; - - mdev = 
devptr; - maple_drv = to_maple_driver(driver); - if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) { - if (maple_drv->connect(mdev) == 0) { - mdev->driver = maple_drv; - return 1; - } - } - return 0; + struct maple_driver *maple_drv; + struct maple_device *mdev; + + mdev = devptr; + maple_drv = to_maple_driver(driver); + if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) { + if (maple_drv->connect(mdev) == 0) { + mdev->driver = maple_drv; + return 1; + } + } + return 0; } static void maple_detach_driver(struct maple_device *mdev) { - if (!mdev) - return; - if (mdev->driver) { - if (mdev->driver->disconnect) - mdev->driver->disconnect(mdev); - } - mdev->driver = NULL; - if (mdev->registered) { - maple_release_device(&mdev->dev); - device_unregister(&mdev->dev); - } - mdev->registered = 0; - maple_free_dev(mdev); + if (!mdev) + return; + if (mdev->driver) { + if (mdev->driver->disconnect) + mdev->driver->disconnect(mdev); + } + mdev->driver = NULL; + if (mdev->registered) { + maple_release_device(&mdev->dev); + device_unregister(&mdev->dev); + } + mdev->registered = 0; + maple_free_dev(mdev); } /* process initial MAPLE_COMMAND_DEVINFO for each device or port */ static void maple_attach_driver(struct maple_device *dev) { - char *p; - - char *recvbuf; - unsigned long function; - int matched, retval; - - recvbuf = dev->mq->recvbuf; - memcpy(&dev->devinfo, recvbuf + 4, sizeof(dev->devinfo)); - memcpy(dev->product_name, dev->devinfo.product_name, 30); - memcpy(dev->product_licence, dev->devinfo.product_licence, 60); - dev->product_name[30] = '\0'; - dev->product_licence[60] = '\0'; - - for (p = dev->product_name + 29; dev->product_name <= p; p--) - if (*p == ' ') - *p = '\0'; - else - break; - - for (p = dev->product_licence + 59; dev->product_licence <= p; p--) - if (*p == ' ') - *p = '\0'; - else - break; - - function = be32_to_cpu(dev->devinfo.function); - - if (function > 0x200) { - /* Do this silently - as not a real device */ - function = 0; - dev->driver = &maple_dummy_driver; - sprintf(dev->dev.bus_id, "%d:0.port", dev->port); - } else { - printk(KERN_INFO - "Maple bus at (%d, %d): Connected function 0x%lX\n", - dev->port, dev->unit, function); - - matched = - bus_for_each_drv(&maple_bus_type, NULL, dev, - attach_matching_maple_driver); - - if (matched == 0) { - /* Driver does not exist yet */ - printk(KERN_INFO - "No maple driver found for this device\n"); - dev->driver = &maple_dummy_driver; - } - - sprintf(dev->dev.bus_id, "%d:0%d.%lX", dev->port, - dev->unit, function); - } - dev->function = function; - dev->dev.bus = &maple_bus_type; - dev->dev.parent = &maple_bus; - dev->dev.release = &maple_release_device; - retval = device_register(&dev->dev); - if (retval) { - printk(KERN_INFO - "Maple bus: Attempt to register device (%x, %x) failed.\n", - dev->port, dev->unit); - maple_free_dev(dev); - } - dev->registered = 1; + char *p; + + char *recvbuf; + unsigned long function; + int matched, retval; + + recvbuf = dev->mq->recvbuf; + memcpy(&dev->devinfo, recvbuf + 4, sizeof(dev->devinfo)); + memcpy(dev->product_name, dev->devinfo.product_name, 30); + memcpy(dev->product_licence, dev->devinfo.product_licence, 60); + dev->product_name[30] = '\0'; + dev->product_licence[60] = '\0'; + + for (p = dev->product_name + 29; dev->product_name <= p; p--) + if (*p == ' ') + *p = '\0'; + else + break; + + for (p = dev->product_licence + 59; dev->product_licence <= p; p--) + if (*p == ' ') + *p = '\0'; + else + break; + + function = be32_to_cpu(dev->devinfo.function); + + if 
(function > 0x200) { + /* Do this silently - as not a real device */ + function = 0; + dev->driver = &maple_dummy_driver; + sprintf(dev->dev.bus_id, "%d:0.port", dev->port); + } else { + printk(KERN_INFO + "Maple bus at (%d, %d): Connected function 0x%lX\n", + dev->port, dev->unit, function); + + matched = + bus_for_each_drv(&maple_bus_type, NULL, dev, + attach_matching_maple_driver); + + if (matched == 0) { + /* Driver does not exist yet */ + printk(KERN_INFO + "No maple driver found for this device\n"); + dev->driver = &maple_dummy_driver; + } + + sprintf(dev->dev.bus_id, "%d:0%d.%lX", dev->port, + dev->unit, function); + } + dev->function = function; + dev->dev.bus = &maple_bus_type; + dev->dev.parent = &maple_bus; + dev->dev.release = &maple_release_device; + retval = device_register(&dev->dev); + if (retval) { + printk(KERN_INFO + "Maple bus: Attempt to register device (%x, %x) failed.\n", + dev->port, dev->unit); + maple_free_dev(dev); + } + dev->registered = 1; } /* @@ -340,270 +343,271 @@ static void maple_attach_driver(struct maple_device *dev) */ static int detach_maple_device(struct device *device, void *portptr) { - struct maple_device_specify *ds; - struct maple_device *mdev; - - ds = portptr; - mdev = to_maple_dev(device); - if (mdev->port == ds->port && mdev->unit == ds->unit) - return 1; - return 0; + struct maple_device_specify *ds; + struct maple_device *mdev; + + ds = portptr; + mdev = to_maple_dev(device); + if (mdev->port == ds->port && mdev->unit == ds->unit) + return 1; + return 0; } static int setup_maple_commands(struct device *device, void *ignored) { - struct maple_device *maple_dev = to_maple_dev(device); - - if ((maple_dev->interval > 0) - && time_after(jiffies, maple_dev->when)) { - maple_dev->when = jiffies + maple_dev->interval; - maple_dev->mq->command = MAPLE_COMMAND_GETCOND; - maple_dev->mq->sendbuf = &maple_dev->function; - maple_dev->mq->length = 1; - maple_add_packet(maple_dev->mq); - liststatus++; - } else { - if (time_after(jiffies, maple_pnp_time)) { - maple_dev->mq->command = MAPLE_COMMAND_DEVINFO; - maple_dev->mq->length = 0; - maple_add_packet(maple_dev->mq); - liststatus++; - } - } - - return 0; + struct maple_device *maple_dev = to_maple_dev(device); + + if ((maple_dev->interval > 0) + && time_after(jiffies, maple_dev->when)) { + maple_dev->when = jiffies + maple_dev->interval; + maple_dev->mq->command = MAPLE_COMMAND_GETCOND; + maple_dev->mq->sendbuf = &maple_dev->function; + maple_dev->mq->length = 1; + maple_add_packet(maple_dev->mq); + liststatus++; + } else { + if (time_after(jiffies, maple_pnp_time)) { + maple_dev->mq->command = MAPLE_COMMAND_DEVINFO; + maple_dev->mq->length = 0; + maple_add_packet(maple_dev->mq); + liststatus++; + } + } + + return 0; } /* VBLANK bottom half - implemented via workqueue */ static void maple_vblank_handler(struct work_struct *work) { - if (!maple_dma_done()) - return; - if (!list_empty(&maple_sentq)) - return; - ctrl_outl(0, MAPLE_ENABLE); - liststatus = 0; - bus_for_each_dev(&maple_bus_type, NULL, NULL, - setup_maple_commands); - if (time_after(jiffies, maple_pnp_time)) - maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; - if (liststatus && list_empty(&maple_sentq)) { - INIT_LIST_HEAD(&maple_sentq); - maple_send(); - } - maplebus_dma_reset(); + if (!maple_dma_done()) + return; + if (!list_empty(&maple_sentq)) + return; + ctrl_outl(0, MAPLE_ENABLE); + liststatus = 0; + bus_for_each_dev(&maple_bus_type, NULL, NULL, + setup_maple_commands); + if (time_after(jiffies, maple_pnp_time)) + maple_pnp_time = jiffies + 
MAPLE_PNP_INTERVAL; + if (liststatus && list_empty(&maple_sentq)) { + INIT_LIST_HEAD(&maple_sentq); + maple_send(); + } + maplebus_dma_reset(); } /* handle devices added via hotplugs - placing them on queue for DEVINFO*/ static void maple_map_subunits(struct maple_device *mdev, int submask) { - int retval, k, devcheck; - struct maple_device *mdev_add; - struct maple_device_specify ds; - - for (k = 0; k < 5; k++) { - ds.port = mdev->port; - ds.unit = k + 1; - retval = - bus_for_each_dev(&maple_bus_type, NULL, &ds, - detach_maple_device); - if (retval) { - submask = submask >> 1; - continue; - } - devcheck = submask & 0x01; - if (devcheck) { - mdev_add = maple_alloc_dev(mdev->port, k + 1); - if (!mdev_add) - return; - mdev_add->mq->command = MAPLE_COMMAND_DEVINFO; - mdev_add->mq->length = 0; - maple_add_packet(mdev_add->mq); - scanning = 1; - } - submask = submask >> 1; - } + int retval, k, devcheck; + struct maple_device *mdev_add; + struct maple_device_specify ds; + + for (k = 0; k < 5; k++) { + ds.port = mdev->port; + ds.unit = k + 1; + retval = + bus_for_each_dev(&maple_bus_type, NULL, &ds, + detach_maple_device); + if (retval) { + submask = submask >> 1; + continue; + } + devcheck = submask & 0x01; + if (devcheck) { + mdev_add = maple_alloc_dev(mdev->port, k + 1); + if (!mdev_add) + return; + mdev_add->mq->command = MAPLE_COMMAND_DEVINFO; + mdev_add->mq->length = 0; + maple_add_packet(mdev_add->mq); + scanning = 1; + } + submask = submask >> 1; + } } /* mark a device as removed */ static void maple_clean_submap(struct maple_device *mdev) { - int killbit; + int killbit; - killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20); - killbit = ~killbit; - killbit &= 0xFF; - subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit; + killbit = (mdev->unit > 0 ? 
(1 << (mdev->unit - 1)) & 0x1f : 0x20); + killbit = ~killbit; + killbit &= 0xFF; + subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit; } /* handle empty port or hotplug removal */ static void maple_response_none(struct maple_device *mdev, - struct mapleq *mq) + struct mapleq *mq) { - if (mdev->unit != 0) { - list_del(&mq->list); - maple_clean_submap(mdev); - printk(KERN_INFO - "Maple bus device detaching at (%d, %d)\n", - mdev->port, mdev->unit); - maple_detach_driver(mdev); - return; - } - if (!started) { - printk(KERN_INFO "No maple devices attached to port %d\n", - mdev->port); - return; - } - maple_clean_submap(mdev); + if (mdev->unit != 0) { + list_del(&mq->list); + maple_clean_submap(mdev); + printk(KERN_INFO + "Maple bus device detaching at (%d, %d)\n", + mdev->port, mdev->unit); + maple_detach_driver(mdev); + return; + } + if (!started) { + printk(KERN_INFO "No maple devices attached to port %d\n", + mdev->port); + return; + } + maple_clean_submap(mdev); } /* preprocess hotplugs or scans */ static void maple_response_devinfo(struct maple_device *mdev, - char *recvbuf) + char *recvbuf) { - char submask; - if ((!started) || (scanning == 2)) { - maple_attach_driver(mdev); - return; - } - if (mdev->unit == 0) { - submask = recvbuf[2] & 0x1F; - if (submask ^ subdevice_map[mdev->port]) { - maple_map_subunits(mdev, submask); - subdevice_map[mdev->port] = submask; - } - } + char submask; + if ((!started) || (scanning == 2)) { + maple_attach_driver(mdev); + return; + } + if (mdev->unit == 0) { + submask = recvbuf[2] & 0x1F; + if (submask ^ subdevice_map[mdev->port]) { + maple_map_subunits(mdev, submask); + subdevice_map[mdev->port] = submask; + } + } } /* maple dma end bottom half - implemented via workqueue */ static void maple_dma_handler(struct work_struct *work) { - struct mapleq *mq, *nmq; - struct maple_device *dev; - char *recvbuf; - enum maple_code code; - - if (!maple_dma_done()) - return; - ctrl_outl(0, MAPLE_ENABLE); - if (!list_empty(&maple_sentq)) { - list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { - recvbuf = mq->recvbuf; - code = recvbuf[0]; - dev = mq->dev; - switch (code) { - case MAPLE_RESPONSE_NONE: - maple_response_none(dev, mq); - break; - - case MAPLE_RESPONSE_DEVINFO: - maple_response_devinfo(dev, recvbuf); - break; - - case MAPLE_RESPONSE_DATATRF: - if (dev->callback) - dev->callback(mq); - break; - - case MAPLE_RESPONSE_FILEERR: - case MAPLE_RESPONSE_AGAIN: - case MAPLE_RESPONSE_BADCMD: - case MAPLE_RESPONSE_BADFUNC: - printk(KERN_DEBUG - "Maple non-fatal error 0x%X\n", - code); - break; - - case MAPLE_RESPONSE_ALLINFO: - printk(KERN_DEBUG - "Maple - extended device information not supported\n"); - break; - - case MAPLE_RESPONSE_OK: - break; - - default: - break; - } - } - INIT_LIST_HEAD(&maple_sentq); - if (scanning == 1) { - maple_send(); - scanning = 2; - } else - scanning = 0; - - if (started == 0) - started = 1; - } - maplebus_dma_reset(); + struct mapleq *mq, *nmq; + struct maple_device *dev; + char *recvbuf; + enum maple_code code; + + if (!maple_dma_done()) + return; + ctrl_outl(0, MAPLE_ENABLE); + if (!list_empty(&maple_sentq)) { + list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { + recvbuf = mq->recvbuf; + code = recvbuf[0]; + dev = mq->dev; + switch (code) { + case MAPLE_RESPONSE_NONE: + maple_response_none(dev, mq); + break; + + case MAPLE_RESPONSE_DEVINFO: + maple_response_devinfo(dev, recvbuf); + break; + + case MAPLE_RESPONSE_DATATRF: + if (dev->callback) + dev->callback(mq); + break; + + case MAPLE_RESPONSE_FILEERR: + case 
MAPLE_RESPONSE_AGAIN: + case MAPLE_RESPONSE_BADCMD: + case MAPLE_RESPONSE_BADFUNC: + printk(KERN_DEBUG + "Maple non-fatal error 0x%X\n", + code); + break; + + case MAPLE_RESPONSE_ALLINFO: + printk(KERN_DEBUG + "Maple - extended device information not supported\n"); + break; + + case MAPLE_RESPONSE_OK: + break; + + default: + break; + } + } + INIT_LIST_HEAD(&maple_sentq); + if (scanning == 1) { + maple_send(); + scanning = 2; + } else + scanning = 0; + + if (started == 0) + started = 1; + } + maplebus_dma_reset(); } static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id) { - /* Load everything into the bottom half */ - schedule_work(&maple_dma_process); - return IRQ_HANDLED; + /* Load everything into the bottom half */ + schedule_work(&maple_dma_process); + return IRQ_HANDLED; } static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id) { - schedule_work(&maple_vblank_process); - return IRQ_HANDLED; + schedule_work(&maple_vblank_process); + return IRQ_HANDLED; } static struct irqaction maple_dma_irq = { - .name = "maple bus DMA handler", - .handler = maplebus_dma_interrupt, - .flags = IRQF_SHARED, + .name = "maple bus DMA handler", + .handler = maplebus_dma_interrupt, + .flags = IRQF_SHARED, }; static struct irqaction maple_vblank_irq = { - .name = "maple bus VBLANK handler", - .handler = maplebus_vblank_interrupt, - .flags = IRQF_SHARED, + .name = "maple bus VBLANK handler", + .handler = maplebus_vblank_interrupt, + .flags = IRQF_SHARED, }; static int maple_set_dma_interrupt_handler(void) { - return setup_irq(HW_EVENT_MAPLE_DMA, &maple_dma_irq); + return setup_irq(HW_EVENT_MAPLE_DMA, &maple_dma_irq); } static int maple_set_vblank_interrupt_handler(void) { - return setup_irq(HW_EVENT_VSYNC, &maple_vblank_irq); + return setup_irq(HW_EVENT_VSYNC, &maple_vblank_irq); } static int maple_get_dma_buffer(void) { - maple_sendbuf = - (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, - MAPLE_DMA_PAGES); - if (!maple_sendbuf) - return -ENOMEM; - return 0; + maple_sendbuf = + (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, + MAPLE_DMA_PAGES); + if (!maple_sendbuf) + return -ENOMEM; + return 0; } static int match_maple_bus_driver(struct device *devptr, - struct device_driver *drvptr) + struct device_driver *drvptr) { - struct maple_driver *maple_drv; - struct maple_device *maple_dev; - - maple_drv = container_of(drvptr, struct maple_driver, drv); - maple_dev = container_of(devptr, struct maple_device, dev); - /* Trap empty port case */ - if (maple_dev->devinfo.function == 0xFFFFFFFF) - return 0; - else if (maple_dev->devinfo.function & - be32_to_cpu(maple_drv->function)) - return 1; - return 0; + struct maple_driver *maple_drv; + struct maple_device *maple_dev; + + maple_drv = container_of(drvptr, struct maple_driver, drv); + maple_dev = container_of(devptr, struct maple_device, dev); + /* Trap empty port case */ + if (maple_dev->devinfo.function == 0xFFFFFFFF) + return 0; + else if (maple_dev->devinfo.function & + be32_to_cpu(maple_drv->function)) + return 1; + return 0; } -static int maple_bus_uevent(struct device *dev, struct kobj_uevent_env *env) +static int maple_bus_uevent(struct device *dev, + struct kobj_uevent_env *env) { - return 0; + return 0; } static void maple_bus_release(struct device *dev) @@ -611,124 +615,126 @@ static void maple_bus_release(struct device *dev) } static struct maple_driver maple_dummy_driver = { - .drv = { - .name = "maple_dummy_driver", - .bus = &maple_bus_type, - }, + .drv = { + .name = "maple_dummy_driver", + .bus = &maple_bus_type, + }, }; 
struct bus_type maple_bus_type = { - .name = "maple", - .match = match_maple_bus_driver, - .uevent = maple_bus_uevent, + .name = "maple", + .match = match_maple_bus_driver, + .uevent = maple_bus_uevent, }; + EXPORT_SYMBOL_GPL(maple_bus_type); static struct device maple_bus = { - .bus_id = "maple", - .release = maple_bus_release, + .bus_id = "maple", + .release = maple_bus_release, }; static int __init maple_bus_init(void) { - int retval, i; - struct maple_device *mdev[MAPLE_PORTS]; - ctrl_outl(0, MAPLE_STATE); - - retval = device_register(&maple_bus); - if (retval) - goto cleanup; - - retval = bus_register(&maple_bus_type); - if (retval) - goto cleanup_device; - - retval = driver_register(&maple_dummy_driver.drv); - - if (retval) - goto cleanup_bus; - - /* allocate memory for maple bus dma */ - retval = maple_get_dma_buffer(); - if (retval) { - printk(KERN_INFO - "Maple bus: Failed to allocate Maple DMA buffers\n"); - goto cleanup_basic; - } - - /* set up DMA interrupt handler */ - retval = maple_set_dma_interrupt_handler(); - if (retval) { - printk(KERN_INFO - "Maple bus: Failed to grab maple DMA IRQ\n"); - goto cleanup_dma; - } - - /* set up VBLANK interrupt handler */ - retval = maple_set_vblank_interrupt_handler(); - if (retval) { - printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n"); - goto cleanup_irq; - } - - maple_queue_cache = - kmem_cache_create("maple_queue_cache", 0x400, 0, - SLAB_HWCACHE_ALIGN, NULL); - - if (!maple_queue_cache) - goto cleanup_bothirqs; - - /* setup maple ports */ - for (i = 0; i < MAPLE_PORTS; i++) { - mdev[i] = maple_alloc_dev(i, 0); - if (!mdev[i]) { - while (i-- > 0) - maple_free_dev(mdev[i]); - goto cleanup_cache; - } - mdev[i]->registered = 0; - mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; - mdev[i]->mq->length = 0; - maple_attach_driver(mdev[i]); - maple_add_packet(mdev[i]->mq); - subdevice_map[i] = 0; - } - - /* setup maplebus hardware */ - maplebus_dma_reset(); - - /* initial detection */ - maple_send(); - - maple_pnp_time = jiffies; - - printk(KERN_INFO "Maple bus core now registered.\n"); - - return 0; - -cleanup_cache: - kmem_cache_destroy(maple_queue_cache); - -cleanup_bothirqs: - free_irq(HW_EVENT_VSYNC, 0); - -cleanup_irq: - free_irq(HW_EVENT_MAPLE_DMA, 0); - -cleanup_dma: - free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); - -cleanup_basic: - driver_unregister(&maple_dummy_driver.drv); - -cleanup_bus: - bus_unregister(&maple_bus_type); - -cleanup_device: - device_unregister(&maple_bus); - -cleanup: - printk(KERN_INFO "Maple bus registration failed\n"); - return retval; + int retval, i; + struct maple_device *mdev[MAPLE_PORTS]; + ctrl_outl(0, MAPLE_STATE); + + retval = device_register(&maple_bus); + if (retval) + goto cleanup; + + retval = bus_register(&maple_bus_type); + if (retval) + goto cleanup_device; + + retval = driver_register(&maple_dummy_driver.drv); + + if (retval) + goto cleanup_bus; + + /* allocate memory for maple bus dma */ + retval = maple_get_dma_buffer(); + if (retval) { + printk(KERN_INFO + "Maple bus: Failed to allocate Maple DMA buffers\n"); + goto cleanup_basic; + } + + /* set up DMA interrupt handler */ + retval = maple_set_dma_interrupt_handler(); + if (retval) { + printk(KERN_INFO + "Maple bus: Failed to grab maple DMA IRQ\n"); + goto cleanup_dma; + } + + /* set up VBLANK interrupt handler */ + retval = maple_set_vblank_interrupt_handler(); + if (retval) { + printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n"); + goto cleanup_irq; + } + + maple_queue_cache = + 
kmem_cache_create("maple_queue_cache", 0x400, 0, + SLAB_HWCACHE_ALIGN, NULL); + + if (!maple_queue_cache) + goto cleanup_bothirqs; + + /* setup maple ports */ + for (i = 0; i < MAPLE_PORTS; i++) { + mdev[i] = maple_alloc_dev(i, 0); + if (!mdev[i]) { + while (i-- > 0) + maple_free_dev(mdev[i]); + goto cleanup_cache; + } + mdev[i]->registered = 0; + mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; + mdev[i]->mq->length = 0; + maple_attach_driver(mdev[i]); + maple_add_packet(mdev[i]->mq); + subdevice_map[i] = 0; + } + + /* setup maplebus hardware */ + maplebus_dma_reset(); + + /* initial detection */ + maple_send(); + + maple_pnp_time = jiffies; + + printk(KERN_INFO "Maple bus core now registered.\n"); + + return 0; + + cleanup_cache: + kmem_cache_destroy(maple_queue_cache); + + cleanup_bothirqs: + free_irq(HW_EVENT_VSYNC, 0); + + cleanup_irq: + free_irq(HW_EVENT_MAPLE_DMA, 0); + + cleanup_dma: + free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); + + cleanup_basic: + driver_unregister(&maple_dummy_driver.drv); + + cleanup_bus: + bus_unregister(&maple_bus_type); + + cleanup_device: + device_unregister(&maple_bus); + + cleanup: + printk(KERN_INFO "Maple bus registration failed\n"); + return retval; } + subsys_initcall(maple_bus_init); diff --git a/include/linux/maple.h b/include/linux/maple.h index bad9a7b319de..f82c17b77158 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -7,74 +7,75 @@ extern struct bus_type maple_bus_type; /* Maple Bus command and response codes */ enum maple_code { - MAPLE_RESPONSE_FILEERR = -5, - MAPLE_RESPONSE_AGAIN = -4, /* request should be retransmitted */ - MAPLE_RESPONSE_BADCMD = -3, - MAPLE_RESPONSE_BADFUNC = -2, - MAPLE_RESPONSE_NONE = -1, /* unit didn't respond at all */ - MAPLE_COMMAND_DEVINFO = 1, - MAPLE_COMMAND_ALLINFO = 2, - MAPLE_COMMAND_RESET = 3, - MAPLE_COMMAND_KILL = 4, - MAPLE_RESPONSE_DEVINFO = 5, - MAPLE_RESPONSE_ALLINFO = 6, - MAPLE_RESPONSE_OK = 7, - MAPLE_RESPONSE_DATATRF = 8, - MAPLE_COMMAND_GETCOND = 9, - MAPLE_COMMAND_GETMINFO = 10, - MAPLE_COMMAND_BREAD = 11, - MAPLE_COMMAND_BWRITE = 12, - MAPLE_COMMAND_SETCOND = 14 + MAPLE_RESPONSE_FILEERR = -5, + MAPLE_RESPONSE_AGAIN = -4, /* request should be retransmitted */ + MAPLE_RESPONSE_BADCMD = -3, + MAPLE_RESPONSE_BADFUNC = -2, + MAPLE_RESPONSE_NONE = -1, /* unit didn't respond at all */ + MAPLE_COMMAND_DEVINFO = 1, + MAPLE_COMMAND_ALLINFO = 2, + MAPLE_COMMAND_RESET = 3, + MAPLE_COMMAND_KILL = 4, + MAPLE_RESPONSE_DEVINFO = 5, + MAPLE_RESPONSE_ALLINFO = 6, + MAPLE_RESPONSE_OK = 7, + MAPLE_RESPONSE_DATATRF = 8, + MAPLE_COMMAND_GETCOND = 9, + MAPLE_COMMAND_GETMINFO = 10, + MAPLE_COMMAND_BREAD = 11, + MAPLE_COMMAND_BWRITE = 12, + MAPLE_COMMAND_SETCOND = 14 }; struct mapleq { - struct list_head list; - struct maple_device *dev; - void *sendbuf, *recvbuf, *recvbufdcsp; - unsigned char length; - enum maple_code command; + struct list_head list; + struct maple_device *dev; + void *sendbuf, *recvbuf, *recvbufdcsp; + unsigned char length; + enum maple_code command; }; struct maple_devinfo { - unsigned long function; - unsigned long function_data[3]; - unsigned char area_code; - unsigned char connector_directon; - char product_name[31]; - char product_licence[61]; - unsigned short standby_power; - unsigned short max_power; + unsigned long function; + unsigned long function_data[3]; + unsigned char area_code; + unsigned char connector_directon; + char product_name[31]; + char product_licence[61]; + unsigned short standby_power; + unsigned short max_power; }; struct maple_device { - struct 
maple_driver *driver; - struct mapleq *mq; - void *private_data; - void (*callback) (struct mapleq * mq); - unsigned long when, interval, function; - struct maple_devinfo devinfo; - unsigned char port, unit; - char product_name[32]; - char product_licence[64]; - int registered; - struct device dev; + struct maple_driver *driver; + struct mapleq *mq; + void *private_data; + void (*callback) (struct mapleq * mq); + unsigned long when, interval, function; + struct maple_devinfo devinfo; + unsigned char port, unit; + char product_name[32]; + char product_licence[64]; + int registered; + struct device dev; }; struct maple_driver { - unsigned long function; - int (*connect) (struct maple_device * dev); - void (*disconnect) (struct maple_device * dev); - struct device_driver drv; + unsigned long function; + int (*connect) (struct maple_device * dev); + void (*disconnect) (struct maple_device * dev); + struct device_driver drv; + int registered; }; void maple_getcond_callback(struct maple_device *dev, - void (*callback) (struct mapleq * mq), - unsigned long interval, - unsigned long function); + void (*callback) (struct mapleq * mq), + unsigned long interval, + unsigned long function); int maple_driver_register(struct device_driver *drv); void maple_add_packet(struct mapleq *mq); #define to_maple_dev(n) container_of(n, struct maple_device, dev) #define to_maple_driver(n) container_of(n, struct maple_driver, drv) -#endif /* __LINUX_MAPLE_H */ +#endif /* __LINUX_MAPLE_H */ -- cgit v1.2.3 From 87153058b2e3bedfd339dbfec5dd6dd3d98677b0 Mon Sep 17 00:00:00 2001 From: Adrian McMenamin Date: Wed, 6 Feb 2008 23:59:56 +0000 Subject: maple: Drop unused prototypes from linux/maple.h. This patch removes the now unneeded registration check variable from struct maple_device. (This patch assumes the include/linux/maple.h file has already been patched for whitespace errors by http://lkml.org/lkml/2008/2/6/327) Signed-off-by: Adrian McMenamin Signed-off-by: Paul Mundt --- include/linux/maple.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/maple.h b/include/linux/maple.h index f82c17b77158..35c3474ccf68 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -56,7 +56,6 @@ struct maple_device { unsigned char port, unit; char product_name[32]; char product_licence[64]; - int registered; struct device dev; }; -- cgit v1.2.3 From 5d0e146493a3306b931338ac030bc7a8de40c066 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 7 Feb 2008 13:05:13 +0900 Subject: sh: Wire up new timerfd syscalls. 
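(The three syscalls wired up here are consumed from userspace roughly as follows — a minimal sketch, assuming a libc that already exposes the timerfd wrappers via <sys/timerfd.h>:)

#include <sys/timerfd.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static int wait_one_tick(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1 },	/* first expiry after 1s */
		.it_interval = { .tv_sec = 1 },	/* then every second */
	};
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);	/* -> __NR_timerfd_create */

	if (fd < 0)
		return -1;
	if (timerfd_settime(fd, 0, &its, NULL) < 0) {	/* -> __NR_timerfd_settime */
		close(fd);
		return -1;
	}
	/* the fd becomes readable on expiry; read() yields the expiry count */
	if (read(fd, &expirations, sizeof(expirations)) != sizeof(expirations)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}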
Signed-off-by: Paul Mundt --- arch/sh/kernel/syscalls_32.S | 4 +++- arch/sh/kernel/syscalls_64.S | 4 +++- include/asm-sh/unistd_32.h | 6 ++++-- include/asm-sh/unistd_64.h | 6 ++++-- 4 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index 719e127a7c05..a46cc3a41148 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S @@ -338,6 +338,8 @@ ENTRY(sys_call_table) .long sys_epoll_pwait .long sys_utimensat /* 320 */ .long sys_signalfd - .long sys_ni_syscall + .long sys_timerfd_create .long sys_eventfd .long sys_fallocate + .long sys_timerfd_settime /* 325 */ + .long sys_timerfd_gettime diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S index 12c7340356ae..d5d7843aad94 100644 --- a/arch/sh/kernel/syscalls_64.S +++ b/arch/sh/kernel/syscalls_64.S @@ -376,6 +376,8 @@ sys_call_table: .long sys_epoll_pwait .long sys_utimensat .long sys_signalfd - .long sys_ni_syscall /* 350 */ + .long sys_timerfd_create /* 350 */ .long sys_eventfd .long sys_fallocate + .long sys_timerfd_settime + .long sys_timerfd_gettime diff --git a/include/asm-sh/unistd_32.h b/include/asm-sh/unistd_32.h index 433fd1b48fa2..0b07212ec659 100644 --- a/include/asm-sh/unistd_32.h +++ b/include/asm-sh/unistd_32.h @@ -330,11 +330,13 @@ #define __NR_epoll_pwait 319 #define __NR_utimensat 320 #define __NR_signalfd 321 -/* #define __NR_timerfd 322 removed */ +#define __NR_timerfd_create 322 #define __NR_eventfd 323 #define __NR_fallocate 324 +#define __NR_timerfd_settime 325 +#define __NR_timerfd_gettime 326 -#define NR_syscalls 325 +#define NR_syscalls 327 #ifdef __KERNEL__ diff --git a/include/asm-sh/unistd_64.h b/include/asm-sh/unistd_64.h index 108d2ba897fe..9d21eab52427 100644 --- a/include/asm-sh/unistd_64.h +++ b/include/asm-sh/unistd_64.h @@ -90,7 +90,7 @@ #define __NR_sigpending 73 #define __NR_sethostname 74 #define __NR_setrlimit 75 -#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ +#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ #define __NR_getrusage 77 #define __NR_gettimeofday 78 #define __NR_settimeofday 79 @@ -370,9 +370,11 @@ #define __NR_epoll_pwait 347 #define __NR_utimensat 348 #define __NR_signalfd 349 -/* #define __NR_timerfd 350 removed */ +#define __NR_timerfd_create 350 #define __NR_eventfd 351 #define __NR_fallocate 352 +#define __NR_timerfd_settime 353 +#define __NR_timerfd_gettime 354 #ifdef __KERNEL__ -- cgit v1.2.3 From 1e6760c5c4589d02a6877fb256b99c33dd8f1ede Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 7 Feb 2008 19:50:52 +0900 Subject: sh: make copy_to/from_user() static inline This patch changes copy_from_user() and copy_to_user() from macros into static inline functions. This way we can use them as function pointers. Also unify the 64 bit and 32 bit versions. 
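(Why being able to take their address matters: a macro has no address, so the old copy_{to,from}_user() macros could never populate an operations table — which is exactly what the trapped-io patch later in this series does. The struct layout below is inferred from the initializers in traps_32.c; the real definition lands in asm-sh/system.h, which is not part of this excerpt:)

#include <asm/uaccess.h>	/* the now-inline copy_{from,to}_user() */

struct mem_access {	/* inferred; matches how handle_unaligned_ins() calls it */
	unsigned long (*from)(void *dst, const void *src, unsigned long cnt);
	unsigned long (*to)(void *dst, const void *src, unsigned long cnt);
};

/* legal only now that these are real functions rather than macros */
static struct mem_access user_mem_access = {
	copy_from_user,
	copy_to_user,
};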
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- include/asm-sh/uaccess.h | 29 +++++++++++++++++++++++++++++ include/asm-sh/uaccess_32.h | 24 +++--------------------- include/asm-sh/uaccess_64.h | 19 ------------------- 3 files changed, 32 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/include/asm-sh/uaccess.h b/include/asm-sh/uaccess.h index ff24ce95b238..b3440c305b5d 100644 --- a/include/asm-sh/uaccess.h +++ b/include/asm-sh/uaccess.h @@ -1,5 +1,34 @@ +#ifndef __ASM_SH_UACCESS_H +#define __ASM_SH_UACCESS_H + #ifdef CONFIG_SUPERH32 # include "uaccess_32.h" #else # include "uaccess_64.h" #endif + +static inline unsigned long +copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned long __copy_from = (unsigned long) from; + __kernel_size_t __copy_size = (__kernel_size_t) n; + + if (__copy_size && __access_ok(__copy_from, __copy_size)) + return __copy_user(to, from, __copy_size); + + return __copy_size; +} + +static inline unsigned long +copy_to_user(void __user *to, const void *from, unsigned long n) +{ + unsigned long __copy_to = (unsigned long) to; + __kernel_size_t __copy_size = (__kernel_size_t) n; + + if (__copy_size && __access_ok(__copy_to, __copy_size)) + return __copy_user(to, from, __copy_size); + + return __copy_size; +} + +#endif /* __ASM_SH_UACCESS_H */ diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h index b6082f3c1dc4..c0318b608893 100644 --- a/include/asm-sh/uaccess_32.h +++ b/include/asm-sh/uaccess_32.h @@ -10,8 +10,8 @@ * Copyright (C) 1996, 1997, 1998 by Ralf Baechle * and i386 version. */ -#ifndef __ASM_SH_UACCESS_H -#define __ASM_SH_UACCESS_H +#ifndef __ASM_SH_UACCESS_32_H +#define __ASM_SH_UACCESS_32_H #include #include @@ -302,24 +302,6 @@ extern void __put_user_unknown(void); /* Return the number of bytes NOT copied */ __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); -#define copy_to_user(to,from,n) ({ \ -void *__copy_to = (void *) (to); \ -__kernel_size_t __copy_size = (__kernel_size_t) (n); \ -__kernel_size_t __copy_res; \ -if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \ -__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \ -} else __copy_res = __copy_size; \ -__copy_res; }) - -#define copy_from_user(to,from,n) ({ \ -void *__copy_to = (void *) (to); \ -void *__copy_from = (void *) (from); \ -__kernel_size_t __copy_size = (__kernel_size_t) (n); \ -__kernel_size_t __copy_res; \ -if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \ -__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \ -} else __copy_res = __copy_size; \ -__copy_res; }) static __always_inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) @@ -507,4 +489,4 @@ struct exception_table_entry extern int fixup_exception(struct pt_regs *regs); -#endif /* __ASM_SH_UACCESS_H */ +#endif /* __ASM_SH_UACCESS_32_H */ diff --git a/include/asm-sh/uaccess_64.h b/include/asm-sh/uaccess_64.h index d54ec082d25a..f956b7b316c7 100644 --- a/include/asm-sh/uaccess_64.h +++ b/include/asm-sh/uaccess_64.h @@ -202,15 +202,6 @@ extern void __put_user_unknown(void); /* XXX: should be such that: 4byte and the rest. 
*/ extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n); -#define copy_to_user(to,from,n) ({ \ -void *__copy_to = (void *) (to); \ -__kernel_size_t __copy_size = (__kernel_size_t) (n); \ -__kernel_size_t __copy_res; \ -if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \ -__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \ -} else __copy_res = __copy_size; \ -__copy_res; }) - #define copy_to_user_ret(to,from,n,retval) ({ \ if (copy_to_user(to,from,n)) \ return retval; \ @@ -225,16 +216,6 @@ if (__copy_to_user(to,from,n)) \ return retval; \ }) -#define copy_from_user(to,from,n) ({ \ -void *__copy_to = (void *) (to); \ -void *__copy_from = (void *) (from); \ -__kernel_size_t __copy_size = (__kernel_size_t) (n); \ -__kernel_size_t __copy_res; \ -if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \ -__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \ -} else __copy_res = __copy_size; \ -__copy_res; }) - #define copy_from_user_ret(to,from,n,retval) ({ \ if (copy_from_user(to,from,n)) \ return retval; \ -- cgit v1.2.3 From e7cc9a7340b8ec018caa9eb1d035fdaef1f2fc51 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 7 Feb 2008 20:18:21 +0900 Subject: sh: trapped io support V2 The idea is that we want to get rid of the in/out/readb/writeb callbacks from the machvec and replace that with simple inline read and write operations to memory. Fast and simple for most hardware devices (think pci). Some devices require special treatment though - like 16-bit only CF devices - so we need to have some method to hook in callbacks. This patch makes it possible to add a per-device trap generating filter. This way we can get maximum performance of sane hardware - which doesn't need this filter - and crappy hardware works but gets punished by a performance hit. V2 changes things around a bit and replaces io access callbacks with a simple minimum_bus_width value. In the future we can add stride as well. 
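(For illustration, board code might hook a 16-bit-only CF window into the new filter along these lines. This is a sketch only: struct trapped_io is declared in the new asm-sh/io_trapped.h, which is not quoted in this log, so the field names below are taken from their uses in io_trapped.c and the window address is invented:)

#include <linux/init.h>
#include <linux/ioport.h>	/* struct resource, IORESOURCE_MEM */
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <asm/io_trapped.h>	/* struct trapped_io, register_trapped_io() */
#include <asm/page.h>		/* PAGE_SIZE */

static struct resource cf_resources[] = {
	[0] = {
		.start	= 0x14000000,			/* invented CF window */
		.end	= 0x14000000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,		/* IO _or_ MEM, never both */
	},
};

/* register_trapped_io() rejects structures that are not page aligned */
static struct trapped_io cf_trapped_io __attribute__((aligned(PAGE_SIZE))) = {
	.resource		= cf_resources,
	.num_resources		= ARRAY_SIZE(cf_resources),
	.minimum_bus_width	= 16,	/* copy_word() widens narrower accesses */
};

static int __init board_trapped_io_setup(void)
{
	return register_trapped_io(&cf_trapped_io);
}
device_initcall(board_trapped_io_setup);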
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 3 + arch/sh/kernel/Makefile_32 | 1 + arch/sh/kernel/Makefile_64 | 1 + arch/sh/kernel/io.c | 8 +- arch/sh/kernel/io_generic.c | 24 ++-- arch/sh/kernel/io_trapped.c | 269 ++++++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/traps_32.c | 59 ++++++---- arch/sh/mm/fault_32.c | 3 + include/asm-sh/io.h | 10 ++ include/asm-sh/io_trapped.h | 58 ++++++++++ include/asm-sh/system.h | 5 + include/asm-sh/system_32.h | 3 + 12 files changed, 406 insertions(+), 38 deletions(-) create mode 100644 arch/sh/kernel/io_trapped.c create mode 100644 include/asm-sh/io_trapped.h (limited to 'include') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 8398cf105a00..f61bf17db39f 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -93,6 +93,9 @@ config ARCH_NO_VIRT_TO_BUS config ARCH_SUPPORTS_AOUT def_bool y +config IO_TRAPPED + bool + source "init/Kconfig" menu "System type" diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index c89289831053..62bf373266f7 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -22,5 +22,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_BINFMT_ELF) += dump_task.o +obj-$(CONFIG_IO_TRAPPED) += io_trapped.o EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index 1ef21cc087f3..e01283d49cbf 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -18,5 +18,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_BINFMT_ELF) += dump_task.o +obj-$(CONFIG_IO_TRAPPED) += io_trapped.o EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c index 71c9fde2fd90..2b8991229900 100644 --- a/arch/sh/kernel/io.c +++ b/arch/sh/kernel/io.c @@ -63,7 +63,13 @@ EXPORT_SYMBOL(memset_io); void __iomem *ioport_map(unsigned long port, unsigned int nr) { - return sh_mv.mv_ioport_map(port, nr); + void __iomem *ret; + + ret = __ioport_map_trapped(port, nr); + if (ret) + return ret; + + return __ioport_map(port, nr); } EXPORT_SYMBOL(ioport_map); diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c index 771ea4230441..db769449f5a7 100644 --- a/arch/sh/kernel/io_generic.c +++ b/arch/sh/kernel/io_generic.c @@ -33,17 +33,17 @@ static inline void delay(void) u8 generic_inb(unsigned long port) { - return ctrl_inb((unsigned long __force)ioport_map(port, 1)); + return ctrl_inb((unsigned long __force)__ioport_map(port, 1)); } u16 generic_inw(unsigned long port) { - return ctrl_inw((unsigned long __force)ioport_map(port, 2)); + return ctrl_inw((unsigned long __force)__ioport_map(port, 2)); } u32 generic_inl(unsigned long port) { - return ctrl_inl((unsigned long __force)ioport_map(port, 4)); + return ctrl_inl((unsigned long __force)__ioport_map(port, 4)); } u8 generic_inb_p(unsigned long port) @@ -81,7 +81,7 @@ void generic_insb(unsigned long port, void *dst, unsigned long count) volatile u8 *port_addr; u8 *buf = dst; - port_addr = (volatile u8 *)ioport_map(port, 1); + port_addr = (volatile u8 *)__ioport_map(port, 1); while (count--) *buf++ = *port_addr; } @@ -91,7 +91,7 @@ void generic_insw(unsigned long port, void *dst, unsigned long count) volatile u16 *port_addr; u16 *buf = dst; - port_addr = (volatile u16 *)ioport_map(port, 2); + port_addr = (volatile u16 *)__ioport_map(port, 2); while (count--) *buf++ = *port_addr; @@ -103,7 +103,7 @@ void 
generic_insl(unsigned long port, void *dst, unsigned long count) volatile u32 *port_addr; u32 *buf = dst; - port_addr = (volatile u32 *)ioport_map(port, 4); + port_addr = (volatile u32 *)__ioport_map(port, 4); while (count--) *buf++ = *port_addr; @@ -112,17 +112,17 @@ void generic_insl(unsigned long port, void *dst, unsigned long count) void generic_outb(u8 b, unsigned long port) { - ctrl_outb(b, (unsigned long __force)ioport_map(port, 1)); + ctrl_outb(b, (unsigned long __force)__ioport_map(port, 1)); } void generic_outw(u16 b, unsigned long port) { - ctrl_outw(b, (unsigned long __force)ioport_map(port, 2)); + ctrl_outw(b, (unsigned long __force)__ioport_map(port, 2)); } void generic_outl(u32 b, unsigned long port) { - ctrl_outl(b, (unsigned long __force)ioport_map(port, 4)); + ctrl_outl(b, (unsigned long __force)__ioport_map(port, 4)); } void generic_outb_p(u8 b, unsigned long port) @@ -153,7 +153,7 @@ void generic_outsb(unsigned long port, const void *src, unsigned long count) volatile u8 *port_addr; const u8 *buf = src; - port_addr = (volatile u8 __force *)ioport_map(port, 1); + port_addr = (volatile u8 __force *)__ioport_map(port, 1); while (count--) *port_addr = *buf++; @@ -164,7 +164,7 @@ void generic_outsw(unsigned long port, const void *src, unsigned long count) volatile u16 *port_addr; const u16 *buf = src; - port_addr = (volatile u16 __force *)ioport_map(port, 2); + port_addr = (volatile u16 __force *)__ioport_map(port, 2); while (count--) *port_addr = *buf++; @@ -177,7 +177,7 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count) volatile u32 *port_addr; const u32 *buf = src; - port_addr = (volatile u32 __force *)ioport_map(port, 4); + port_addr = (volatile u32 __force *)__ioport_map(port, 4); while (count--) *port_addr = *buf++; diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c new file mode 100644 index 000000000000..0bfdc9a34e1a --- /dev/null +++ b/arch/sh/kernel/io_trapped.c @@ -0,0 +1,269 @@ +/* + * Trapped io support + * + * Copyright (C) 2008 Magnus Damm + * + * Intercept io operations by trapping. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRAPPED_PAGES_MAX 16 +#define MAX(a, b) (((a) >= (b)) ? 
(a) : (b)) + +#ifdef CONFIG_HAS_IOPORT +LIST_HEAD(trapped_io); +#endif +#ifdef CONFIG_HAS_IOMEM +LIST_HEAD(trapped_mem); +#endif +static DEFINE_SPINLOCK(trapped_lock); + +int __init register_trapped_io(struct trapped_io *tiop) +{ + struct resource *res; + unsigned long len = 0, flags = 0; + struct page *pages[TRAPPED_PAGES_MAX]; + int k, n; + + /* structure must be page aligned */ + if ((unsigned long)tiop & (PAGE_SIZE - 1)) + goto bad; + + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + len += roundup((res->end - res->start) + 1, PAGE_SIZE); + flags |= res->flags; + } + + /* support IORESOURCE_IO _or_ MEM, not both */ + if (hweight_long(flags) != 1) + goto bad; + + n = len >> PAGE_SHIFT; + + if (n >= TRAPPED_PAGES_MAX) + goto bad; + + for (k = 0; k < n; k++) + pages[k] = virt_to_page(tiop); + + tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE); + if (!tiop->virt_base) + goto bad; + + len = 0; + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n", + (unsigned long)(tiop->virt_base + len), + res->flags & IORESOURCE_IO ? "io" : "mmio", + (unsigned long)res->start); + len += roundup((res->end - res->start) + 1, PAGE_SIZE); + } + + tiop->magic = IO_TRAPPED_MAGIC; + INIT_LIST_HEAD(&tiop->list); + spin_lock_irq(&trapped_lock); + if (flags & IORESOURCE_IO) + list_add(&tiop->list, &trapped_io); + if (flags & IORESOURCE_MEM) + list_add(&tiop->list, &trapped_mem); + spin_unlock_irq(&trapped_lock); + + return 0; + bad: + pr_warning("unable to install trapped io filter\n"); + return -1; +} + +void __iomem *match_trapped_io_handler(struct list_head *list, + unsigned long offset, + unsigned long size) +{ + unsigned long voffs; + struct trapped_io *tiop; + struct resource *res; + int k, len; + + spin_lock_irq(&trapped_lock); + list_for_each_entry(tiop, list, list) { + voffs = 0; + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + if (res->start == offset) { + spin_unlock_irq(&trapped_lock); + return tiop->virt_base + voffs; + } + + len = (res->end - res->start) + 1; + voffs += roundup(len, PAGE_SIZE); + } + } + spin_unlock_irq(&trapped_lock); + return NULL; +} + +static struct trapped_io *lookup_tiop(unsigned long address) +{ + pgd_t *pgd_k; + pud_t *pud_k; + pmd_t *pmd_k; + pte_t *pte_k; + pte_t entry; + + pgd_k = swapper_pg_dir + pgd_index(address); + if (!pgd_present(*pgd_k)) + return NULL; + + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + return NULL; + + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + return NULL; + + pte_k = pte_offset_kernel(pmd_k, address); + entry = *pte_k; + + return pfn_to_kaddr(pte_pfn(entry)); +} + +static unsigned long lookup_address(struct trapped_io *tiop, + unsigned long address) +{ + struct resource *res; + unsigned long vaddr = (unsigned long)tiop->virt_base; + unsigned long len; + int k; + + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + len = roundup((res->end - res->start) + 1, PAGE_SIZE); + if (address < (vaddr + len)) + return res->start + (address - vaddr); + vaddr += len; + } + return 0; +} + +static unsigned long long copy_word(unsigned long src_addr, int src_len, + unsigned long dst_addr, int dst_len) +{ + unsigned long long tmp = 0; + + switch (src_len) { + case 1: + tmp = ctrl_inb(src_addr); + break; + case 2: + tmp = ctrl_inw(src_addr); + break; + case 4: + tmp = ctrl_inl(src_addr); + break; + case 8: + tmp = ctrl_inq(src_addr); + break; + } + + switch (dst_len) { + 
case 1: + ctrl_outb(tmp, dst_addr); + break; + case 2: + ctrl_outw(tmp, dst_addr); + break; + case 4: + ctrl_outl(tmp, dst_addr); + break; + case 8: + ctrl_outq(tmp, dst_addr); + break; + } + + return tmp; +} + +static unsigned long from_device(void *dst, const void *src, unsigned long cnt) +{ + struct trapped_io *tiop; + unsigned long src_addr = (unsigned long)src; + unsigned long long tmp; + + pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt); + tiop = lookup_tiop(src_addr); + WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); + + src_addr = lookup_address(tiop, src_addr); + if (!src_addr) + return cnt; + + tmp = copy_word(src_addr, MAX(cnt, (tiop->minimum_bus_width / 8)), + (unsigned long)dst, cnt); + + pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp); + return 0; +} + +static unsigned long to_device(void *dst, const void *src, unsigned long cnt) +{ + struct trapped_io *tiop; + unsigned long dst_addr = (unsigned long)dst; + unsigned long long tmp; + + pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt); + tiop = lookup_tiop(dst_addr); + WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); + + dst_addr = lookup_address(tiop, dst_addr); + if (!dst_addr) + return cnt; + + tmp = copy_word((unsigned long)src, cnt, + dst_addr, MAX(cnt, (tiop->minimum_bus_width / 8))); + + pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp); + return 0; +} + +static struct mem_access trapped_io_access = { + from_device, + to_device, +}; + +int handle_trapped_io(struct pt_regs *regs, unsigned long address) +{ + mm_segment_t oldfs; + opcode_t instruction; + int tmp; + + if (!lookup_tiop(address)) + return 0; + + WARN_ON(user_mode(regs)); + + oldfs = get_fs(); + set_fs(KERNEL_DS); + if (copy_from_user(&instruction, (void *)(regs->pc), + sizeof(instruction))) { + set_fs(oldfs); + return 0; + } + + tmp = handle_unaligned_access(instruction, regs, &trapped_io_access); + set_fs(oldfs); + return tmp == 0; +} diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 25b1b8672cf0..baa4fa368dce 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -172,6 +172,11 @@ static inline void sign_extend(unsigned int count, unsigned char *dst) #endif } +static struct mem_access user_mem_access = { + copy_from_user, + copy_to_user, +}; + /* * handle an instruction that does an unaligned memory access by emulating the * desired behaviour @@ -179,7 +184,8 @@ static inline void sign_extend(unsigned int count, unsigned char *dst) * (if that instruction is in a branch delay slot) * - return 0 if emulation okay, -EFAULT on existential error */ -static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) +static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs, + struct mem_access *ma) { int ret, index, count; unsigned long *rm, *rn; @@ -206,7 +212,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) #if !defined(__LITTLE_ENDIAN__) dst += 4-count; #endif - if (copy_from_user(dst, src, count)) + if (ma->from(dst, src, count)) goto fetch_fault; sign_extend(count, dst); @@ -219,7 +225,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) dst = (unsigned char*) *rn; dst += regs->regs[0]; - if (copy_to_user(dst, src, count)) + if (ma->to(dst, src, count)) goto fetch_fault; } ret = 0; @@ -230,7 +236,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) dst = (unsigned char*) *rn; dst += (instruction&0x000F)<<2; - if (copy_to_user(dst,src,4)) + if 
(ma->to(dst, src, 4)) goto fetch_fault; ret = 0; break; @@ -243,7 +249,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) #if !defined(__LITTLE_ENDIAN__) src += 4-count; #endif - if (copy_to_user(dst, src, count)) + if (ma->to(dst, src, count)) goto fetch_fault; ret = 0; break; @@ -254,7 +260,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) dst = (unsigned char*) rn; *(unsigned long*)dst = 0; - if (copy_from_user(dst,src,4)) + if (ma->from(dst, src, 4)) goto fetch_fault; ret = 0; break; @@ -269,7 +275,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) #if !defined(__LITTLE_ENDIAN__) dst += 4-count; #endif - if (copy_from_user(dst, src, count)) + if (ma->from(dst, src, count)) goto fetch_fault; sign_extend(count, dst); ret = 0; @@ -285,7 +291,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) dst = (unsigned char*) *rm; /* called Rn in the spec */ dst += (instruction&0x000F)<<1; - if (copy_to_user(dst, src, 2)) + if (ma->to(dst, src, 2)) goto fetch_fault; ret = 0; break; @@ -299,7 +305,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) #if !defined(__LITTLE_ENDIAN__) dst += 2; #endif - if (copy_from_user(dst, src, 2)) + if (ma->from(dst, src, 2)) goto fetch_fault; sign_extend(2, dst); ret = 0; @@ -320,8 +326,9 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs) * emulate the instruction in the delay slot * - fetches the instruction from PC+2 */ -static inline int handle_unaligned_delayslot(struct pt_regs *regs, - opcode_t old_instruction) +static inline int handle_delayslot(struct pt_regs *regs, + opcode_t old_instruction, + struct mem_access *ma) { opcode_t instruction; void *addr = (void *)(regs->pc + instruction_size(old_instruction)); @@ -336,7 +343,7 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs, regs, 0); } - return handle_unaligned_ins(instruction, regs); + return handle_unaligned_ins(instruction, regs, ma); } /* @@ -362,7 +369,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs, static int handle_unaligned_notify_count = 10; -static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) +int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs, + struct mem_access *ma) { u_int rm; int ret, index; @@ -385,19 +393,19 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) case 0x0000: if (instruction==0x000B) { /* rts */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc = regs->pr; } else if ((instruction&0x00FF)==0x0023) { /* braf @Rm */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc += rm + 4; } else if ((instruction&0x00FF)==0x0003) { /* bsrf @Rm */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { regs->pr = regs->pc + 4; regs->pc += rm + 4; @@ -418,13 +426,13 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) case 0x4000: if ((instruction&0x00FF)==0x002B) { /* jmp @Rm */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc = rm; } else if ((instruction&0x00FF)==0x000B) { /* jsr @Rm */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if 
(ret==0) { regs->pr = regs->pc + 4; regs->pc = rm; @@ -451,7 +459,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) case 0x0B00: /* bf lab - no delayslot*/ break; case 0x0F00: /* bf/s lab */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) if ((regs->sr & 0x00000001) != 0) @@ -464,7 +472,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) case 0x0900: /* bt lab - no delayslot */ break; case 0x0D00: /* bt/s lab */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) if ((regs->sr & 0x00000001) == 0) @@ -478,13 +486,13 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) break; case 0xA000: /* bra label */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) regs->pc += SH_PC_12BIT_OFFSET(instruction); break; case 0xB000: /* bsr label */ - ret = handle_unaligned_delayslot(regs, instruction); + ret = handle_delayslot(regs, instruction, ma); if (ret==0) { regs->pr = regs->pc + 4; regs->pc += SH_PC_12BIT_OFFSET(instruction); @@ -495,7 +503,7 @@ static int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs) /* handle non-delay-slot instruction */ simple: - ret = handle_unaligned_ins(instruction, regs); + ret = handle_unaligned_ins(instruction, regs, ma); if (ret==0) regs->pc += instruction_size(instruction); return ret; @@ -558,7 +566,8 @@ asmlinkage void do_address_error(struct pt_regs *regs, goto uspace_segv; } - tmp = handle_unaligned_access(instruction, regs); + tmp = handle_unaligned_access(instruction, regs, + &user_mem_access); set_fs(oldfs); if (tmp==0) @@ -587,7 +596,7 @@ uspace_segv: die("insn faulting in do_address_error", regs, 0); } - handle_unaligned_access(instruction, regs); + handle_unaligned_access(instruction, regs, &user_mem_access); set_fs(oldfs); } } diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 33b43d20e9f6..4ef0a1f1a9ab 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -163,6 +164,8 @@ no_context: if (fixup_exception(regs)) return; + if (handle_trapped_io(regs, address)) + return; /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. 
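As background to the mechanism above: the page fault handler re-executes the faulting instruction through the mem_access hooks, and from_device()/to_device() look up the trapped window, then round the device-side transfer up to tiop->minimum_bus_width while handing the CPU only the bytes it actually asked for. The sketch below is illustrative only, with invented names (fake_bus, widened_read), written as ordinary user-space C under those assumptions; it is not code from this patch.

/*
 * Illustrative sketch only, not kernel code: models how a trapped
 * 8-bit access can be satisfied with a full-width 16-bit bus cycle,
 * in the spirit of copy_word()/from_device() above.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for a device register file that only supports 16-bit cycles. */
static uint16_t fake_bus[4] = { 0x1234, 0xabcd, 0x5678, 0x9abc };

/*
 * Read 'cnt' bytes (1 or 2, with cnt + (off & 1) <= 2) on behalf of the
 * CPU, but perform the device-side access at the full 16-bit bus width,
 * mirroring the minimum_bus_width rounding done by from_device().
 */
static uint16_t widened_read(unsigned long off, size_t cnt, void *dst)
{
	uint16_t word;

	/* Bus side: always a full 16-bit cycle, aligned down. */
	word = fake_bus[off / 2];

	/*
	 * CPU side: hand back only the byte(s) requested. Byte selection
	 * here is host-endian; the real path picks the lane according to
	 * the bus endianness.
	 */
	memcpy(dst, (uint8_t *)&word + (off & 1), cnt);
	return word;
}

int main(void)
{
	uint8_t b;

	widened_read(1, 1, &b);	/* 8-bit read via a 16-bit bus cycle */
	printf("byte at offset 1: 0x%02x (from word 0x%04x)\n", b, fake_bus[0]);
	return 0;
}

In the real path the equivalent byte selection is made by copy_word(), driven by the access size that handle_unaligned_access() decodes from the trapping instruction.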
diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h index 94900c089519..3d2b114f9d57 100644 --- a/include/asm-sh/io.h +++ b/include/asm-sh/io.h @@ -38,6 +38,7 @@ */ #define __IO_PREFIX generic #include +#include #define maybebadio(port) \ printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \ @@ -207,6 +208,8 @@ static inline void __set_io_port_base(unsigned long pbase) generic_io_base = pbase; } +#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n)) + /* We really want to try and get these to memcpy etc */ extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long); extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long); @@ -309,7 +312,14 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) { #ifdef CONFIG_SUPERH32 unsigned long last_addr = offset + size - 1; +#endif + void __iomem *ret; + ret = __ioremap_trapped(offset, size); + if (ret) + return ret; + +#ifdef CONFIG_SUPERH32 /* * For P1 and P2 space this is trivial, as everything is already * mapped. Uncached access for P1 addresses are done through P2. diff --git a/include/asm-sh/io_trapped.h b/include/asm-sh/io_trapped.h new file mode 100644 index 000000000000..f1251d4f0ba9 --- /dev/null +++ b/include/asm-sh/io_trapped.h @@ -0,0 +1,58 @@ +#ifndef __ASM_SH_IO_TRAPPED_H +#define __ASM_SH_IO_TRAPPED_H + +#include +#include +#include + +#define IO_TRAPPED_MAGIC 0xfeedbeef + +struct trapped_io { + unsigned int magic; + struct resource *resource; + unsigned int num_resources; + unsigned int minimum_bus_width; + struct list_head list; + void __iomem *virt_base; +} __aligned(PAGE_SIZE); + +#ifdef CONFIG_IO_TRAPPED +int register_trapped_io(struct trapped_io *tiop); +int handle_trapped_io(struct pt_regs *regs, unsigned long address); + +void __iomem *match_trapped_io_handler(struct list_head *list, + unsigned long offset, + unsigned long size); + +#ifdef CONFIG_HAS_IOMEM +extern struct list_head trapped_mem; + +static inline void __iomem * +__ioremap_trapped(unsigned long offset, unsigned long size) +{ + return match_trapped_io_handler(&trapped_mem, offset, size); +} +#else +#define __ioremap_trapped(offset, size) NULL +#endif + +#ifdef CONFIG_HAS_IOPORT +extern struct list_head trapped_io; + +static inline void __iomem * +__ioport_map_trapped(unsigned long offset, unsigned long size) +{ + return match_trapped_io_handler(&trapped_io, offset, size); +} +#else +#define __ioport_map_trapped(offset, size) NULL +#endif + +#else +#define register_trapped_io(tiop) (-1) +#define handle_trapped_io(tiop, address) 0 +#define __ioremap_trapped(offset, size) NULL +#define __ioport_map_trapped(offset, size) NULL +#endif + +#endif /* __ASM_SH_IO_TRAPPED_H */ diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index 772cd1a0a674..5145aa2a0ce9 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -182,6 +182,11 @@ BUILD_TRAP_HANDLER(fpu_state_restore); #define arch_align_stack(x) (x) +struct mem_access { + unsigned long (*from)(void *dst, const void *src, unsigned long cnt); + unsigned long (*to)(void *dst, const void *src, unsigned long cnt); +}; + #ifdef CONFIG_SUPERH32 # include "system_32.h" #else diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h index 7ff08d956ba8..f11bcf0855ed 100644 --- a/include/asm-sh/system_32.h +++ b/include/asm-sh/system_32.h @@ -96,4 +96,7 @@ do { \ : "=&r" (__dummy)); \ } while (0) +int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs, + struct mem_access *ma); + #endif /* 
__ASM_SH_SYSTEM_32_H */ -- cgit v1.2.3 From 2d952b4b8c94ed8576b4221dad9654c5d98275d0 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 7 Feb 2008 20:21:10 +0900 Subject: sh: trapped io support for r2d V2 This patch converts the CF device on r2d boards from machvec readb/writeb to trapped io. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/boards/renesas/rts7751r2d/setup.c | 45 ++++++++++--------------------- include/asm-sh/rts7751r2d.h | 3 --- 3 files changed, 15 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f61bf17db39f..0d288fe87021 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -459,6 +459,7 @@ config SH_RTS7751R2D bool "RTS7751R2D" depends on CPU_SUBTYPE_SH7751R select SYS_SUPPORTS_PCI + select IO_TRAPPED help Select RTS7751R2D if configuring for a Renesas Technology Sales SH-Graphics board. diff --git a/arch/sh/boards/renesas/rts7751r2d/setup.c b/arch/sh/boards/renesas/rts7751r2d/setup.c index a0ef81b7de37..f21ee49ef3a5 100644 --- a/arch/sh/boards/renesas/rts7751r2d/setup.c +++ b/arch/sh/boards/renesas/rts7751r2d/setup.c @@ -21,6 +21,7 @@ #include #include #include +#include #include static struct resource cf_ide_resources[] = { @@ -214,13 +215,25 @@ static struct platform_device *rts7751r2d_devices[] __initdata = { &uart_device, &sm501_device, #endif - &cf_ide_device, &heartbeat_device, &spi_sh_sci_device, }; +/* + * The CF is connected with a 16-bit bus where 8-bit operations are + * unsupported. The linux ata driver is however using 8-bit operations, so + * insert a trapped io filter to convert 8-bit operations into 16-bit. + */ +static struct trapped_io cf_trapped_io = { + .resource = cf_ide_resources, + .num_resources = 2, + .minimum_bus_width = 16, +}; + static int __init rts7751r2d_devices_setup(void) { + if (register_trapped_io(&cf_trapped_io) == 0) + platform_device_register(&cf_ide_device); spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus)); return platform_add_devices(rts7751r2d_devices, ARRAY_SIZE(rts7751r2d_devices)); @@ -232,34 +245,6 @@ static void rts7751r2d_power_off(void) ctrl_outw(0x0001, PA_POWOFF); } -static inline unsigned char is_ide_ioaddr(unsigned long addr) -{ - return ((cf_ide_resources[0].start <= addr && - addr <= cf_ide_resources[0].end) || - (cf_ide_resources[1].start <= addr && - addr <= cf_ide_resources[1].end)); -} - -void rts7751r2d_writeb(u8 b, void __iomem *addr) -{ - unsigned long tmp = (unsigned long __force)addr; - - if (is_ide_ioaddr(tmp)) - ctrl_outw((u16)b, tmp); - else - ctrl_outb(b, tmp); -} - -u8 rts7751r2d_readb(void __iomem *addr) -{ - unsigned long tmp = (unsigned long __force)addr; - - if (is_ide_ioaddr(tmp)) - return ctrl_inw(tmp) & 0xff; - else - return ctrl_inb(tmp); -} - /* * Initialize the board */ @@ -310,6 +295,4 @@ static struct sh_machine_vector mv_rts7751r2d __initmv = { .mv_setup = rts7751r2d_setup, .mv_init_irq = init_rts7751r2d_IRQ, .mv_irq_demux = rts7751r2d_irq_demux, - .mv_writeb = rts7751r2d_writeb, - .mv_readb = rts7751r2d_readb, }; diff --git a/include/asm-sh/rts7751r2d.h b/include/asm-sh/rts7751r2d.h index 83b9c111f171..0a800157b826 100644 --- a/include/asm-sh/rts7751r2d.h +++ b/include/asm-sh/rts7751r2d.h @@ -67,7 +67,4 @@ void init_rts7751r2d_IRQ(void); int rts7751r2d_irq_demux(int); -#define __IO_PREFIX rts7751r2d -#include - #endif /* __ASM_SH_RENESAS_RTS7751R2D */ -- cgit v1.2.3 From c1a34e4c547a7e6185078bf5e65a3ca0e1081df2 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 7 Feb 2008 20:23:53 
+0900 Subject: sh: trapped io support for highlander V2 This patch converts the highlander CF device from good old machvec readb/writeb to the new shiny trapped io. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/boards/renesas/r7780rp/setup.c | 47 ++++++++++++---------------- include/asm-sh/r7780rp.h | 3 --- 3 files changed, 17 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 0d288fe87021..3297b87a40d6 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -476,6 +476,7 @@ config SH_HIGHLANDER bool "Highlander" depends on CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 select SYS_SUPPORTS_PCI + select IO_TRAPPED config SH_MIGOR bool "Migo-R" diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c index f7a8d5c9d510..2f68bea7890c 100644 --- a/arch/sh/boards/renesas/r7780rp/setup.c +++ b/arch/sh/boards/renesas/r7780rp/setup.c @@ -23,6 +23,7 @@ #include #include #include +#include static struct resource r8a66597_usb_host_resources[] = { [0] = { @@ -181,13 +182,27 @@ static struct platform_device *r7780rp_devices[] __initdata = { &m66592_usb_peripheral_device, &heartbeat_device, #ifndef CONFIG_SH_R7780RP - &cf_ide_device, &ax88796_device, #endif }; +/* + * The CF is connected using a 16-bit bus where 8-bit operations are + * unsupported. The linux ata driver is however using 8-bit operations, so + * insert a trapped io filter to convert 8-bit operations into 16-bit. + */ +static struct trapped_io cf_trapped_io = { + .resource = cf_ide_resources, + .num_resources = 2, + .minimum_bus_width = 16, +}; + static int __init r7780rp_devices_setup(void) { +#ifndef CONFIG_SH_R7780RP + if (register_trapped_io(&cf_trapped_io) == 0) + platform_device_register(&cf_ide_device); +#endif return platform_add_devices(r7780rp_devices, ARRAY_SIZE(r7780rp_devices)); } @@ -226,34 +241,6 @@ static void r7780rp_power_off(void) ctrl_outw(0x0001, PA_POFF); } -static inline unsigned char is_ide_ioaddr(unsigned long addr) -{ - return ((cf_ide_resources[0].start <= addr && - addr <= cf_ide_resources[0].end) || - (cf_ide_resources[1].start <= addr && - addr <= cf_ide_resources[1].end)); -} - -void highlander_writeb(u8 b, void __iomem *addr) -{ - unsigned long tmp = (unsigned long __force)addr; - - if (is_ide_ioaddr(tmp)) - ctrl_outw((u16)b, tmp); - else - ctrl_outb(b, tmp); -} - -u8 highlander_readb(void __iomem *addr) -{ - unsigned long tmp = (unsigned long __force)addr; - - if (is_ide_ioaddr(tmp)) - return ctrl_inw(tmp) & 0xff; - else - return ctrl_inb(tmp); -} - /* * Initialize the board */ @@ -338,6 +325,4 @@ static struct sh_machine_vector mv_highlander __initmv = { .mv_setup = highlander_setup, .mv_init_irq = highlander_init_irq, .mv_irq_demux = highlander_irq_demux, - .mv_readb = highlander_readb, - .mv_writeb = highlander_writeb, }; diff --git a/include/asm-sh/r7780rp.h b/include/asm-sh/r7780rp.h index bdecea0840a0..1770460a4616 100644 --- a/include/asm-sh/r7780rp.h +++ b/include/asm-sh/r7780rp.h @@ -195,7 +195,4 @@ unsigned char *highlander_init_irq_r7780mp(void); unsigned char *highlander_init_irq_r7780rp(void); unsigned char *highlander_init_irq_r7785rp(void); -#define __IO_PREFIX r7780rp -#include - #endif /* __ASM_SH_RENESAS_R7780RP */ -- cgit v1.2.3 From 9109a30e5a548b39463b5a777943cf103da507af Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 8 Feb 2008 17:31:24 +0900 Subject: sh: add support for sh7366 processor This patch adds sh7366 cpu support.
Just the most basic things like interrupt controller, clocks and serial port are included at this point. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 7 ++ arch/sh/Kconfig.debug | 2 +- arch/sh/kernel/cpu/sh4/probe.c | 6 ++ arch/sh/kernel/cpu/sh4a/Makefile | 2 + arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 10 +- arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 177 +++++++++++++++++++++++++++++++++ arch/sh/kernel/setup.c | 2 +- drivers/serial/sh-sci.c | 2 +- drivers/serial/sh-sci.h | 8 +- include/asm-sh/cpu-sh4/freq.h | 4 +- include/asm-sh/processor.h | 2 +- 11 files changed, 215 insertions(+), 7 deletions(-) create mode 100644 arch/sh/kernel/cpu/sh4a/setup-sh7366.c (limited to 'include') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 3297b87a40d6..b3400b5ad5c6 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -315,6 +315,13 @@ config CPU_SUBTYPE_SH7722 select ARCH_SPARSEMEM_ENABLE select SYS_SUPPORTS_NUMA +config CPU_SUBTYPE_SH7366 + bool "Support SH7366 processor" + select CPU_SH4AL_DSP + select CPU_SHX2 + select ARCH_SPARSEMEM_ENABLE + select SYS_SUPPORTS_NUMA + # SH-5 Processor Support config CPU_SUBTYPE_SH5_101 diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index ccfa0b23d366..93722d099e7a 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -30,7 +30,7 @@ config EARLY_SCIF_CONSOLE_PORT hex depends on EARLY_SCIF_CONSOLE default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763 - default "0xffe00000" if CPU_SUBTYPE_SH7722 + default "0xffe00000" if CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7366 default "0xffea0000" if CPU_SUBTYPE_SH7785 default "0xfffe8000" if CPU_SUBTYPE_SH7203 default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263 diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 89b454b1f0f1..9e89984c4f1d 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -132,6 +132,12 @@ int __init detect_cpu_and_cache_system(void) boot_cpu_data.dcache.ways = 4; boot_cpu_data.flags |= CPU_HAS_LLSC; } + else if (prr == 0x70) { + boot_cpu_data.type = CPU_SH7366; + boot_cpu_data.icache.ways = 4; + boot_cpu_data.dcache.ways = 4; + boot_cpu_data.flags |= CPU_HAS_LLSC; + } break; case 0x4000: /* 1st cut */ case 0x4001: /* 2nd cut */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 08ac6387bf17..5d890ac8e793 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o +obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o # SMP setup @@ -21,6 +22,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o +clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7722.o clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o obj-y += $(clock-y) diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index a0fd8bb21f7c..299138ebe160 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -1,7 +1,7 @@ /* * arch/sh/kernel/cpu/sh4a/clock-sh7722.c * - * SH7722 support for the clock framework + * SH7722 & SH7366 support for the clock framework * * 
Copyright (c) 2006-2007 Nomad Global Solutions Inc * Based on code for sh7343 by Paul Mundt @@ -417,15 +417,19 @@ static int sh7722_siu_which(struct clk *clk) return 0; if (!strcmp(clk->name, "siu_b_clk")) return 1; +#if defined(CONFIG_CPU_SUBTYPE_SH7722) if (!strcmp(clk->name, "irda_clk")) return 2; +#endif return -EINVAL; } static unsigned long sh7722_siu_regs[] = { [0] = SCLKACR, [1] = SCLKBCR, +#if defined(CONFIG_CPU_SUBTYPE_SH7722) [2] = IrDACLKCR, +#endif }; static int sh7722_siu_start_stop(struct clk *clk, int enable) @@ -571,10 +575,12 @@ static struct clk sh7722_siu_b_clock = { .ops = &sh7722_siu_clk_ops, }; +#if defined(CONFIG_CPU_SUBTYPE_SH7722) static struct clk sh7722_irda_clock = { .name = "irda_clk", .ops = &sh7722_siu_clk_ops, }; +#endif static struct clk sh7722_video_clock = { .name = "video_clk", @@ -588,7 +594,9 @@ static struct clk *sh7722_clocks[] = { &sh7722_sdram_clock, &sh7722_siu_a_clock, &sh7722_siu_b_clock, +#if defined(CONFIG_CPU_SUBTYPE_SH7722) &sh7722_irda_clock, +#endif &sh7722_video_clock, }; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c new file mode 100644 index 000000000000..967e8b69a2f8 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -0,0 +1,177 @@ +/* + * SH7366 Setup + * + * Copyright (C) 2008 Renesas Solutions + * + * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xffe00000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 80, 80, 80, 80 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7366_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7366_devices_setup(void) +{ + return platform_add_devices(sh7366_devices, + ARRAY_SIZE(sh7366_devices)); +} +__initcall(sh7366_devices_setup); + +enum { + UNUSED=0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + ICB, + DMAC0, DMAC1, DMAC2, DMAC3, + VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU, + MFI, VPU, USB, + MMC_MMC1I, MMC_MMC2I, MMC_MMC3I, + DMAC4, DMAC5, DMAC_DADERR, + SCIF, SCIFA1, SCIFA2, + DENC, MSIOF, + FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, + I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, + SDHI0, SDHI1, SDHI2, SDHI3, + CMT, TSIF, SIU, + TMU0, TMU1, TMU2, + VEU2, LCDC, + + /* interrupt groups */ + + DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), + INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), + INTC_VECT(ICB, 0x700), + INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820), + INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860), + INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0), + INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0), + INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20), + INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20), + INTC_VECT(MMC_MMC3I, 0xb40), + INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0), + INTC_VECT(DMAC_DADERR, 0xbc0), + INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20), 
+ INTC_VECT(SCIFA2, 0xc40), + INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80), + INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0), + INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), + INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), + INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), + INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0), + INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0), + INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), + INTC_VECT(SIU, 0xf80), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), + INTC_VECT(VEU2, 0x580), INTC_VECT(LCDC, 0x580), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3), + INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU), + INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I), + INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR), + INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, + FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), + INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), + INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ + { } }, + { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ + { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } }, + { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ + { 0, 0, 0, VPU, 0, 0, 0, MFI } }, + { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ + { 0, 0, 0, ICB } }, + { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ + { 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } }, + { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ + { 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } }, + { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ + { 0, 0, 0, 0, 0, 0, 0, MSIOF } }, + { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ + { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, + FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, + { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ + { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } }, + { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ + { 0, 0, 0, CMT, 0, USB, } }, + { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ + { 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } }, + { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ + { 0, 0, 0, 0, 0, 0, 0, TSIF } }, + { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } }, + { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } }, + { 0xa4080008, 0, 16, 4, /* IPRC */ { } }, + { 0xa408000c, 0, 16, 4, /* IPRD */ { } }, + { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } }, + { 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } }, + { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } }, + { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } }, + { 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } }, + { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } }, + { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } }, + { 0xa408002c, 0, 16, 4, /* IPRL */ { } }, + { 0xa4140010, 0, 32, 4, /* INTPRI00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xa414001c, 16, 2, /* ICR1 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7366", vectors, groups, + mask_registers, 
prio_registers, sense_registers); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +void __init plat_mem_setup(void) +{ + /* TODO: Register Node 1 */ +} diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 18a5baf2cbad..ff4f54a47c07 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -333,7 +333,7 @@ static const char *cpu_name[] = { [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785", [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3", [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103", - [CPU_SH_NONE] = "Unknown" + [CPU_SH7366] = "SH7366", [CPU_SH_NONE] = "Unknown" }; const char *get_cpu_subtype(struct sh_cpuinfo *c) diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index ddf639144538..9ce12cb2cebc 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -393,7 +393,7 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag) if (cflag & CRTSCTS) { fcr_val |= SCFCR_MCE; } else { -#ifdef CONFIG_CPU_SUBTYPE_SH7343 +#if defined(CONFIG_CPU_SUBTYPE_SH7343) || defined(CONFIG_CPU_SUBTYPE_SH7366) /* Nothing */ #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index f5764ebcfe07..57aaa09811ea 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h @@ -97,6 +97,12 @@ # define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ # define SCIF_ONLY # define PORT_PSCR 0xA405011E +#elif defined(CONFIG_CPU_SUBTYPE_SH7366) +# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ +# define SCSPTR0 SCPDR0 +# define SCIF_ORER 0x0001 /* overrun error bit */ +# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ +# define SCIF_ONLY #elif defined(CONFIG_CPU_SUBTYPE_SH4_202) # define SCSPTR2 0xffe80020 /* 16 bit SCIF */ # define SCIF_ORER 0x0001 /* overrun error bit */ @@ -577,7 +583,7 @@ static inline int sci_rxd_in(struct uart_port *port) return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ return 1; } -#elif defined(CONFIG_CPU_SUBTYPE_SH7722) +#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || defined(CONFIG_CPU_SUBTYPE_SH7366) static inline int sci_rxd_in(struct uart_port *port) { if (port->mapbase == 0xffe00000) diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h index 1ac10b9a078f..ec028c649215 100644 --- a/include/asm-sh/cpu-sh4/freq.h +++ b/include/asm-sh/cpu-sh4/freq.h @@ -10,12 +10,14 @@ #ifndef __ASM_CPU_SH4_FREQ_H #define __ASM_CPU_SH4_FREQ_H -#if defined(CONFIG_CPU_SUBTYPE_SH7722) +#if defined(CONFIG_CPU_SUBTYPE_SH7722) || defined(CONFIG_CPU_SUBTYPE_SH7366) #define FRQCR 0xa4150000 #define VCLKCR 0xa4150004 #define SCLKACR 0xa4150008 #define SCLKBCR 0xa415000c +#if defined(CONFIG_CPU_SUBTYPE_SH7722) #define IrDACLKCR 0xa4150010 +#endif #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) #define FRQCR 0xffc80000 diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h index c9b14161f73d..19fe47c1ca17 100644 --- a/include/asm-sh/processor.h +++ b/include/asm-sh/processor.h @@ -33,7 +33,7 @@ enum cpu_type { CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SHX3, /* SH4AL-DSP types */ - CPU_SH7343, CPU_SH7722, + CPU_SH7343, CPU_SH7722, CPU_SH7366, /* SH-5 types */ CPU_SH5_101, CPU_SH5_103, -- cgit v1.2.3 From 829c773da599285e2780dbaab5c795b7b498ef22 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 12 Feb 2008 16:48:16 +0900 Subject: sh: Handle SH7366 CPU in check_bugs(). 
Signed-off-by: Paul Mundt --- include/asm-sh/bugs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h index def8128b8b78..cfda7d5bf026 100644 --- a/include/asm-sh/bugs.h +++ b/include/asm-sh/bugs.h @@ -39,7 +39,7 @@ static void __init check_bugs(void) *p++ = '4'; *p++ = 'a'; break; - case CPU_SH7343 ... CPU_SH7722: + case CPU_SH7343 ... CPU_SH7366: *p++ = '4'; *p++ = 'a'; *p++ = 'l'; -- cgit v1.2.3 From 5286031693d14ae20ce4298d002eddc2044e19a4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 12 Feb 2008 16:55:21 +0900 Subject: sh: Fix up pte_mkhuge() build breakage for SH-5. Applies the fix from 5b67954e804465a4658dd4da8d52b87a8d1ea00c to pgtable_64.h. Signed-off-by: Paul Mundt --- include/asm-sh/pgtable_64.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include') diff --git a/include/asm-sh/pgtable_64.h b/include/asm-sh/pgtable_64.h index 972211671c9a..bfdcb2084fa4 100644 --- a/include/asm-sh/pgtable_64.h +++ b/include/asm-sh/pgtable_64.h @@ -137,6 +137,14 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) #define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1) #endif +/* + * Stub out _PAGE_SZHUGE if we don't have a good definition for it, + * to make pte_mkhuge() happy. + */ +#ifndef _PAGE_SZHUGE +# define _PAGE_SZHUGE (0) +#endif + /* * Default flags for a Kernel page. * This is fundametally also SHARED because the main use of this define -- cgit v1.2.3 From 5e9c8ac5699f2a830fab2c224b6f57bd7da338b8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 12 Feb 2008 16:59:30 +0900 Subject: sh: Fix up set_fixmap_nocache() for SH-5. This needs a PAGE_KERNEL_NOCACHE definition, as provided by pgtable_32.h. Signed-off-by: Paul Mundt --- include/asm-sh/pgtable_64.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/asm-sh/pgtable_64.h b/include/asm-sh/pgtable_64.h index bfdcb2084fa4..f9dd9d311441 100644 --- a/include/asm-sh/pgtable_64.h +++ b/include/asm-sh/pgtable_64.h @@ -187,6 +187,11 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) _PAGE_WRITE | _PAGE_EXECUTE) #define PAGE_KERNEL __pgprot(_KERNPG_TABLE) +#define PAGE_KERNEL_NOCACHE \ + __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_EXECUTE | _PAGE_ACCESSED | \ + _PAGE_DIRTY | _PAGE_SHARED) + /* Make it a device mapping for maximum safety (e.g. for mapping device registers into user-space via /dev/map). */ #define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE) -- cgit v1.2.3 From f1f8926a94132e6433b559a3eced65404226f5cd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 12 Feb 2008 17:00:39 +0900 Subject: sh: Update SH-5 flush_cache_sigtramp() for API changes. Previously this took an explicit range, update this to use the same behaviour as the rest of the SH parts where we simply flush out a line from the start address. Signed-off-by: Paul Mundt --- arch/sh/mm/cache-sh5.c | 8 +++++--- include/asm-sh/cpu-sh5/cacheflush.h | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index 4617e3aeee73..5d1f615fe525 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c @@ -1015,15 +1015,17 @@ void flush_icache_user_range(struct vm_area_struct *vma, ARCH/SH64 PRIVATE CALLABLE API. 
##########################################################################*/ -void flush_cache_sigtramp(unsigned long start, unsigned long end) +void flush_cache_sigtramp(unsigned long vaddr) { + unsigned long end = vaddr + L1_CACHE_BYTES; + /* For the address range [start,end), write back the data from the D-cache and invalidate the corresponding region of the I-cache for the current process. Used to flush signal trampolines on the stack to make them executable. */ - sh64_dcache_wback_current_user_range(start, end); + sh64_dcache_wback_current_user_range(vaddr, end); wmb(); - sh64_icache_inv_current_user_range(start, end); + sh64_icache_inv_current_user_range(vaddr, end); } diff --git a/include/asm-sh/cpu-sh5/cacheflush.h b/include/asm-sh/cpu-sh5/cacheflush.h index 98edb5b1da32..f935acbacf38 100644 --- a/include/asm-sh/cpu-sh5/cacheflush.h +++ b/include/asm-sh/cpu-sh5/cacheflush.h @@ -11,7 +11,7 @@ struct mm_struct; extern void flush_cache_all(void); extern void flush_cache_mm(struct mm_struct *mm); -extern void flush_cache_sigtramp(unsigned long start, unsigned long end); +extern void flush_cache_sigtramp(unsigned long vaddr); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); -- cgit v1.2.3 From c2f4d36640947ddd13af7a2c36d197eb9fe5280a Mon Sep 17 00:00:00 2001 From: Kristoffer Ericson Date: Mon, 11 Feb 2008 18:41:49 +0100 Subject: sh: Tidy include/asm-sh/hp6xx.h This patch removes defunct LED support functions from hp6xx.h since they are now added in a proper driver (see commit below). Also adds tabs instead of spaces before comments. *commit d39a7a63eb3971b1b3cc5c181ed526bf437b1c72 Signed-off-by: Kristoffer Ericson Signed-off-by: Paul Mundt --- include/asm-sh/hp6xx.h | 28 +++------------------------- 1 file changed, 3 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/asm-sh/hp6xx.h b/include/asm-sh/hp6xx.h index 53ca5643d9c7..0d4165a32dcd 100644 --- a/include/asm-sh/hp6xx.h +++ b/include/asm-sh/hp6xx.h @@ -10,9 +10,9 @@ * */ -#define HP680_BTN_IRQ 32 /* IRQ0_IRQ */ -#define HP680_TS_IRQ 35 /* IRQ3_IRQ */ -#define HP680_HD64461_IRQ 36 /* IRQ4_IRQ */ +#define HP680_BTN_IRQ 32 /* IRQ0_IRQ */ +#define HP680_TS_IRQ 35 /* IRQ3_IRQ */ +#define HP680_HD64461_IRQ 36 /* IRQ4_IRQ */ #define DAC_LCD_BRIGHTNESS 0 #define DAC_SPEAKER_VOLUME 1 @@ -55,26 +55,4 @@ #define PJDR 0xa4000130 #define PKDR 0xa4000132 -static inline void hp6xx_led_red(int on) -{ - u16 v16; - v16 = ctrl_inw(CONFIG_HD64461_IOBASE + HD64461_GPBDR - 0x10000); - if (on) - ctrl_outw(v16 & (~HD64461_GPBDR_LED_RED), CONFIG_HD64461_IOBASE + HD64461_GPBDR - 0x10000); - else - ctrl_outw(v16 | HD64461_GPBDR_LED_RED, CONFIG_HD64461_IOBASE + HD64461_GPBDR - 0x10000); -} - -static inline void hp6xx_led_green(int on) -{ - u8 v8; - - v8 = ctrl_inb(PKDR); - if (on) - ctrl_outb(v8 & (~PKDR_LED_GREEN), PKDR); - else - ctrl_outb(v8 | PKDR_LED_GREEN, PKDR); -} - - #endif /* __ASM_SH_HP6XX_H */ -- cgit v1.2.3 From bb7de070d2cf11f92341c40cd1810e8eebfbcbf8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Feb 2008 14:09:03 +0900 Subject: sh: asm/tlb.h needs linux/pagemap.h for CONFIG_SWAP=n. linux/swap.h really wants to include linux/pagemap.h in order to satisfy the page_cache_release()/release_pages() definition requirements when CONFIG_SWAP=n.
Unfortunately the code in question contains: /* only sparc can not include linux/pagemap.h in this file * so leave page_cache_release and release_pages undeclared... */ #define free_page_and_swap_cache(page) \ page_cache_release(page) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr), 0); so it looks like we're stuck with doing it in asm/tlb.h instead, as others already do (ARM, CRIS, etc.). Grumble. Signed-off-by: Paul Mundt --- include/asm-sh/tlb.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-sh/tlb.h b/include/asm-sh/tlb.h index 56ad1fb888a2..88ff1ae8a6b8 100644 --- a/include/asm-sh/tlb.h +++ b/include/asm-sh/tlb.h @@ -20,6 +20,7 @@ */ #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) +#include #include #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From 5c8f82c64941594cdab53bf9f9a66c190781f4f6 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Feb 2008 19:44:53 +0900 Subject: maple: Fix up maple build failure. maple_devinfo->connector_direction had a typo, fix it up.. Signed-off-by: Paul Mundt --- include/linux/maple.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/maple.h b/include/linux/maple.h index 35c3474ccf68..3f01e2bae1a1 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -39,7 +39,7 @@ struct maple_devinfo { unsigned long function; unsigned long function_data[3]; unsigned char area_code; - unsigned char connector_directon; + unsigned char connector_direction; char product_name[31]; char product_licence[61]; unsigned short standby_power; -- cgit v1.2.3 From 38350e0a00f973dd9c6556beeff0f7eb5ef3f58b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Feb 2008 20:14:10 +0900 Subject: sh: Get SH-5 caches working again post-unification. A number of cleanups to get the SH-5 cache management code in line with the rest of the SH backend. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh5/probe.c | 61 +-- arch/sh/mm/cache-sh5.c | 1021 ++++++++++++++--------------------- include/asm-sh/cpu-sh5/cacheflush.h | 4 +- include/asm-sh/mmu_context_64.h | 3 + include/asm-sh/page.h | 7 +- 5 files changed, 448 insertions(+), 648 deletions(-) (limited to 'include') diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c index 15d167fd0ae7..31f8cb0f6374 100644 --- a/arch/sh/kernel/cpu/sh5/probe.c +++ b/arch/sh/kernel/cpu/sh5/probe.c @@ -20,19 +20,18 @@ int __init detect_cpu_and_cache_system(void) { unsigned long long cir; - /* Do peeks in real mode to avoid having to set up a mapping for the - WPC registers. On SH5-101 cut2, such a mapping would be exposed to - an address translation erratum which would make it hard to set up - correctly. */ + /* + * Do peeks in real mode to avoid having to set up a mapping for + * the WPC registers. On SH5-101 cut2, such a mapping would be + * exposed to an address translation erratum which would make it + * hard to set up correctly. + */ cir = peek_real_address_q(0x0d000008); - if ((cir & 0xffff) == 0x5103) { + if ((cir & 0xffff) == 0x5103) boot_cpu_data.type = CPU_SH5_103; - } else if (((cir >> 32) & 0xffff) == 0x51e2) { + else if (((cir >> 32) & 0xffff) == 0x51e2) /* CPU.VCR aliased at CIR address on SH5-101 */ boot_cpu_data.type = CPU_SH5_101; - } else { - boot_cpu_data.type = CPU_SH_NONE; - } /* * First, setup some sane values for the I-cache. 
@@ -40,37 +39,33 @@ int __init detect_cpu_and_cache_system(void) boot_cpu_data.icache.ways = 4; boot_cpu_data.icache.sets = 256; boot_cpu_data.icache.linesz = L1_CACHE_BYTES; + boot_cpu_data.icache.way_incr = (1 << 13); + boot_cpu_data.icache.entry_shift = 5; + boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets * + boot_cpu_data.icache.linesz; + boot_cpu_data.icache.entry_mask = 0x1fe0; + boot_cpu_data.icache.flags = 0; -#if 0 /* - * FIXME: This can probably be cleaned up a bit as well.. for example, - * do we really need the way shift _and_ the way_step_shift ?? Judging - * by the existing code, I would guess no.. is there any valid reason - * why we need to be tracking this around? + * Next, setup some sane values for the D-cache. + * + * On the SH5, these are pretty consistent with the I-cache settings, + * so we just copy over the existing definitions.. these can be fixed + * up later, especially if we add runtime CPU probing. + * + * Though in the meantime it saves us from having to duplicate all of + * the above definitions.. */ - boot_cpu_data.icache.way_shift = 13; - boot_cpu_data.icache.entry_shift = 5; - boot_cpu_data.icache.set_shift = 4; - boot_cpu_data.icache.way_step_shift = 16; - boot_cpu_data.icache.asid_shift = 2; + boot_cpu_data.dcache = boot_cpu_data.icache; /* - * way offset = cache size / associativity, so just don't factor in - * associativity in the first place.. + * Setup any cache-related flags here */ - boot_cpu_data.icache.way_ofs = boot_cpu_data.icache.sets * - boot_cpu_data.icache.linesz; - - boot_cpu_data.icache.asid_mask = 0x3fc; - boot_cpu_data.icache.idx_mask = 0x1fe0; - boot_cpu_data.icache.epn_mask = 0xffffe000; +#if defined(CONFIG_CACHE_WRITETHROUGH) + set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags)); +#elif defined(CONFIG_CACHE_WRITEBACK) + set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags)); #endif - boot_cpu_data.icache.flags = 0; - - /* A trivial starting point.. */ - memcpy(&boot_cpu_data.dcache, - &boot_cpu_data.icache, sizeof(struct cache_info)); - return 0; } diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index 5d1f615fe525..3877321fcede 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c @@ -1,10 +1,10 @@ /* * arch/sh/mm/cache-sh5.c * - * Original version Copyright (C) 2000, 2001 Paolo Alberelli - * Second version Copyright (C) benedict.gaster@superh.com 2002 - * Third version Copyright Richard.Curnow@superh.com 2003 - * Hacks to third version Copyright (C) 2003 Paul Mundt + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2002 Benedict Gaster + * Copyright (C) 2003 Richard Curnow + * Copyright (C) 2003 - 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -13,101 +13,20 @@ #include #include #include -#include -#include -#include +#include #include #include -#include -#include +#include #include #include -#include /* for flush_itlb_range */ - -#include - -/* This function is in entry.S */ -extern unsigned long switch_and_save_asid(unsigned long new_asid); /* Wired TLB entry for the D-cache */ static unsigned long long dtlb_cache_slot; -/** - * sh64_cache_init() - * - * This is pretty much just a straightforward clone of the SH - * detect_cpu_and_cache_system(). - * - * This function is responsible for setting up all of the cache - * info dynamically as well as taking care of CPU probing and - * setting up the relevant subtype data. 
- * - * FIXME: For the time being, we only really support the SH5-101 - * out of the box, and don't support dynamic probing for things - * like the SH5-103 or even cut2 of the SH5-101. Implement this - * later! - */ -int __init sh64_cache_init(void) +void __init p3_cache_init(void) { - /* - * First, setup some sane values for the I-cache. - */ - cpu_data->icache.ways = 4; - cpu_data->icache.sets = 256; - cpu_data->icache.linesz = L1_CACHE_BYTES; - - /* - * FIXME: This can probably be cleaned up a bit as well.. for example, - * do we really need the way shift _and_ the way_step_shift ?? Judging - * by the existing code, I would guess no.. is there any valid reason - * why we need to be tracking this around? - */ - cpu_data->icache.way_shift = 13; - cpu_data->icache.entry_shift = 5; - cpu_data->icache.set_shift = 4; - cpu_data->icache.way_step_shift = 16; - cpu_data->icache.asid_shift = 2; - - /* - * way offset = cache size / associativity, so just don't factor in - * associativity in the first place.. - */ - cpu_data->icache.way_ofs = cpu_data->icache.sets * - cpu_data->icache.linesz; - - cpu_data->icache.asid_mask = 0x3fc; - cpu_data->icache.idx_mask = 0x1fe0; - cpu_data->icache.epn_mask = 0xffffe000; - cpu_data->icache.flags = 0; - - /* - * Next, setup some sane values for the D-cache. - * - * On the SH5, these are pretty consistent with the I-cache settings, - * so we just copy over the existing definitions.. these can be fixed - * up later, especially if we add runtime CPU probing. - * - * Though in the meantime it saves us from having to duplicate all of - * the above definitions.. - */ - cpu_data->dcache = cpu_data->icache; - - /* - * Setup any cache-related flags here - */ -#if defined(CONFIG_DCACHE_WRITE_THROUGH) - set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)); -#elif defined(CONFIG_DCACHE_WRITE_BACK) - set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags)); -#endif - - /* - * We also need to reserve a slot for the D-cache in the DTLB, so we - * do this now .. - */ - dtlb_cache_slot = sh64_get_wired_dtlb_entry(); - - return 0; + /* Reserve a slot for dcache colouring in the DTLB */ + dtlb_cache_slot = sh64_get_wired_dtlb_entry(); } #ifdef CONFIG_DCACHE_DISABLED @@ -116,73 +35,48 @@ int __init sh64_cache_init(void) #define sh64_dcache_purge_user_range(mm, start, end) do { } while (0) #define sh64_dcache_purge_phy_page(paddr) do { } while (0) #define sh64_dcache_purge_virt_page(mm, eaddr) do { } while (0) -#define sh64_dcache_purge_kernel_range(start, end) do { } while (0) -#define sh64_dcache_wback_current_user_range(start, end) do { } while (0) #endif -/*##########################################################################*/ - -/* From here onwards, a rewrite of the implementation, - by Richard.Curnow@superh.com. - - The major changes in this compared to the old version are; - 1. use more selective purging through OCBP instead of using ALLOCO to purge - by natural replacement. This avoids purging out unrelated cache lines - that happen to be in the same set. - 2. exploit the APIs copy_user_page and clear_user_page better - 3. be more selective about I-cache purging, in particular use invalidate_all - more sparingly. 
- - */ - -/*########################################################################## - SUPPORT FUNCTIONS - ##########################################################################*/ - -/****************************************************************************/ -/* The following group of functions deal with mapping and unmapping a temporary - page into the DTLB slot that have been set aside for our exclusive use. */ -/* In order to accomplish this, we use the generic interface for adding and - removing a wired slot entry as defined in arch/sh/mm/tlb-sh5.c */ -/****************************************************************************/ - -static unsigned long slot_own_flags; - -static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr) +/* + * The following group of functions deal with mapping and unmapping a + * temporary page into a DTLB slot that has been set aside for exclusive + * use. + */ +static inline void +sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, + unsigned long paddr) { - local_irq_save(slot_own_flags); + local_irq_disable(); sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr); } static inline void sh64_teardown_dtlb_cache_slot(void) { sh64_teardown_tlb_slot(dtlb_cache_slot); - local_irq_restore(slot_own_flags); + local_irq_enable(); } -/****************************************************************************/ - #ifndef CONFIG_ICACHE_DISABLED - -static void __inline__ sh64_icache_inv_all(void) +static inline void sh64_icache_inv_all(void) { unsigned long long addr, flag, data; unsigned int flags; - addr=ICCR0; - flag=ICCR0_ICI; - data=0; + addr = ICCR0; + flag = ICCR0_ICI; + data = 0; /* Make this a critical section for safety (probably not strictly necessary.) */ local_irq_save(flags); /* Without %1 it gets unexplicably wrong */ - asm volatile("getcfg %3, 0, %0\n\t" - "or %0, %2, %0\n\t" - "putcfg %3, 0, %0\n\t" - "synci" - : "=&r" (data) - : "0" (data), "r" (flag), "r" (addr)); + __asm__ __volatile__ ( + "getcfg %3, 0, %0\n\t" + "or %0, %2, %0\n\t" + "putcfg %3, 0, %0\n\t" + "synci" + : "=&r" (data) + : "0" (data), "r" (flag), "r" (addr)); local_irq_restore(flags); } @@ -193,20 +87,12 @@ static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end) * the addresses lie in the kernel superpage. */ unsigned long long ullend, addr, aligned_start; -#if (NEFF == 32) aligned_start = (unsigned long long)(signed long long)(signed long) start; -#else -#error "NEFF != 32" -#endif - aligned_start &= L1_CACHE_ALIGN_MASK; - addr = aligned_start; -#if (NEFF == 32) + addr = L1_CACHE_ALIGN(aligned_start); ullend = (unsigned long long) (signed long long) (signed long) end; -#else -#error "NEFF != 32" -#endif + while (addr <= ullend) { - asm __volatile__ ("icbi %0, 0" : : "r" (addr)); + __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr)); addr += L1_CACHE_BYTES; } } @@ -215,7 +101,7 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long { /* If we get called, we know that vma->vm_flags contains VM_EXEC. Also, eaddr is page-aligned. 
*/ - + unsigned int cpu = smp_processor_id(); unsigned long long addr, end_addr; unsigned long flags = 0; unsigned long running_asid, vma_asid; @@ -237,17 +123,17 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long */ running_asid = get_asid(); - vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK); + vma_asid = cpu_asid(cpu, vma->vm_mm); if (running_asid != vma_asid) { local_irq_save(flags); switch_and_save_asid(vma_asid); } while (addr < end_addr) { /* Worth unrolling a little */ - asm __volatile__("icbi %0, 0" : : "r" (addr)); - asm __volatile__("icbi %0, 32" : : "r" (addr)); - asm __volatile__("icbi %0, 64" : : "r" (addr)); - asm __volatile__("icbi %0, 96" : : "r" (addr)); + __asm__ __volatile__("icbi %0, 0" : : "r" (addr)); + __asm__ __volatile__("icbi %0, 32" : : "r" (addr)); + __asm__ __volatile__("icbi %0, 64" : : "r" (addr)); + __asm__ __volatile__("icbi %0, 96" : : "r" (addr)); addr += 128; } if (running_asid != vma_asid) { @@ -256,8 +142,6 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long } } -/****************************************************************************/ - static void sh64_icache_inv_user_page_range(struct mm_struct *mm, unsigned long start, unsigned long end) { @@ -275,10 +159,10 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm, possible with the D-cache. Just assume 64 for now as a working figure. */ - int n_pages; - if (!mm) return; + if (!mm) + return; n_pages = ((end - start) >> PAGE_SHIFT); if (n_pages >= 64) { @@ -290,7 +174,7 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm, unsigned long mm_asid, current_asid; unsigned long long flags = 0ULL; - mm_asid = mm->context & MMU_CONTEXT_ASID_MASK; + mm_asid = cpu_asid(smp_processor_id(), mm); current_asid = get_asid(); if (mm_asid != current_asid) { @@ -322,6 +206,7 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm, } aligned_start = vma->vm_end; /* Skip to start of next region */ } + if (mm_asid != current_asid) { switch_and_save_asid(current_asid); local_irq_restore(flags); @@ -329,47 +214,46 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm, } } +/* + * Invalidate a small range of user context I-cache, not necessarily page + * (or even cache-line) aligned. + * + * Since this is used inside ptrace, the ASID in the mm context typically + * won't match current_asid. We'll have to switch ASID to do this. For + * safety, and given that the range will be small, do all this under cli. + * + * Note, there is a hazard that the ASID in mm->context is no longer + * actually associated with mm, i.e. if the mm->context has started a new + * cycle since mm was last active. However, this is just a performance + * issue: all that happens is that we invalidate lines belonging to + * another mm, so the owning process has to refill them when that mm goes + * live again. mm itself can't have any cache entries because there will + * have been a flush_cache_all when the new mm->context cycle started. + */ static void sh64_icache_inv_user_small_range(struct mm_struct *mm, unsigned long start, int len) { - - /* Invalidate a small range of user context I-cache, not necessarily - page (or even cache-line) aligned. */ - unsigned long long eaddr = start; unsigned long long eaddr_end = start + len; unsigned long current_asid, mm_asid; unsigned long long flags; unsigned long long epage_start; - /* Since this is used inside ptrace, the ASID in the mm context - typically won't match current_asid. 
We'll have to switch ASID to do - this. For safety, and given that the range will be small, do all - this under cli. - - Note, there is a hazard that the ASID in mm->context is no longer - actually associated with mm, i.e. if the mm->context has started a - new cycle since mm was last active. However, this is just a - performance issue: all that happens is that we invalidate lines - belonging to another mm, so the owning process has to refill them - when that mm goes live again. mm itself can't have any cache - entries because there will have been a flush_cache_all when the new - mm->context cycle started. */ - - /* Align to start of cache line. Otherwise, suppose len==8 and start - was at 32N+28 : the last 4 bytes wouldn't get invalidated. */ - eaddr = start & L1_CACHE_ALIGN_MASK; + /* + * Align to start of cache line. Otherwise, suppose len==8 and + * start was at 32N+28 : the last 4 bytes wouldn't get invalidated. + */ + eaddr = L1_CACHE_ALIGN(start); eaddr_end = start + len; + mm_asid = cpu_asid(smp_processor_id(), mm); local_irq_save(flags); - mm_asid = mm->context & MMU_CONTEXT_ASID_MASK; current_asid = switch_and_save_asid(mm_asid); epage_start = eaddr & PAGE_MASK; - while (eaddr < eaddr_end) - { - asm __volatile__("icbi %0, 0" : : "r" (eaddr)); + while (eaddr < eaddr_end) { + __asm__ __volatile__("icbi %0, 0" : : "r" (eaddr)); eaddr += L1_CACHE_BYTES; } switch_and_save_asid(current_asid); @@ -394,30 +278,24 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon been recycled since we were last active in which case we might just invalidate another processes I-cache entries : no worries, just a performance drop for him. */ - aligned_start = start & L1_CACHE_ALIGN_MASK; + aligned_start = L1_CACHE_ALIGN(start); addr = aligned_start; while (addr < ull_end) { - asm __volatile__ ("icbi %0, 0" : : "r" (addr)); - asm __volatile__ ("nop"); - asm __volatile__ ("nop"); + __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr)); + __asm__ __volatile__ ("nop"); + __asm__ __volatile__ ("nop"); addr += L1_CACHE_BYTES; } } - #endif /* !CONFIG_ICACHE_DISABLED */ -/****************************************************************************/ - #ifndef CONFIG_DCACHE_DISABLED - /* Buffer used as the target of alloco instructions to purge data from cache sets by natural eviction. -- RPC */ -#define DUMMY_ALLOCO_AREA_SIZE L1_CACHE_SIZE_BYTES + (1024 * 4) +#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4)) static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, }; -/****************************************************************************/ - -static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets) +static void inline sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets) { /* Purge all ways in a particular block of sets, specified by the base set number and number of sets. 
Can handle wrap-around, if that's @@ -428,102 +306,86 @@ static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets int j; int set_offset; - dummy_buffer_base_set = ((int)&dummy_alloco_area & cpu_data->dcache.idx_mask) >> cpu_data->dcache.entry_shift; + dummy_buffer_base_set = ((int)&dummy_alloco_area & + cpu_data->dcache.entry_mask) >> + cpu_data->dcache.entry_shift; set_offset = sets_to_purge_base - dummy_buffer_base_set; - for (j=0; j<n_sets; j++, set_offset++) { - set_offset &= (cpu_data->dcache.sets - 1); - eaddr0 = (unsigned long long)dummy_alloco_area + (set_offset << cpu_data->dcache.entry_shift); - - /* Do one alloco which hits the required set per cache way. For - write-back mode, this will purge the #ways resident lines. There's - little point unrolling this loop because the allocos stall more if - they're too close together. */ - eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways; - for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) { - asm __volatile__ ("alloco %0, 0" : : "r" (eaddr)); - asm __volatile__ ("synco"); /* TAKum03020 */ + for (j = 0; j < n_sets; j++, set_offset++) { + set_offset &= (cpu_data->dcache.sets - 1); + eaddr0 = (unsigned long long)dummy_alloco_area + + (set_offset << cpu_data->dcache.entry_shift); + + /* + * Do one alloco which hits the required set per cache + * way. For write-back mode, this will purge the #ways + * resident lines. There's little point unrolling this + * loop because the allocos stall more if they're too + * close together. + */ + eaddr1 = eaddr0 + cpu_data->dcache.way_size * + cpu_data->dcache.ways; + + for (eaddr = eaddr0; eaddr < eaddr1; + eaddr += cpu_data->dcache.way_size) { + __asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr)); + __asm__ __volatile__ ("synco"); /* TAKum03020 */ } - eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways; - for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) { - /* Load from each address. Required because alloco is a NOP if - the cache is write-through. Write-through is a config option. */ + eaddr1 = eaddr0 + cpu_data->dcache.way_size * + cpu_data->dcache.ways; + + for (eaddr = eaddr0; eaddr < eaddr1; + eaddr += cpu_data->dcache.way_size) { + /* + * Load from each address. Required because + * alloco is a NOP if the cache is write-through. + */ if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags))) - *(volatile unsigned char *)(int)eaddr; + ctrl_inb(eaddr); } } - /* Don't use OCBI to invalidate the lines. That costs cycles directly. - If the dummy block is just left resident, it will naturally get - evicted as required. */ - - return; + /* + * Don't use OCBI to invalidate the lines. That costs cycles + * directly. If the dummy block is just left resident, it will + * naturally get evicted as required. + */ } -/****************************************************************************/ - +/* + * Purge the entire contents of the dcache. The most efficient way to + * achieve this is to use alloco instructions on a region of unused + * memory equal in size to the cache, thereby causing the current + * contents to be discarded by natural eviction. The alternative, namely + * reading every tag, setting up a mapping for the corresponding page and + * doing an OCBP for the line, would be much more expensive. + */ static void sh64_dcache_purge_all(void) { - /* Purge the entire contents of the dcache. The most efficient way to - achieve this is to use alloco instructions on a region of unused - memory equal in size to the cache, thereby causing the current - contents to be discarded by natural eviction.
The alternative, - namely reading every tag, setting up a mapping for the corresponding - page and doing an OCBP for the line, would be much more expensive. - */ sh64_dcache_purge_sets(0, cpu_data->dcache.sets); - - return; - } -/****************************************************************************/ - -static void sh64_dcache_purge_kernel_range(unsigned long start, unsigned long end) -{ - /* Purge the range of addresses [start,end] from the D-cache. The - addresses lie in the superpage mapping. There's no harm if we - overpurge at either end - just a small performance loss. */ - unsigned long long ullend, addr, aligned_start; -#if (NEFF == 32) - aligned_start = (unsigned long long)(signed long long)(signed long) start; -#else -#error "NEFF != 32" -#endif - aligned_start &= L1_CACHE_ALIGN_MASK; - addr = aligned_start; -#if (NEFF == 32) - ullend = (unsigned long long) (signed long long) (signed long) end; -#else -#error "NEFF != 32" -#endif - while (addr <= ullend) { - asm __volatile__ ("ocbp %0, 0" : : "r" (addr)); - addr += L1_CACHE_BYTES; - } - return; -} /* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for anything else in the kernel */ #define MAGIC_PAGE0_START 0xffffffffec000000ULL -static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned long eaddr) +/* Purge the physical page 'paddr' from the cache. It's known that any + * cache lines requiring attention have the same page colour as the + * address 'eaddr'. + * + * This relies on the fact that the D-cache matches on physical tags when + * no virtual tag matches. So we create an alias for the original page + * and purge through that. (Alternatively, we could have done this by + * switching ASID to match the original mapping and purged through that, + * but that involves ASID switching cost + probably a TLBMISS + refill + * anyway.) + */ +static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, + unsigned long eaddr) { - /* Purge the physical page 'paddr' from the cache. It's known that any - cache lines requiring attention have the same page colour as the the - address 'eaddr'. - - This relies on the fact that the D-cache matches on physical tags - when no virtual tag matches. So we create an alias for the original - page and purge through that. (Alternatively, we could have done - this by switching ASID to match the original mapping and purged - through that, but that involves ASID switching cost + probably a - TLBMISS + refill anyway.) - */ - unsigned long long magic_page_start; unsigned long long magic_eaddr, magic_eaddr_end; @@ -531,47 +393,45 @@ static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned lo /* As long as the kernel is not pre-emptible, this doesn't need to be under cli/sti. */ - sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr); magic_eaddr = magic_page_start; magic_eaddr_end = magic_eaddr + PAGE_SIZE; + while (magic_eaddr < magic_eaddr_end) { /* Little point in unrolling this loop - the OCBPs are blocking and won't go any quicker (i.e. the loop overhead is parallel to part of the OCBP execution.) */ - asm __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr)); + __asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr)); magic_eaddr += L1_CACHE_BYTES; } sh64_teardown_dtlb_cache_slot(); } -/****************************************************************************/ - +/* + * Purge a page given its physical start address, by creating a temporary + * 1 page mapping and purging across that.
Even if we know the virtual + * address (& vma or mm) of the page, the method here is more elegant + * because it avoids issues of coping with page faults on the purge + * instructions (i.e. no special-case code required in the critical path + * in the TLB miss handling). + */ static void sh64_dcache_purge_phy_page(unsigned long paddr) { - /* Pure a page given its physical start address, by creating a - temporary 1 page mapping and purging across that. Even if we know - the virtual address (& vma or mm) of the page, the method here is - more elegant because it avoids issues of coping with page faults on - the purge instructions (i.e. no special-case code required in the - critical path in the TLB miss handling). */ - unsigned long long eaddr_start, eaddr, eaddr_end; int i; /* As long as the kernel is not pre-emptible, this doesn't need to be under cli/sti. */ - eaddr_start = MAGIC_PAGE0_START; - for (i=0; i < (1 << CACHE_OC_N_SYNBITS); i++) { + for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) { sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr); eaddr = eaddr_start; eaddr_end = eaddr + PAGE_SIZE; while (eaddr < eaddr_end) { - asm __volatile__ ("ocbp %0, 0" : : "r" (eaddr)); + __asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr)); eaddr += L1_CACHE_BYTES; } @@ -584,6 +444,7 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm, unsigned long addr, unsigned long end) { pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; pte_t entry; @@ -597,7 +458,11 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm, if (pgd_bad(*pgd)) return; - pmd = pmd_offset(pgd, addr); + pud = pud_offset(pgd, addr); + if (pud_none(*pud) || pud_bad(*pud)) + return; + + pmd = pmd_offset(pud, addr); if (pmd_none(*pmd) || pmd_bad(*pmd)) return; @@ -611,421 +476,357 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm, } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(pte - 1, ptl); } -/****************************************************************************/ +/* + * There are at least 5 choices for the implementation of this, with + * pros (+), cons(-), comments(*): + * + * 1. ocbp each line in the range through the original user's ASID + * + no lines spuriously evicted + * - tlbmiss handling (must either handle faults on demand => extra + * special-case code in tlbmiss critical path), or map the page in + * advance (=> flush_tlb_range in advance to avoid multiple hits) + * - ASID switching + * - expensive for large ranges + * + * 2. temporarily map each page in the range to a special effective + * address and ocbp through the temporary mapping; relies on the + * fact that SH-5 OCB* always do TLB lookup and match on ptags (they + * never look at the etags) + * + no spurious evictions + * - expensive for large ranges + * * surely cheaper than (1) + * + * 3. walk all the lines in the cache, check the tags, if a match + * occurs create a page mapping to ocbp the line through + * + no spurious evictions + * - tag inspection overhead + * - (especially for small ranges) + * - potential cost of setting up/tearing down page mapping for + * every line that matches the range + * * cost partly independent of range size + * + * 4. walk all the lines in the cache, check the tags, if a match + * occurs use 4 * alloco to purge the line (+3 other probably + * innocent victims) by natural eviction + * + no tlb mapping overheads + * - spurious evictions + * - tag inspection overhead + * + * 5. 
implement like flush_cache_all + * + no tag inspection overhead + * - spurious evictions + * - bad for small ranges + * + * (1) can be ruled out as more expensive than (2). (2) appears best + * for small ranges. The choice between (3), (4) and (5) for large + * ranges and the range size for the large/small boundary need + * benchmarking to determine. + * + * For now use approach (2) for small ranges and (5) for large ones. + */ static void sh64_dcache_purge_user_range(struct mm_struct *mm, unsigned long start, unsigned long end) { - /* There are at least 5 choices for the implementation of this, with - pros (+), cons(-), comments(*): - - 1. ocbp each line in the range through the original user's ASID - + no lines spuriously evicted - - tlbmiss handling (must either handle faults on demand => extra - special-case code in tlbmiss critical path), or map the page in - advance (=> flush_tlb_range in advance to avoid multiple hits) - - ASID switching - - expensive for large ranges - - 2. temporarily map each page in the range to a special effective - address and ocbp through the temporary mapping; relies on the - fact that SH-5 OCB* always do TLB lookup and match on ptags (they - never look at the etags) - + no spurious evictions - - expensive for large ranges - * surely cheaper than (1) - - 3. walk all the lines in the cache, check the tags, if a match - occurs create a page mapping to ocbp the line through - + no spurious evictions - - tag inspection overhead - - (especially for small ranges) - - potential cost of setting up/tearing down page mapping for - every line that matches the range - * cost partly independent of range size - - 4. walk all the lines in the cache, check the tags, if a match - occurs use 4 * alloco to purge the line (+3 other probably - innocent victims) by natural eviction - + no tlb mapping overheads - - spurious evictions - - tag inspection overhead - - 5. implement like flush_cache_all - + no tag inspection overhead - - spurious evictions - - bad for small ranges - - (1) can be ruled out as more expensive than (2). (2) appears best - for small ranges. The choice between (3), (4) and (5) for large - ranges and the range size for the large/small boundary need - benchmarking to determine. - - For now use approach (2) for small ranges and (5) for large ones. - - */ + int n_pages = ((end - start) >> PAGE_SHIFT); - int n_pages; - - n_pages = ((end - start) >> PAGE_SHIFT); if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) { -#if 1 sh64_dcache_purge_all(); -#else - unsigned long long set, way; - unsigned long mm_asid = mm->context & MMU_CONTEXT_ASID_MASK; - for (set = 0; set < cpu_data->dcache.sets; set++) { - unsigned long long set_base_config_addr = CACHE_OC_ADDRESS_ARRAY + (set << cpu_data->dcache.set_shift); - for (way = 0; way < cpu_data->dcache.ways; way++) { - unsigned long long config_addr = set_base_config_addr + (way << cpu_data->dcache.way_step_shift); - unsigned long long tag0; - unsigned long line_valid; - - asm __volatile__("getcfg %1, 0, %0" : "=r" (tag0) : "r" (config_addr)); - line_valid = tag0 & SH_CACHE_VALID; - if (line_valid) { - unsigned long cache_asid; - unsigned long epn; - - cache_asid = (tag0 & cpu_data->dcache.asid_mask) >> cpu_data->dcache.asid_shift; - /* The next line needs some - explanation. The virtual tags - encode bits [31:13] of the virtual - address, bit [12] of the 'tag' being - implied by the cache set index. 
*/ - epn = (tag0 & cpu_data->dcache.epn_mask) | ((set & 0x80) << cpu_data->dcache.entry_shift); - - if ((cache_asid == mm_asid) && (start <= epn) && (epn < end)) { - /* TODO : could optimise this - call by batching multiple - adjacent sets together. */ - sh64_dcache_purge_sets(set, 1); - break; /* Don't waste time inspecting other ways for this set */ - } - } - } - } -#endif } else { /* Small range, covered by a single page table page */ start &= PAGE_MASK; /* should already be so */ end = PAGE_ALIGN(end); /* should already be so */ sh64_dcache_purge_user_pages(mm, start, end); } - return; } -static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end) +/* + * Purge the range of addresses from the D-cache. + * + * The addresses lie in the superpage mapping. There's no harm if we + * overpurge at either end - just a small performance loss. + */ +void __flush_purge_region(void *start, int size) { - unsigned long long aligned_start; - unsigned long long ull_end; - unsigned long long addr; - - ull_end = end; + unsigned long long ullend, addr, aligned_start; - /* Just wback over the range using the natural addresses. TLB miss - handling will be OK (TBC) : the range has just been written to by - the signal frame setup code, so the PTEs must exist. + aligned_start = (unsigned long long)(signed long long)(signed long) start; + addr = L1_CACHE_ALIGN(aligned_start); + ullend = (unsigned long long) (signed long long) (signed long) start + size; - Note, if we have CONFIG_PREEMPT and get preempted inside this loop, - it doesn't matter, even if the pid->ASID mapping changes whilst - we're away. In that case the cache will have been flushed when the - mapping was renewed. So the writebacks below will be nugatory (and - we'll doubtless have to fault the TLB entry/ies in again with the - new ASID), but it's a rare case. - */ - aligned_start = start & L1_CACHE_ALIGN_MASK; - addr = aligned_start; - while (addr < ull_end) { - asm __volatile__ ("ocbwb %0, 0" : : "r" (addr)); + while (addr <= ullend) { + __asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr)); addr += L1_CACHE_BYTES; } } -/****************************************************************************/ - -/* These *MUST* lie in an area of virtual address space that's otherwise unused. */ -#define UNIQUE_EADDR_START 0xe0000000UL -#define UNIQUE_EADDR_END 0xe8000000UL - -static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr) +void __flush_wback_region(void *start, int size) { - /* Given a physical address paddr, and a user virtual address - user_eaddr which will eventually be mapped to it, create a one-off - kernel-private eaddr mapped to the same paddr. 
This is used for - creating special destination pages for copy_user_page and - clear_user_page */ + unsigned long long ullend, addr, aligned_start; - static unsigned long current_pointer = UNIQUE_EADDR_START; - unsigned long coloured_pointer; + aligned_start = (unsigned long long)(signed long long)(signed long) start; + addr = L1_CACHE_ALIGN(aligned_start); + ullend = (unsigned long long) (signed long long) (signed long) start + size; - if (current_pointer == UNIQUE_EADDR_END) { - sh64_dcache_purge_all(); - current_pointer = UNIQUE_EADDR_START; + while (addr < ullend) { + __asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr)); + addr += L1_CACHE_BYTES; } - - coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | (user_eaddr & CACHE_OC_SYN_MASK); - sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr); - - current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS); - - return coloured_pointer; } -/****************************************************************************/ - -static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address) +void __flush_invalidate_region(void *start, int size) { - void *coloured_to; - - /* Discard any existing cache entries of the wrong colour. These are - present quite often, if the kernel has recently used the page - internally, then given it up, then it's been allocated to the user. - */ - sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to); - - coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to)); - sh64_page_copy(from, coloured_to); - - sh64_teardown_dtlb_cache_slot(); -} - -static void sh64_clear_user_page_coloured(void *to, unsigned long address) -{ - void *coloured_to; - - /* Discard any existing kernel-originated lines of the wrong colour (as - above) */ - sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to); + unsigned long long ullend, addr, aligned_start; - coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to)); - sh64_page_clear(coloured_to); + aligned_start = (unsigned long long)(signed long long)(signed long) start; + addr = L1_CACHE_ALIGN(aligned_start); + ullend = (unsigned long long) (signed long long) (signed long) start + size; - sh64_teardown_dtlb_cache_slot(); + while (addr < ullend) { + __asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr)); + addr += L1_CACHE_BYTES; + } } - #endif /* !CONFIG_DCACHE_DISABLED */ -/****************************************************************************/ - -/*########################################################################## - EXTERNALLY CALLABLE API. - ##########################################################################*/ - -/* These functions are described in Documentation/cachetlb.txt. - Each one of these functions varies in behaviour depending on whether the - I-cache and/or D-cache are configured out. - - Note that the Linux term 'flush' corresponds to what is termed 'purge' in - the sh/sh64 jargon for the D-cache, i.e. write back dirty data then - invalidate the cache lines, and 'invalidate' for the I-cache. - */ - -#undef FLUSH_TRACE - +/* + * Invalidate the entire contents of both caches, after writing back to + * memory any dirty data from the D-cache. + */ void flush_cache_all(void) { - /* Invalidate the entire contents of both caches, after writing back to - memory any dirty data from the D-cache. 
*/ sh64_dcache_purge_all(); sh64_icache_inv_all(); } -/****************************************************************************/ - +/* + * Invalidate an entire user-address space from both caches, after + * writing back dirty data (e.g. for shared mmap etc). + * + * This could be coded selectively by inspecting all the tags then + * doing 4*alloco on any set containing a match (as for + * flush_cache_range), but fork/exit/execve (where this is called from) + * are expensive anyway. + * + * Have to do a purge here, despite the comments re I-cache below. + * There could be odd-coloured dirty data associated with the mm still + * in the cache - if this gets written out through natural eviction + * after the kernel has reused the page there will be chaos. + * + * The mm being torn down won't ever be active again, so any Icache + * lines tagged with its ASID won't be visible for the rest of the + * lifetime of this ASID cycle. Before the ASID gets reused, there + * will be a flush_cache_all. Hence we don't need to touch the + * I-cache. This is similar to the lack of action needed in + * flush_tlb_mm - see fault.c. + */ void flush_cache_mm(struct mm_struct *mm) { - /* Invalidate an entire user-address space from both caches, after - writing back dirty data (e.g. for shared mmap etc). */ - - /* This could be coded selectively by inspecting all the tags then - doing 4*alloco on any set containing a match (as for - flush_cache_range), but fork/exit/execve (where this is called from) - are expensive anyway. */ - - /* Have to do a purge here, despite the comments re I-cache below. - There could be odd-coloured dirty data associated with the mm still - in the cache - if this gets written out through natural eviction - after the kernel has reused the page there will be chaos. - */ - sh64_dcache_purge_all(); - - /* The mm being torn down won't ever be active again, so any Icache - lines tagged with its ASID won't be visible for the rest of the - lifetime of this ASID cycle. Before the ASID gets reused, there - will be a flush_cache_all. Hence we don't need to touch the - I-cache. This is similar to the lack of action needed in - flush_tlb_mm - see fault.c. */ } -/****************************************************************************/ - +/* + * Invalidate (from both caches) the range [start,end) of virtual + * addresses from the user address space specified by mm, after writing + * back any dirty data. + * + * Note, 'end' is 1 byte beyond the end of the range to flush. + */ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; - /* Invalidate (from both caches) the range [start,end) of virtual - addresses from the user address space specified by mm, after writing - back any dirty data. - - Note, 'end' is 1 byte beyond the end of the range to flush. */ - sh64_dcache_purge_user_range(mm, start, end); sh64_icache_inv_user_page_range(mm, start, end); } -/****************************************************************************/ - -void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn) +/* + * Invalidate any entries in either cache for the vma within the user + * address space vma->vm_mm for the page starting at virtual address + * 'eaddr'. This seems to be used primarily in breaking COW. Note, + * the I-cache must be searched too in case the page in question is + * both writable and being executed from (e.g. stack trampolines.) + * + * Note, this is called with pte lock held. 
+ */ +void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, + unsigned long pfn) { - /* Invalidate any entries in either cache for the vma within the user - address space vma->vm_mm for the page starting at virtual address - 'eaddr'. This seems to be used primarily in breaking COW. Note, - the I-cache must be searched too in case the page in question is - both writable and being executed from (e.g. stack trampolines.) - - Note, this is called with pte lock held. - */ - sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT); - if (vma->vm_flags & VM_EXEC) { + if (vma->vm_flags & VM_EXEC) sh64_icache_inv_user_page(vma, eaddr); - } } -/****************************************************************************/ +void flush_dcache_page(struct page *page) +{ + sh64_dcache_purge_phy_page(page_to_phys(page)); + wmb(); +} -#ifndef CONFIG_DCACHE_DISABLED +/* + * Flush the range [start,end] of kernel virtual adddress space from + * the I-cache. The corresponding range must be purged from the + * D-cache also because the SH-5 doesn't have cache snooping between + * the caches. The addresses will be visible through the superpage + * mapping, therefore it's guaranteed that there no cache entries for + * the range in cache sets of the wrong colour. + */ +void flush_icache_range(unsigned long start, unsigned long end) +{ + __flush_purge_region((void *)start, end); + wmb(); + sh64_icache_inv_kernel_range(start, end); +} -void copy_user_page(void *to, void *from, unsigned long address, struct page *page) +/* + * Flush the range of user (defined by vma->vm_mm) address space starting + * at 'addr' for 'len' bytes from the cache. The range does not straddle + * a page boundary, the unique physical page containing the range is + * 'page'. This seems to be used mainly for invalidating an address + * range following a poke into the program text through the ptrace() call + * from another process (e.g. for BRK instruction insertion). + */ +void flush_icache_user_range(struct vm_area_struct *vma, + struct page *page, unsigned long addr, int len) { - /* 'from' and 'to' are kernel virtual addresses (within the superpage - mapping of the physical RAM). 'address' is the user virtual address - where the copy 'to' will be mapped after. This allows a custom - mapping to be used to ensure that the new copy is placed in the - right cache sets for the user to see it without having to bounce it - out via memory. Note however : the call to flush_page_to_ram in - (generic)/mm/memory.c:(break_cow) undoes all this good work in that one - very important case! - - TBD : can we guarantee that on every call, any cache entries for - 'from' are in the same colour sets as 'address' also? i.e. is this - always used just to deal with COW? (I suspect not). */ - - /* There are two possibilities here for when the page 'from' was last accessed: - * by the kernel : this is OK, no purge required. - * by the/a user (e.g. for break_COW) : need to purge. - - If the potential user mapping at 'address' is the same colour as - 'from' there is no need to purge any cache lines from the 'from' - page mapped into cache sets of colour 'address'. (The copy will be - accessing the page through 'from'). 
- */ - if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) { - sh64_dcache_purge_coloured_phy_page(__pa(from), address); - } + sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr); + mb(); - if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) { - /* No synonym problem on destination */ - sh64_page_copy(from, to); - } else { - sh64_copy_user_page_coloured(to, from, address); - } + if (vma->vm_flags & VM_EXEC) + sh64_icache_inv_user_small_range(vma->vm_mm, addr, len); +} - /* Note, don't need to flush 'from' page from the cache again - it's - done anyway by the generic code */ +/* + * For the address range [start,end), write back the data from the + * D-cache and invalidate the corresponding region of the I-cache for the + * current process. Used to flush signal trampolines on the stack to + * make them executable. + */ +void flush_cache_sigtramp(unsigned long vaddr) +{ + unsigned long end = vaddr + L1_CACHE_BYTES; + + __flush_wback_region((void *)vaddr, L1_CACHE_BYTES); + wmb(); + sh64_icache_inv_current_user_range(vaddr, end); } -void clear_user_page(void *to, unsigned long address, struct page *page) +/* + * These *MUST* lie in an area of virtual address space that's otherwise + * unused. + */ +#define UNIQUE_EADDR_START 0xe0000000UL +#define UNIQUE_EADDR_END 0xe8000000UL + +/* + * Given a physical address paddr, and a user virtual address user_eaddr + * which will eventually be mapped to it, create a one-off kernel-private + * eaddr mapped to the same paddr. This is used for creating special + * destination pages for copy_user_page and clear_user_page. + */ +static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, + unsigned long paddr) { - /* 'to' is a kernel virtual address (within the superpage - mapping of the physical RAM). 'address' is the user virtual address - where the 'to' page will be mapped after. This allows a custom - mapping to be used to ensure that the new copy is placed in the - right cache sets for the user to see it without having to bounce it - out via memory. - */ + static unsigned long current_pointer = UNIQUE_EADDR_START; + unsigned long coloured_pointer; - if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) { - /* No synonym problem on destination */ - sh64_page_clear(to); - } else { - sh64_clear_user_page_coloured(to, address); + if (current_pointer == UNIQUE_EADDR_END) { + sh64_dcache_purge_all(); + current_pointer = UNIQUE_EADDR_START; } -} -#endif /* !CONFIG_DCACHE_DISABLED */ + coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | + (user_eaddr & CACHE_OC_SYN_MASK); + sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr); -/****************************************************************************/ + current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS); -void flush_dcache_page(struct page *page) -{ - sh64_dcache_purge_phy_page(page_to_phys(page)); - wmb(); + return coloured_pointer; } -/****************************************************************************/ - -void flush_icache_range(unsigned long start, unsigned long end) +static void sh64_copy_user_page_coloured(void *to, void *from, + unsigned long address) { - /* Flush the range [start,end] of kernel virtual adddress space from - the I-cache. The corresponding range must be purged from the - D-cache also because the SH-5 doesn't have cache snooping between - the caches. 
The addresses will be visible through the superpage - mapping, therefore it's guaranteed that there no cache entries for - the range in cache sets of the wrong colour. + void *coloured_to; - Primarily used for cohering the I-cache after a module has - been loaded. */ + /* + * Discard any existing cache entries of the wrong colour. These are + * present quite often, if the kernel has recently used the page + * internally, then given it up, then it's been allocated to the user. + */ + sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to); - /* We also make sure to purge the same range from the D-cache since - flush_page_to_ram() won't be doing this for us! */ + coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to)); + copy_page(from, coloured_to); - sh64_dcache_purge_kernel_range(start, end); - wmb(); - sh64_icache_inv_kernel_range(start, end); + sh64_teardown_dtlb_cache_slot(); } -/****************************************************************************/ - -void flush_icache_user_range(struct vm_area_struct *vma, - struct page *page, unsigned long addr, int len) +static void sh64_clear_user_page_coloured(void *to, unsigned long address) { - /* Flush the range of user (defined by vma->vm_mm) address space - starting at 'addr' for 'len' bytes from the cache. The range does - not straddle a page boundary, the unique physical page containing - the range is 'page'. This seems to be used mainly for invalidating - an address range following a poke into the program text through the - ptrace() call from another process (e.g. for BRK instruction - insertion). */ + void *coloured_to; - sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr); - mb(); + /* + * Discard any existing kernel-originated lines of the wrong + * colour (as above) + */ + sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to); - if (vma->vm_flags & VM_EXEC) { - sh64_icache_inv_user_small_range(vma->vm_mm, addr, len); - } -} + coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to)); + clear_page(coloured_to); -/*########################################################################## - ARCH/SH64 PRIVATE CALLABLE API. - ##########################################################################*/ + sh64_teardown_dtlb_cache_slot(); +} -void flush_cache_sigtramp(unsigned long vaddr) +/* + * 'from' and 'to' are kernel virtual addresses (within the superpage + * mapping of the physical RAM). 'address' is the user virtual address + * where the copy 'to' will be mapped after. This allows a custom + * mapping to be used to ensure that the new copy is placed in the + * right cache sets for the user to see it without having to bounce it + * out via memory. Note however : the call to flush_page_to_ram in + * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one + * very important case! + * + * TBD : can we guarantee that on every call, any cache entries for + * 'from' are in the same colour sets as 'address' also? i.e. is this + * always used just to deal with COW? (I suspect not). + * + * There are two possibilities here for when the page 'from' was last accessed: + * - by the kernel : this is OK, no purge required. + * - by the/a user (e.g. for break_COW) : need to purge. + * + * If the potential user mapping at 'address' is the same colour as + * 'from' there is no need to purge any cache lines from the 'from' + * page mapped into cache sets of colour 'address'. (The copy will be + * accessing the page through 'from'). 
+ */ +void copy_user_page(void *to, void *from, unsigned long address, + struct page *page) { - unsigned long end = vaddr + L1_CACHE_BYTES; - - /* For the address range [start,end), write back the data from the - D-cache and invalidate the corresponding region of the I-cache for - the current process. Used to flush signal trampolines on the stack - to make them executable. */ + if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) + sh64_dcache_purge_coloured_phy_page(__pa(from), address); - sh64_dcache_wback_current_user_range(vaddr, end); - wmb(); - sh64_icache_inv_current_user_range(vaddr, end); + if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) + copy_page(to, from); + else + sh64_copy_user_page_coloured(to, from, address); } +/* + * 'to' is a kernel virtual address (within the superpage mapping of the + * physical RAM). 'address' is the user virtual address where the 'to' + * page will be mapped after. This allows a custom mapping to be used to + * ensure that the new copy is placed in the right cache sets for the + * user to see it without having to bounce it out via memory. + */ +void clear_user_page(void *to, unsigned long address, struct page *page) +{ + if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) + clear_page(to); + else + sh64_clear_user_page_coloured(to, address); +} diff --git a/include/asm-sh/cpu-sh5/cacheflush.h b/include/asm-sh/cpu-sh5/cacheflush.h index f935acbacf38..5a11f0b7e66a 100644 --- a/include/asm-sh/cpu-sh5/cacheflush.h +++ b/include/asm-sh/cpu-sh5/cacheflush.h @@ -3,8 +3,6 @@ #ifndef __ASSEMBLY__ -#include - struct vm_area_struct; struct page; struct mm_struct; @@ -27,7 +25,7 @@ extern void flush_icache_user_range(struct vm_area_struct *vma, #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_icache_page(vma, page) do { } while (0) -#define p3_cache_init() do { } while (0) +void p3_cache_init(void); #endif /* __ASSEMBLY__ */ diff --git a/include/asm-sh/mmu_context_64.h b/include/asm-sh/mmu_context_64.h index 020be744b088..9649f1c07caf 100644 --- a/include/asm-sh/mmu_context_64.h +++ b/include/asm-sh/mmu_context_64.h @@ -66,6 +66,9 @@ static inline void set_asid(unsigned long asid) : "=r" (sr), "=r" (pc) : "0" (sr)); } +/* arch/sh/kernel/cpu/sh5/entry.S */ +extern unsigned long switch_and_save_asid(unsigned long new_asid); + /* No spare register to twiddle, so use a software cache */ extern pgd_t *mmu_pdtp_cache; diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index 134562dc8c45..304c30b5d947 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h @@ -55,11 +55,14 @@ extern void clear_page(void *to); extern void copy_page(void *to, void *from); #if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ - (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) + (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \ + defined(CONFIG_SH7705_CACHE_32KB)) struct page; struct vm_area_struct; extern void clear_user_page(void *to, unsigned long address, struct page *page); -#ifdef CONFIG_CPU_SH4 +extern void copy_user_page(void *to, void *from, unsigned long address, + struct page *page); +#if defined(CONFIG_CPU_SH4) extern void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); #define __HAVE_ARCH_COPY_USER_HIGHPAGE -- cgit v1.2.3 From c7a49dd42d15f066d13e26c24c22c600b58528e0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Feb 2008 20:16:47 +0900 Subject: sh: asm/irq.h needs asm/cpu/irq.h. 
The SH-5 build currently fails when trying to build the i8042 code due to the missing IRQ definitions. These are provided in asm/cpu/irq.h, so just include that there to get it building again. Signed-off-by: Paul Mundt --- include/asm-sh/irq.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h index 11850f65c922..ca66e5df69dc 100644 --- a/include/asm-sh/irq.h +++ b/include/asm-sh/irq.h @@ -50,4 +50,8 @@ extern void irq_ctx_exit(int cpu); # define irq_ctx_exit(cpu) do { } while (0) #endif +#ifdef CONFIG_CPU_SH5 +#include +#endif + #endif /* __ASM_SH_IRQ_H */ -- cgit v1.2.3 From f99cb7a43c5cca1813a97312487acf7a0f88ee2a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Feb 2008 20:28:12 +0900 Subject: sh: Kill off more dead symbols. Reported-by: Robert P. J. Day Signed-off-by: Paul Mundt --- arch/sh/cchips/hd6446x/hd64465/setup.c | 47 ++++++++++------------------------ arch/sh/configs/se7705_defconfig | 1 - arch/sh/kernel/irq.c | 3 --- arch/sh/kernel/traps_64.c | 4 +-- arch/sh/kernel/vmlinux_64.lds.S | 2 +- include/asm-sh/cpu-sh5/mmu_context.h | 6 ----- 6 files changed, 17 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/arch/sh/cchips/hd6446x/hd64465/setup.c b/arch/sh/cchips/hd6446x/hd64465/setup.c index 5cef0db4018b..9b8820c36701 100644 --- a/arch/sh/cchips/hd6446x/hd64465/setup.c +++ b/arch/sh/cchips/hd6446x/hd64465/setup.c @@ -17,10 +17,8 @@ #include #include #include - #include #include - #include static void disable_hd64465_irq(unsigned int irq) @@ -28,51 +26,45 @@ static void disable_hd64465_irq(unsigned int irq) unsigned short nimr; unsigned short mask = 1 << (irq - HD64465_IRQ_BASE); - pr_debug("disable_hd64465_irq(%d): mask=%x\n", irq, mask); + pr_debug("disable_hd64465_irq(%d): mask=%x\n", irq, mask); nimr = inw(HD64465_REG_NIMR); nimr |= mask; outw(nimr, HD64465_REG_NIMR); } - static void enable_hd64465_irq(unsigned int irq) { unsigned short nimr; unsigned short mask = 1 << (irq - HD64465_IRQ_BASE); - pr_debug("enable_hd64465_irq(%d): mask=%x\n", irq, mask); + pr_debug("enable_hd64465_irq(%d): mask=%x\n", irq, mask); nimr = inw(HD64465_REG_NIMR); nimr &= ~mask; outw(nimr, HD64465_REG_NIMR); } - static void mask_and_ack_hd64465(unsigned int irq) { disable_hd64465_irq(irq); } - static void end_hd64465_irq(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) enable_hd64465_irq(irq); } - static unsigned int startup_hd64465_irq(unsigned int irq) -{ +{ enable_hd64465_irq(irq); return 0; } - static void shutdown_hd64465_irq(unsigned int irq) { disable_hd64465_irq(irq); } - static struct hw_interrupt_type hd64465_irq_type = { .typename = "HD64465-IRQ", .startup = startup_hd64465_irq, @@ -83,7 +75,6 @@ static struct hw_interrupt_type hd64465_irq_type = { .end = end_hd64465_irq, }; - static irqreturn_t hd64465_interrupt(int irq, void *dev_id) { printk(KERN_INFO @@ -93,9 +84,6 @@ static irqreturn_t hd64465_interrupt(int irq, void *dev_id) return IRQ_NONE; } - -/*====================================================*/ - /* * Support for a secondary IRQ demux step. This is necessary * because the HD64465 presents a very thin interface to the @@ -103,8 +91,7 @@ static irqreturn_t hd64465_interrupt(int irq, void *dev_id) * normally done in hardware by other PCMCIA host bridges is * instead done in software. 
*/ -static struct -{ +static struct { int (*func)(int, void *); void *dev; } hd64465_demux[HD64465_IRQ_NUM]; @@ -112,19 +99,17 @@ static struct void hd64465_register_irq_demux(int irq, int (*demux)(int irq, void *dev), void *dev) { - hd64465_demux[irq - HD64465_IRQ_BASE].func = demux; - hd64465_demux[irq - HD64465_IRQ_BASE].dev = dev; + hd64465_demux[irq - HD64465_IRQ_BASE].func = demux; + hd64465_demux[irq - HD64465_IRQ_BASE].dev = dev; } EXPORT_SYMBOL(hd64465_register_irq_demux); void hd64465_unregister_irq_demux(int irq) { - hd64465_demux[irq - HD64465_IRQ_BASE].func = 0; + hd64465_demux[irq - HD64465_IRQ_BASE].func = 0; } EXPORT_SYMBOL(hd64465_unregister_irq_demux); - - int hd64465_irq_demux(int irq) { if (irq == CONFIG_HD64465_IRQ) { @@ -132,16 +117,16 @@ int hd64465_irq_demux(int irq) unsigned short nirr = inw(HD64465_REG_NIRR); unsigned short nimr = inw(HD64465_REG_NIMR); - pr_debug("hd64465_irq_demux, nirr=%04x, nimr=%04x\n", nirr, nimr); + pr_debug("hd64465_irq_demux, nirr=%04x, nimr=%04x\n", nirr, nimr); nirr &= ~nimr; for (bit = 1, i = 0 ; i < HD64465_IRQ_NUM ; bit <<= 1, i++) if (nirr & bit) - break; + break; - if (i < HD64465_IRQ_NUM) { + if (i < HD64465_IRQ_NUM) { irq = HD64465_IRQ_BASE + i; - if (hd64465_demux[i].func != 0) - irq = hd64465_demux[i].func(irq, hd64465_demux[i].dev); + if (hd64465_demux[i].func != 0) + irq = hd64465_demux[i].func(irq, hd64465_demux[i].dev); } } return irq; @@ -154,7 +139,6 @@ static struct irqaction irq0 = { .name = "HD64465", }; - static int __init setup_hd64465(void) { int i; @@ -176,8 +160,8 @@ static int __init setup_hd64465(void) rev = inw(HD64465_REG_SRR); printk(KERN_INFO "HD64465 hardware revision %d.%d\n", (rev >> 8) & 0xff, rev & 0xff); - - outw(0xffff, HD64465_REG_NIMR); /* mask all interrupts */ + + outw(0xffff, HD64465_REG_NIMR); /* mask all interrupts */ for (i = 0; i < HD64465_IRQ_NUM ; i++) { irq_desc[HD64465_IRQ_BASE + i].chip = &hd64465_irq_type; @@ -185,16 +169,13 @@ static int __init setup_hd64465(void) setup_irq(CONFIG_HD64465_IRQ, &irq0); -#ifdef CONFIG_SERIAL /* wake up the UART from STANDBY at this point */ smscr = inw(HD64465_REG_SMSCR); outw(smscr & (~HD64465_SMSCR_UARTST), HD64465_REG_SMSCR); /* remap IO ports for first ISA serial port to HD64465 UART */ hd64465_port_map(0x3f8, 8, CONFIG_HD64465_IOBASE + 0x8000, 1); -#endif return 0; } - module_init(setup_hd64465); diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig index 87ae5c1f8629..84717d854867 100644 --- a/arch/sh/configs/se7705_defconfig +++ b/arch/sh/configs/se7705_defconfig @@ -231,7 +231,6 @@ CONFIG_CPU_LITTLE_ENDIAN=y # CONFIG_SH_DSP is not set # CONFIG_SH_ADC is not set CONFIG_CPU_HAS_INTEVT=y -CONFIG_CPU_HAS_PINT_IRQ=y CONFIG_CPU_HAS_IPR_IRQ=y CONFIG_CPU_HAS_SR_RB=y diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 0586bc62ad96..9bf19b00696a 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -248,9 +248,6 @@ asmlinkage void do_softirq(void) void __init init_IRQ(void) { -#ifdef CONFIG_CPU_HAS_PINT_IRQ - init_IRQ_pint(); -#endif plat_irq_setup(); /* Perform the machine specific initialisation */ diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index c0b3c6f6edb5..a55ac81d795b 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ -630,7 +630,7 @@ static int misaligned_fpu_load(struct pt_regs *regs, current->thread.fpu.hard.fp_regs[destreg] = buflo; current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; } else { -#if defined(CONFIG_LITTLE_ENDIAN) +#if 
defined(CONFIG_CPU_LITTLE_ENDIAN) current->thread.fpu.hard.fp_regs[destreg] = bufhi; current->thread.fpu.hard.fp_regs[destreg+1] = buflo; #else @@ -700,7 +700,7 @@ static int misaligned_fpu_store(struct pt_regs *regs, buflo = current->thread.fpu.hard.fp_regs[srcreg]; bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; } else { -#if defined(CONFIG_LITTLE_ENDIAN) +#if defined(CONFIG_CPU_LITTLE_ENDIAN) bufhi = current->thread.fpu.hard.fp_regs[srcreg]; buflo = current->thread.fpu.hard.fp_regs[srcreg+1]; #else diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S index 3f1bd6392bb3..d1e177009a41 100644 --- a/arch/sh/kernel/vmlinux_64.lds.S +++ b/arch/sh/kernel/vmlinux_64.lds.S @@ -51,7 +51,7 @@ SECTIONS KPROBES_TEXT *(.fixup) *(.gnu.warning) -#ifdef CONFIG_LITTLE_ENDIAN +#ifdef CONFIG_CPU_LITTLE_ENDIAN } = 0x6ff0fff0 #else } = 0xf0fff06f diff --git a/include/asm-sh/cpu-sh5/mmu_context.h b/include/asm-sh/cpu-sh5/mmu_context.h index df857fc09960..68a1d2cff457 100644 --- a/include/asm-sh/cpu-sh5/mmu_context.h +++ b/include/asm-sh/cpu-sh5/mmu_context.h @@ -16,12 +16,6 @@ /* This has to be a common function because the next location to fill * information is shared. */ extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte); - -/* Profiling counter. */ -#ifdef CONFIG_SH64_PROC_TLB -extern unsigned long long calls_to_do_fast_page_fault; -#endif - #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */ -- cgit v1.2.3 From c0ca41a27ef40fbe6d5fe343b61d63d7e1b93d28 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 14 Feb 2008 13:59:02 +0900 Subject: sh: fix ioreadN_rep and iowriteN_rep This patch makes sure readsN/writesN are used instead of insN/outsN for ioreadN_rep/iowriteN_rep. The current state of the sh io code is that mmio operations like readN/writeN and ioreadN/iowriteN are unaffected by the value of generic_io_base. This is different from port-based io like inN/outN, which gets adjusted using the value in generic_io_base. Without this patch, ioreadN_rep/iowriteN_rep get their addresses adjusted: the address for mmio access is adjusted using generic_io_base, and this is wrong. The ata core code currently crashes if generic_io_base is set. This patch changes ioreadN_rep/iowriteN_rep to follow the same rules as the rest of the mmio operations, i.e. don't adjust using generic_io_base.
Signed-off-by: Magnus Damm Acked-by: Katsuya MATSUBARA Signed-off-by: Paul Mundt --- include/asm-sh/io.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h index 3d2b114f9d57..356e50d06745 100644 --- a/include/asm-sh/io.h +++ b/include/asm-sh/io.h @@ -182,13 +182,13 @@ __BUILD_MEMORY_STRING(w, u16) #define iowrite32(v,a) writel((v),(a)) #define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a)) -#define ioread8_rep(a,d,c) insb((a),(d),(c)) -#define ioread16_rep(a,d,c) insw((a),(d),(c)) -#define ioread32_rep(a,d,c) insl((a),(d),(c)) +#define ioread8_rep(a, d, c) readsb((a), (d), (c)) +#define ioread16_rep(a, d, c) readsw((a), (d), (c)) +#define ioread32_rep(a, d, c) readsl((a), (d), (c)) -#define iowrite8_rep(a,s,c) outsb((a),(s),(c)) -#define iowrite16_rep(a,s,c) outsw((a),(s),(c)) -#define iowrite32_rep(a,s,c) outsl((a),(s),(c)) +#define iowrite8_rep(a, s, c) writesb((a), (s), (c)) +#define iowrite16_rep(a, s, c) writesw((a), (s), (c)) +#define iowrite32_rep(a, s, c) writesl((a), (s), (c)) #define mmiowb() wmb() /* synco on SH-4A, otherwise a nop */ -- cgit v1.2.3
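To make the generic_io_base distinction in that last patch concrete, here is a small, self-contained C model of the bug. This is hypothetical user-space code, not from the kernel: the bus array, the generic_io_base value, and the outsb()/writesb() bodies are simplified stand-ins for the real SH port I/O and MMIO string helpers. Port accessors add generic_io_base to their argument before touching the bus; MMIO accessors must not, because an __iomem cookie is already a complete address, so mapping the _rep macros onto the port helpers adjusts an already-adjusted address:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint8_t bus[0x10000];			/* toy model of the address space */
static unsigned long generic_io_base = 0x1000;	/* platform port I/O base */

/* Port I/O string write: the port number is offset by generic_io_base. */
static void outsb(unsigned long port, const void *src, size_t count)
{
	const uint8_t *s = src;
	uint8_t *d = &bus[port + generic_io_base];

	while (count--)
		*d++ = *s++;
}

/* MMIO string write: the address is used exactly as given. */
static void writesb(unsigned long addr, const void *src, size_t count)
{
	const uint8_t *s = src;
	uint8_t *d = &bus[addr];

	while (count--)
		*d++ = *s++;
}

int main(void)
{
	const uint8_t data[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned long cookie = 0x2000;	/* already a full MMIO address */

	outsb(cookie, data, sizeof(data));	/* old iowrite8_rep mapping: lands at 0x3000 */
	writesb(cookie, data, sizeof(data));	/* fixed mapping: lands at 0x2000 */

	printf("via outsb:   bus[0x3000] = %02x (wrong location)\n", bus[0x3000]);
	printf("via writesb: bus[0x2000] = %02x (intended location)\n", bus[0x2000]);
	return 0;
}

This mirrors why the patch redefines ioreadN_rep/iowriteN_rep in terms of readsN/writesN: the repeated-transfer macros then follow the same no-adjustment rule as every other mmio accessor, and only the inN/outN/insN/outsN port operations keep the generic_io_base offset.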