Diffstat (limited to 'drivers/misc'): 70 files changed, 5498 insertions, 820 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 6cb388e8fb7d..a43d0c467274 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -54,6 +54,7 @@ config AD525X_DPOT_SPI config ATMEL_PWM tristate "Atmel AT32/AT91 PWM support" depends on HAVE_CLK + depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45 help This option enables device driver support for the PWM channels on certain Atmel processors. Pulse Width Modulation is used for @@ -200,7 +201,7 @@ config ICS932S401 config ATMEL_SSC tristate "Device driver for Atmel SSC peripheral" - depends on HAS_IOMEM + depends on HAS_IOMEM && (AVR32 || ARCH_AT91 || COMPILE_TEST) ---help--- This option enables device driver support for Atmel Synchronized Serial Communication peripheral (SSC). @@ -235,7 +236,7 @@ config SGI_XP config CS5535_MFGPT tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support" - depends on PCI && X86 && MFD_CS5535 + depends on MFD_CS5535 default n help This driver provides access to MFGPT functionality for other @@ -300,8 +301,8 @@ config SGI_GRU_DEBUG depends on SGI_GRU default n ---help--- - This option enables addition debugging code for the SGI GRU driver. If - you are unsure, say N. + This option enables additional debugging code for the SGI GRU driver. + If you are unsure, say N. config APDS9802ALS tristate "Medfield Avago APDS9802 ALS Sensor module" @@ -468,7 +469,7 @@ config BMP085_SPI config PCH_PHUB tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" select GENERIC_NET_UTILS - depends on PCI + depends on PCI && (X86_32 || COMPILE_TEST) help This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded @@ -515,6 +516,15 @@ config SRAM the genalloc API. It is supposed to be used for small on-chip SRAM areas found on many SoCs. +config VEXPRESS_SYSCFG + bool "Versatile Express System Configuration driver" + depends on VEXPRESS_CONFIG + default y + help + ARM Ltd. Versatile Express uses specialised platform configuration + bus. System Configuration interface is one of the possible means + of generating transactions on this bus. 
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" @@ -526,4 +536,5 @@ source "drivers/misc/mei/Kconfig" source "drivers/misc/vmw_vmci/Kconfig" source "drivers/misc/mic/Kconfig" source "drivers/misc/genwqe/Kconfig" +source "drivers/misc/echo/Kconfig" endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 99b9424ce31d..d59ce1261b38 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -54,3 +54,5 @@ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o obj-$(CONFIG_SRAM) += sram.o obj-y += mic/ obj-$(CONFIG_GENWQE) += genwqe/ +obj-$(CONFIG_ECHO) += echo/ +obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c index d3eee113baeb..a43053daad0e 100644 --- a/drivers/misc/ad525x_dpot.c +++ b/drivers/misc/ad525x_dpot.c @@ -72,7 +72,6 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c index 0c6e037153d2..c6cc3dc8ae1f 100644 --- a/drivers/misc/apds9802als.c +++ b/drivers/misc/apds9802als.c @@ -22,7 +22,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/err.h> diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c index b7ebf8021d99..c72e96b523ed 100644 --- a/drivers/misc/arm-charlcd.c +++ b/drivers/misc/arm-charlcd.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/of.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/io.h> @@ -366,11 +367,17 @@ static const struct dev_pm_ops charlcd_pm_ops = { .resume = charlcd_resume, }; +static const struct of_device_id charlcd_match[] = { + { .compatible = "arm,versatile-lcd", }, + {} +}; + static struct platform_driver charlcd_driver = { .driver = { .name = DRIVERNAME, .owner = THIS_MODULE, .pm = &charlcd_pm_ops, + .of_match_table = of_match_ptr(charlcd_match), }, .remove = __exit_p(charlcd_remove), }; diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index 5be808406edc..22de13727641 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -150,6 +150,12 @@ static int ssc_probe(struct platform_device *pdev) return -ENODEV; ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; + if (pdev->dev.of_node) { + struct device_node *np = pdev->dev.of_node; + ssc->clk_from_rk_pin = + of_property_read_bool(np, "atmel,clk-from-rk-pin"); + } + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); ssc->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(ssc->regs)) diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c index 820e53d0048f..9b313f7810f5 100644 --- a/drivers/misc/bmp085.c +++ b/drivers/misc/bmp085.c @@ -47,7 +47,6 @@ #include <linux/module.h> #include <linux/device.h> -#include <linux/init.h> #include <linux/slab.h> #include <linux/of.h> #include "bmp085.h" diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 9e2b985293fc..14d90eae605b 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c @@ -101,7 +101,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/poll.h> -#include <linux/init.h> #include <linux/slab.h> #include <linux/kref.h> #include <linux/io.h> diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c index 
154b02e5094f..b909fb30232a 100644 --- a/drivers/misc/ds1682.c +++ b/drivers/misc/ds1682.c @@ -32,7 +32,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/i2c.h> #include <linux/string.h> #include <linux/list.h> @@ -86,7 +85,6 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr, { struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); struct i2c_client *client = to_i2c_client(dev); - char *endp; u64 val; __le32 val_le; int rc; @@ -94,8 +92,8 @@ static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr, dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name); /* Decode input */ - val = simple_strtoull(buf, &endp, 0); - if (buf == endp) { + rc = kstrtoull(buf, 0, &val); + if (rc < 0) { dev_dbg(dev, "input string not a number\n"); return -EINVAL; } diff --git a/drivers/misc/echo/Kconfig b/drivers/misc/echo/Kconfig new file mode 100644 index 000000000000..f1d41ea9cd48 --- /dev/null +++ b/drivers/misc/echo/Kconfig @@ -0,0 +1,9 @@ +config ECHO + tristate "Line Echo Canceller support" + default n + ---help--- + This driver provides line echo cancelling support for mISDN and + Zaptel drivers. + + To compile this driver as a module, choose M here. The module + will be called echo. diff --git a/drivers/misc/echo/Makefile b/drivers/misc/echo/Makefile new file mode 100644 index 000000000000..7d4caac12a8d --- /dev/null +++ b/drivers/misc/echo/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ECHO) += echo.o diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c new file mode 100644 index 000000000000..9597e9523cac --- /dev/null +++ b/drivers/misc/echo/echo.c @@ -0,0 +1,674 @@ +/* + * SpanDSP - a series of DSP components for telephony + * + * echo.c - A line echo canceller. This code is being developed + * against and partially complies with G168. + * + * Written by Steve Underwood <steveu@coppice.org> + * and David Rowe <david_at_rowetel_dot_com> + * + * Copyright (C) 2001, 2003 Steve Underwood, 2007 David Rowe + * + * Based on a bit from here, a bit from there, eye of toad, ear of + * bat, 15 years of failed attempts by David and a few fried brain + * cells. + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*! \file */ + +/* Implementation Notes + David Rowe + April 2007 + + This code started life as Steve's NLMS algorithm with a tap + rotation algorithm to handle divergence during double talk. I + added a Geigel Double Talk Detector (DTD) [2] and performed some + G168 tests. However I had trouble meeting the G168 requirements, + especially for double talk - there were always cases where my DTD + failed, for example where near end speech was under the 6dB + threshold required for declaring double talk. + + So I tried a two path algorithm [1], which has so far given better + results. 
The original tap rotation/Geigel algorithm is available + in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit. + It's probably possible to make it work if some one wants to put some + serious work into it. + + At present no special treatment is provided for tones, which + generally cause NLMS algorithms to diverge. Initial runs of a + subset of the G168 tests for tones (e.g ./echo_test 6) show the + current algorithm is passing OK, which is kind of surprising. The + full set of tests needs to be performed to confirm this result. + + One other interesting change is that I have managed to get the NLMS + code to work with 16 bit coefficients, rather than the original 32 + bit coefficents. This reduces the MIPs and storage required. + I evaulated the 16 bit port using g168_tests.sh and listening tests + on 4 real-world samples. + + I also attempted the implementation of a block based NLMS update + [2] but although this passes g168_tests.sh it didn't converge well + on the real-world samples. I have no idea why, perhaps a scaling + problem. The block based code is also available in SVN + http://svn.rowetel.com/software/oslec/tags/before_16bit. If this + code can be debugged, it will lead to further reduction in MIPS, as + the block update code maps nicely onto DSP instruction sets (it's a + dot product) compared to the current sample-by-sample update. + + Steve also has some nice notes on echo cancellers in echo.h + + References: + + [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo + Path Models", IEEE Transactions on communications, COM-25, + No. 6, June + 1977. + http://www.rowetel.com/images/echo/dual_path_paper.pdf + + [2] The classic, very useful paper that tells you how to + actually build a real world echo canceller: + Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice + Echo Canceller with a TMS320020, + http://www.rowetel.com/images/echo/spra129.pdf + + [3] I have written a series of blog posts on this work, here is + Part 1: http://www.rowetel.com/blog/?p=18 + + [4] The source code http://svn.rowetel.com/software/oslec/ + + [5] A nice reference on LMS filters: + http://en.wikipedia.org/wiki/Least_mean_squares_filter + + Credits: + + Thanks to Steve Underwood, Jean-Marc Valin, and Ramakrishnan + Muthukrishnan for their suggestions and email discussions. Thanks + also to those people who collected echo samples for me such as + Mark, Pawel, and Pavel. 
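
   For reference, the 16 bit background-filter update mentioned above reduces
   to the textbook NLMS recursion. In the ASCII notation used by the comments
   in this file (a sketch only, following the factor derivation given further
   down in oslec_update() and the tap update in lms_adapt_bg()):

       factor     = clean_bg << (30 - 2 - log2(P))  ~=  (2^30) * 0.25 * clean_bg / P
       taps16[i] += (history[i] * factor + 2^14) >> 15

   which, reading the taps as Q15 coefficients w[i] = taps16[i] / 2^15, is

       w[i] += Beta * clean_bg * history[i] / P,    Beta = 0.25

   where P is the total power in the filter state and log2(P) is approximated
   with top_bit().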
+*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "echo.h" + +#define MIN_TX_POWER_FOR_ADAPTION 64 +#define MIN_RX_POWER_FOR_ADAPTION 64 +#define DTD_HANGOVER 600 /* 600 samples, or 75ms */ +#define DC_LOG2BETA 3 /* log2() of DC filter Beta */ + +/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */ + +#ifdef __bfin__ +static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) +{ + int i; + int offset1; + int offset2; + int factor; + int exp; + int16_t *phist; + int n; + + if (shift > 0) + factor = clean << shift; + else + factor = clean >> -shift; + + /* Update the FIR taps */ + + offset2 = ec->curr_pos; + offset1 = ec->taps - offset2; + phist = &ec->fir_state_bg.history[offset2]; + + /* st: and en: help us locate the assembler in echo.s */ + + /* asm("st:"); */ + n = ec->taps; + for (i = 0; i < n; i++) { + exp = *phist++ * factor; + ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); + } + /* asm("en:"); */ + + /* Note the asm for the inner loop above generated by Blackfin gcc + 4.1.1 is pretty good (note even parallel instructions used): + + R0 = W [P0++] (X); + R0 *= R2; + R0 = R0 + R3 (NS) || + R1 = W [P1] (X) || + nop; + R0 >>>= 15; + R0 = R0 + R1; + W [P1++] = R0; + + A block based update algorithm would be much faster but the + above can't be improved on much. Every instruction saved in + the loop above is 2 MIPs/ch! The for loop above is where the + Blackfin spends most of it's time - about 17 MIPs/ch measured + with speedtest.c with 256 taps (32ms). Write-back and + Write-through cache gave about the same performance. + */ +} + +/* + IDEAS for further optimisation of lms_adapt_bg(): + + 1/ The rounding is quite costly. Could we keep as 32 bit coeffs + then make filter pluck the MS 16-bits of the coeffs when filtering? + However this would lower potential optimisation of filter, as I + think the dual-MAC architecture requires packed 16 bit coeffs. + + 2/ Block based update would be more efficient, as per comments above, + could use dual MAC architecture. + + 3/ Look for same sample Blackfin LMS code, see if we can get dual-MAC + packing. + + 4/ Execute the whole e/c in a block of say 20ms rather than sample + by sample. Processing a few samples every ms is inefficient. 
+*/ + +#else +static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift) +{ + int i; + + int offset1; + int offset2; + int factor; + int exp; + + if (shift > 0) + factor = clean << shift; + else + factor = clean >> -shift; + + /* Update the FIR taps */ + + offset2 = ec->curr_pos; + offset1 = ec->taps - offset2; + + for (i = ec->taps - 1; i >= offset1; i--) { + exp = (ec->fir_state_bg.history[i - offset1] * factor); + ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); + } + for (; i >= 0; i--) { + exp = (ec->fir_state_bg.history[i + offset2] * factor); + ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15); + } +} +#endif + +static inline int top_bit(unsigned int bits) +{ + if (bits == 0) + return -1; + else + return (int)fls((int32_t) bits) - 1; +} + +struct oslec_state *oslec_create(int len, int adaption_mode) +{ + struct oslec_state *ec; + int i; + const int16_t *history; + + ec = kzalloc(sizeof(*ec), GFP_KERNEL); + if (!ec) + return NULL; + + ec->taps = len; + ec->log2taps = top_bit(len); + ec->curr_pos = ec->taps - 1; + + ec->fir_taps16[0] = + kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->fir_taps16[0]) + goto error_oom_0; + + ec->fir_taps16[1] = + kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->fir_taps16[1]) + goto error_oom_1; + + history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps); + if (!history) + goto error_state; + history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps); + if (!history) + goto error_state_bg; + + for (i = 0; i < 5; i++) + ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0; + + ec->cng_level = 1000; + oslec_adaption_mode(ec, adaption_mode); + + ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL); + if (!ec->snapshot) + goto error_snap; + + ec->cond_met = 0; + ec->pstates = 0; + ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; + ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; + ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; + ec->lbgn = ec->lbgn_acc = 0; + ec->lbgn_upper = 200; + ec->lbgn_upper_acc = ec->lbgn_upper << 13; + + return ec; + +error_snap: + fir16_free(&ec->fir_state_bg); +error_state_bg: + fir16_free(&ec->fir_state); +error_state: + kfree(ec->fir_taps16[1]); +error_oom_1: + kfree(ec->fir_taps16[0]); +error_oom_0: + kfree(ec); + return NULL; +} +EXPORT_SYMBOL_GPL(oslec_create); + +void oslec_free(struct oslec_state *ec) +{ + int i; + + fir16_free(&ec->fir_state); + fir16_free(&ec->fir_state_bg); + for (i = 0; i < 2; i++) + kfree(ec->fir_taps16[i]); + kfree(ec->snapshot); + kfree(ec); +} +EXPORT_SYMBOL_GPL(oslec_free); + +void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode) +{ + ec->adaption_mode = adaption_mode; +} +EXPORT_SYMBOL_GPL(oslec_adaption_mode); + +void oslec_flush(struct oslec_state *ec) +{ + int i; + + ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0; + ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0; + ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; + + ec->lbgn = ec->lbgn_acc = 0; + ec->lbgn_upper = 200; + ec->lbgn_upper_acc = ec->lbgn_upper << 13; + + ec->nonupdate_dwell = 0; + + fir16_flush(&ec->fir_state); + fir16_flush(&ec->fir_state_bg); + ec->fir_state.curr_pos = ec->taps - 1; + ec->fir_state_bg.curr_pos = ec->taps - 1; + for (i = 0; i < 2; i++) + memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t)); + + ec->curr_pos = ec->taps - 1; + ec->pstates = 0; +} +EXPORT_SYMBOL_GPL(oslec_flush); + +void oslec_snapshot(struct oslec_state *ec) +{ + memcpy(ec->snapshot, 
ec->fir_taps16[0], ec->taps * sizeof(int16_t)); +} +EXPORT_SYMBOL_GPL(oslec_snapshot); + +/* Dual Path Echo Canceller */ + +int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx) +{ + int32_t echo_value; + int clean_bg; + int tmp; + int tmp1; + + /* + * Input scaling was found be required to prevent problems when tx + * starts clipping. Another possible way to handle this would be the + * filter coefficent scaling. + */ + + ec->tx = tx; + ec->rx = rx; + tx >>= 1; + rx >>= 1; + + /* + * Filter DC, 3dB point is 160Hz (I think), note 32 bit precision + * required otherwise values do not track down to 0. Zero at DC, Pole + * at (1-Beta) on real axis. Some chip sets (like Si labs) don't + * need this, but something like a $10 X100P card does. Any DC really + * slows down convergence. + * + * Note: removes some low frequency from the signal, this reduces the + * speech quality when listening to samples through headphones but may + * not be obvious through a telephone handset. + * + * Note that the 3dB frequency in radians is approx Beta, e.g. for Beta + * = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz. + */ + + if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) { + tmp = rx << 15; + + /* + * Make sure the gain of the HPF is 1.0. This can still + * saturate a little under impulse conditions, and it might + * roll to 32768 and need clipping on sustained peak level + * signals. However, the scale of such clipping is small, and + * the error due to any saturation should not markedly affect + * the downstream processing. + */ + tmp -= (tmp >> 4); + + ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2; + + /* + * hard limit filter to prevent clipping. Note that at this + * stage rx should be limited to +/- 16383 due to right shift + * above + */ + tmp1 = ec->rx_1 >> 15; + if (tmp1 > 16383) + tmp1 = 16383; + if (tmp1 < -16383) + tmp1 = -16383; + rx = tmp1; + ec->rx_2 = tmp; + } + + /* Block average of power in the filter states. Used for + adaption power calculation. */ + + { + int new, old; + + /* efficient "out with the old and in with the new" algorithm so + we don't have to recalculate over the whole block of + samples. */ + new = (int)tx * (int)tx; + old = (int)ec->fir_state.history[ec->fir_state.curr_pos] * + (int)ec->fir_state.history[ec->fir_state.curr_pos]; + ec->pstates += + ((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps; + if (ec->pstates < 0) + ec->pstates = 0; + } + + /* Calculate short term average levels using simple single pole IIRs */ + + ec->ltxacc += abs(tx) - ec->ltx; + ec->ltx = (ec->ltxacc + (1 << 4)) >> 5; + ec->lrxacc += abs(rx) - ec->lrx; + ec->lrx = (ec->lrxacc + (1 << 4)) >> 5; + + /* Foreground filter */ + + ec->fir_state.coeffs = ec->fir_taps16[0]; + echo_value = fir16(&ec->fir_state, tx); + ec->clean = rx - echo_value; + ec->lcleanacc += abs(ec->clean) - ec->lclean; + ec->lclean = (ec->lcleanacc + (1 << 4)) >> 5; + + /* Background filter */ + + echo_value = fir16(&ec->fir_state_bg, tx); + clean_bg = rx - echo_value; + ec->lclean_bgacc += abs(clean_bg) - ec->lclean_bg; + ec->lclean_bg = (ec->lclean_bgacc + (1 << 4)) >> 5; + + /* Background Filter adaption */ + + /* Almost always adap bg filter, just simple DT and energy + detection to minimise adaption in cases of strong double talk. + However this is not critical for the dual path algorithm. + */ + ec->factor = 0; + ec->shift = 0; + if ((ec->nonupdate_dwell == 0)) { + int p, logp, shift; + + /* Determine: + + f = Beta * clean_bg_rx/P ------ (1) + + where P is the total power in the filter states. 
+ + The Boffins have shown that if we obey (1) we converge + quickly and avoid instability. + + The correct factor f must be in Q30, as this is the fixed + point format required by the lms_adapt_bg() function, + therefore the scaled version of (1) is: + + (2^30) * f = (2^30) * Beta * clean_bg_rx/P + factor = (2^30) * Beta * clean_bg_rx/P ----- (2) + + We have chosen Beta = 0.25 by experiment, so: + + factor = (2^30) * (2^-2) * clean_bg_rx/P + + (30 - 2 - log2(P)) + factor = clean_bg_rx 2 ----- (3) + + To avoid a divide we approximate log2(P) as top_bit(P), + which returns the position of the highest non-zero bit in + P. This approximation introduces an error as large as a + factor of 2, but the algorithm seems to handle it OK. + + Come to think of it a divide may not be a big deal on a + modern DSP, so its probably worth checking out the cycles + for a divide versus a top_bit() implementation. + */ + + p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates; + logp = top_bit(p) + ec->log2taps; + shift = 30 - 2 - logp; + ec->shift = shift; + + lms_adapt_bg(ec, clean_bg, shift); + } + + /* very simple DTD to make sure we dont try and adapt with strong + near end speech */ + + ec->adapt = 0; + if ((ec->lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->lrx > ec->ltx)) + ec->nonupdate_dwell = DTD_HANGOVER; + if (ec->nonupdate_dwell) + ec->nonupdate_dwell--; + + /* Transfer logic */ + + /* These conditions are from the dual path paper [1], I messed with + them a bit to improve performance. */ + + if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) && + (ec->nonupdate_dwell == 0) && + /* (ec->Lclean_bg < 0.875*ec->Lclean) */ + (8 * ec->lclean_bg < 7 * ec->lclean) && + /* (ec->Lclean_bg < 0.125*ec->Ltx) */ + (8 * ec->lclean_bg < ec->ltx)) { + if (ec->cond_met == 6) { + /* + * BG filter has had better results for 6 consecutive + * samples + */ + ec->adapt = 1; + memcpy(ec->fir_taps16[0], ec->fir_taps16[1], + ec->taps * sizeof(int16_t)); + } else + ec->cond_met++; + } else + ec->cond_met = 0; + + /* Non-Linear Processing */ + + ec->clean_nlp = ec->clean; + if (ec->adaption_mode & ECHO_CAN_USE_NLP) { + /* + * Non-linear processor - a fancy way to say "zap small + * signals, to avoid residual echo due to (uLaw/ALaw) + * non-linearity in the channel.". + */ + + if ((16 * ec->lclean < ec->ltx)) { + /* + * Our e/c has improved echo by at least 24 dB (each + * factor of 2 is 6dB, so 2*2*2*2=16 is the same as + * 6+6+6+6=24dB) + */ + if (ec->adaption_mode & ECHO_CAN_USE_CNG) { + ec->cng_level = ec->lbgn; + + /* + * Very elementary comfort noise generation. + * Just random numbers rolled off very vaguely + * Hoth-like. DR: This noise doesn't sound + * quite right to me - I suspect there are some + * overflow issues in the filtering as it's too + * "crackly". + * TODO: debug this, maybe just play noise at + * high level or look at spectrum. + */ + + ec->cng_rndnum = + 1664525U * ec->cng_rndnum + 1013904223U; + ec->cng_filter = + ((ec->cng_rndnum & 0xFFFF) - 32768 + + 5 * ec->cng_filter) >> 3; + ec->clean_nlp = + (ec->cng_filter * ec->cng_level * 8) >> 14; + + } else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) { + /* This sounds much better than CNG */ + if (ec->clean_nlp > ec->lbgn) + ec->clean_nlp = ec->lbgn; + if (ec->clean_nlp < -ec->lbgn) + ec->clean_nlp = -ec->lbgn; + } else { + /* + * just mute the residual, doesn't sound very + * good, used mainly in G168 tests + */ + ec->clean_nlp = 0; + } + } else { + /* + * Background noise estimator. I tried a few + * algorithms here without much luck. 
This very simple + * one seems to work best, we just average the level + * using a slow (1 sec time const) filter if the + * current level is less than a (experimentally + * derived) constant. This means we dont include high + * level signals like near end speech. When combined + * with CNG or especially CLIP seems to work OK. + */ + if (ec->lclean < 40) { + ec->lbgn_acc += abs(ec->clean) - ec->lbgn; + ec->lbgn = (ec->lbgn_acc + (1 << 11)) >> 12; + } + } + } + + /* Roll around the taps buffer */ + if (ec->curr_pos <= 0) + ec->curr_pos = ec->taps; + ec->curr_pos--; + + if (ec->adaption_mode & ECHO_CAN_DISABLE) + ec->clean_nlp = rx; + + /* Output scaled back up again to match input scaling */ + + return (int16_t) ec->clean_nlp << 1; +} +EXPORT_SYMBOL_GPL(oslec_update); + +/* This function is separated from the echo canceller is it is usually called + as part of the tx process. See rx HP (DC blocking) filter above, it's + the same design. + + Some soft phones send speech signals with a lot of low frequency + energy, e.g. down to 20Hz. This can make the hybrid non-linear + which causes the echo canceller to fall over. This filter can help + by removing any low frequency before it gets to the tx port of the + hybrid. + + It can also help by removing and DC in the tx signal. DC is bad + for LMS algorithms. + + This is one of the classic DC removal filters, adjusted to provide + sufficient bass rolloff to meet the above requirement to protect hybrids + from things that upset them. The difference between successive samples + produces a lousy HPF, and then a suitably placed pole flattens things out. + The final result is a nicely rolled off bass end. The filtering is + implemented with extended fractional precision, which noise shapes things, + giving very clean DC removal. +*/ + +int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx) +{ + int tmp; + int tmp1; + + if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) { + tmp = tx << 15; + + /* + * Make sure the gain of the HPF is 1.0. The first can still + * saturate a little under impulse conditions, and it might + * roll to 32768 and need clipping on sustained peak level + * signals. However, the scale of such clipping is small, and + * the error due to any saturation should not markedly affect + * the downstream processing. + */ + tmp -= (tmp >> 4); + + ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2; + tmp1 = ec->tx_1 >> 15; + if (tmp1 > 32767) + tmp1 = 32767; + if (tmp1 < -32767) + tmp1 = -32767; + tx = tmp1; + ec->tx_2 = tmp; + } + + return tx; +} +EXPORT_SYMBOL_GPL(oslec_hpf_tx); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Rowe"); +MODULE_DESCRIPTION("Open Source Line Echo Canceller"); +MODULE_VERSION("0.3.0"); diff --git a/drivers/misc/echo/echo.h b/drivers/misc/echo/echo.h new file mode 100644 index 000000000000..9b08c63e6369 --- /dev/null +++ b/drivers/misc/echo/echo.h @@ -0,0 +1,187 @@ +/* + * SpanDSP - a series of DSP components for telephony + * + * echo.c - A line echo canceller. This code is being developed + * against and partially complies with G168. + * + * Written by Steve Underwood <steveu@coppice.org> + * and David Rowe <david_at_rowetel_dot_com> + * + * Copyright (C) 2001 Steve Underwood and 2007 David Rowe + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __ECHO_H +#define __ECHO_H + +/* +Line echo cancellation for voice + +What does it do? + +This module aims to provide G.168-2002 compliant echo cancellation, to remove +electrical echoes (e.g. from 2-4 wire hybrids) from voice calls. + +How does it work? + +The heart of the echo cancellor is FIR filter. This is adapted to match the +echo impulse response of the telephone line. It must be long enough to +adequately cover the duration of that impulse response. The signal transmitted +to the telephone line is passed through the FIR filter. Once the FIR is +properly adapted, the resulting output is an estimate of the echo signal +received from the line. This is subtracted from the received signal. The result +is an estimate of the signal which originated at the far end of the line, free +from echos of our own transmitted signal. + +The least mean squares (LMS) algorithm is attributed to Widrow and Hoff, and +was introduced in 1960. It is the commonest form of filter adaption used in +things like modem line equalisers and line echo cancellers. There it works very +well. However, it only works well for signals of constant amplitude. It works +very poorly for things like speech echo cancellation, where the signal level +varies widely. This is quite easy to fix. If the signal level is normalised - +similar to applying AGC - LMS can work as well for a signal of varying +amplitude as it does for a modem signal. This normalised least mean squares +(NLMS) algorithm is the commonest one used for speech echo cancellation. Many +other algorithms exist - e.g. RLS (essentially the same as Kalman filtering), +FAP, etc. Some perform significantly better than NLMS. However, factors such +as computational complexity and patents favour the use of NLMS. + +A simple refinement to NLMS can improve its performance with speech. NLMS tends +to adapt best to the strongest parts of a signal. If the signal is white noise, +the NLMS algorithm works very well. However, speech has more low frequency than +high frequency content. Pre-whitening (i.e. filtering the signal to flatten its +spectrum) the echo signal improves the adapt rate for speech, and ensures the +final residual signal is not heavily biased towards high frequencies. A very +low complexity filter is adequate for this, so pre-whitening adds little to the +compute requirements of the echo canceller. + +An FIR filter adapted using pre-whitened NLMS performs well, provided certain +conditions are met: + + - The transmitted signal has poor self-correlation. + - There is no signal being generated within the environment being + cancelled. + +The difficulty is that neither of these can be guaranteed. + +If the adaption is performed while transmitting noise (or something fairly +noise like, such as voice) the adaption works very well. If the adaption is +performed while transmitting something highly correlative (typically narrow +band energy such as signalling tones or DTMF), the adaption can go seriously +wrong. 
The reason is there is only one solution for the adaption on a near +random signal - the impulse response of the line. For a repetitive signal, +there are any number of solutions which converge the adaption, and nothing +guides the adaption to choose the generalised one. Allowing an untrained +canceller to converge on this kind of narrowband energy probably a good thing, +since at least it cancels the tones. Allowing a well converged canceller to +continue converging on such energy is just a way to ruin its generalised +adaption. A narrowband detector is needed, so adapation can be suspended at +appropriate times. + +The adaption process is based on trying to eliminate the received signal. When +there is any signal from within the environment being cancelled it may upset +the adaption process. Similarly, if the signal we are transmitting is small, +noise may dominate and disturb the adaption process. If we can ensure that the +adaption is only performed when we are transmitting a significant signal level, +and the environment is not, things will be OK. Clearly, it is easy to tell when +we are sending a significant signal. Telling, if the environment is generating +a significant signal, and doing it with sufficient speed that the adaption will +not have diverged too much more we stop it, is a little harder. + +The key problem in detecting when the environment is sourcing significant +energy is that we must do this very quickly. Given a reasonably long sample of +the received signal, there are a number of strategies which may be used to +assess whether that signal contains a strong far end component. However, by the +time that assessment is complete the far end signal will have already caused +major mis-convergence in the adaption process. An assessment algorithm is +needed which produces a fairly accurate result from a very short burst of far +end energy. + +How do I use it? + +The echo cancellor processes both the transmit and receive streams sample by +sample. The processing function is not declared inline. Unfortunately, +cancellation requires many operations per sample, so the call overhead is only +a minor burden. +*/ + +#include "fir.h" +#include "oslec.h" + +/* + G.168 echo canceller descriptor. This defines the working state for a line + echo canceller. 
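
As a concrete illustration of the sample-by-sample usage described under "How
do I use it?" above, here is a minimal sketch of a per-channel processing
loop. The tail length, the mode-flag combination and the read_*/write_*
helpers are illustrative assumptions, not part of this driver:

    #include "oslec.h"

    /* e.g. 256 taps = 32 ms echo tail at 8 kHz; flags are one plausible mix */
    struct oslec_state *ec = oslec_create(256,
            ECHO_CAN_USE_ADAPTION | ECHO_CAN_USE_NLP | ECHO_CAN_USE_CLIP |
            ECHO_CAN_USE_TX_HPF | ECHO_CAN_USE_RX_HPF);
    if (!ec)
        return -ENOMEM;

    while (running) {
        int16_t tx = read_sample_from_network();  /* far-end speech to play out */
        int16_t rx = read_sample_from_line();     /* near-end speech + echo of tx */

        tx = oslec_hpf_tx(ec, tx);                /* DC-block tx, send to the D/A */
        write_sample_to_line(tx);

        /* clean, echo-cancelled receive sample goes back to the network */
        write_sample_to_network(oslec_update(ec, tx, rx));
    }

    oslec_free(ec);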
+*/ +struct oslec_state { + int16_t tx; + int16_t rx; + int16_t clean; + int16_t clean_nlp; + + int nonupdate_dwell; + int curr_pos; + int taps; + int log2taps; + int adaption_mode; + + int cond_met; + int32_t pstates; + int16_t adapt; + int32_t factor; + int16_t shift; + + /* Average levels and averaging filter states */ + int ltxacc; + int lrxacc; + int lcleanacc; + int lclean_bgacc; + int ltx; + int lrx; + int lclean; + int lclean_bg; + int lbgn; + int lbgn_acc; + int lbgn_upper; + int lbgn_upper_acc; + + /* foreground and background filter states */ + struct fir16_state_t fir_state; + struct fir16_state_t fir_state_bg; + int16_t *fir_taps16[2]; + + /* DC blocking filter states */ + int tx_1; + int tx_2; + int rx_1; + int rx_2; + + /* optional High Pass Filter states */ + int32_t xvtx[5]; + int32_t yvtx[5]; + int32_t xvrx[5]; + int32_t yvrx[5]; + + /* Parameters for the optional Hoth noise generator */ + int cng_level; + int cng_rndnum; + int cng_filter; + + /* snapshot sample of coeffs used for development */ + int16_t *snapshot; +}; + +#endif /* __ECHO_H */ diff --git a/drivers/misc/echo/fir.h b/drivers/misc/echo/fir.h new file mode 100644 index 000000000000..7b9fabf1fea5 --- /dev/null +++ b/drivers/misc/echo/fir.h @@ -0,0 +1,216 @@ +/* + * SpanDSP - a series of DSP components for telephony + * + * fir.h - General telephony FIR routines + * + * Written by Steve Underwood <steveu@coppice.org> + * + * Copyright (C) 2002 Steve Underwood + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#if !defined(_FIR_H_) +#define _FIR_H_ + +/* + Blackfin NOTES & IDEAS: + + A simple dot product function is used to implement the filter. This performs + just one MAC/cycle which is inefficient but was easy to implement as a first + pass. The current Blackfin code also uses an unrolled form of the filter + history to avoid 0 length hardware loop issues. This is wasteful of + memory. + + Ideas for improvement: + + 1/ Rewrite filter for dual MAC inner loop. The issue here is handling + history sample offsets that are 16 bit aligned - the dual MAC needs + 32 bit aligmnent. There are some good examples in libbfdsp. + + 2/ Use the hardware circular buffer facility tohalve memory usage. + + 3/ Consider using internal memory. + + Using less memory might also improve speed as cache misses will be + reduced. A drop in MIPs and memory approaching 50% should be + possible. + + The foreground and background filters currenlty use a total of + about 10 MIPs/ch as measured with speedtest.c on a 256 TAP echo + can. +*/ + +/* + * 16 bit integer FIR descriptor. This defines the working state for a single + * instance of an FIR filter using 16 bit integer coefficients. + */ +struct fir16_state_t { + int taps; + int curr_pos; + const int16_t *coeffs; + int16_t *history; +}; + +/* + * 32 bit integer FIR descriptor. 
This defines the working state for a single + * instance of an FIR filter using 32 bit integer coefficients, and filtering + * 16 bit integer data. + */ +struct fir32_state_t { + int taps; + int curr_pos; + const int32_t *coeffs; + int16_t *history; +}; + +/* + * Floating point FIR descriptor. This defines the working state for a single + * instance of an FIR filter using floating point coefficients and data. + */ +struct fir_float_state_t { + int taps; + int curr_pos; + const float *coeffs; + float *history; +}; + +static inline const int16_t *fir16_create(struct fir16_state_t *fir, + const int16_t *coeffs, int taps) +{ + fir->taps = taps; + fir->curr_pos = taps - 1; + fir->coeffs = coeffs; +#if defined(__bfin__) + fir->history = kcalloc(2 * taps, sizeof(int16_t), GFP_KERNEL); +#else + fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); +#endif + return fir->history; +} + +static inline void fir16_flush(struct fir16_state_t *fir) +{ +#if defined(__bfin__) + memset(fir->history, 0, 2 * fir->taps * sizeof(int16_t)); +#else + memset(fir->history, 0, fir->taps * sizeof(int16_t)); +#endif +} + +static inline void fir16_free(struct fir16_state_t *fir) +{ + kfree(fir->history); +} + +#ifdef __bfin__ +static inline int32_t dot_asm(short *x, short *y, int len) +{ + int dot; + + len--; + + __asm__("I0 = %1;\n\t" + "I1 = %2;\n\t" + "A0 = 0;\n\t" + "R0.L = W[I0++] || R1.L = W[I1++];\n\t" + "LOOP dot%= LC0 = %3;\n\t" + "LOOP_BEGIN dot%=;\n\t" + "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t" + "LOOP_END dot%=;\n\t" + "A0 += R0.L*R1.L (IS);\n\t" + "R0 = A0;\n\t" + "%0 = R0;\n\t" + : "=&d"(dot) + : "a"(x), "a"(y), "a"(len) + : "I0", "I1", "A1", "A0", "R0", "R1" + ); + + return dot; +} +#endif + +static inline int16_t fir16(struct fir16_state_t *fir, int16_t sample) +{ + int32_t y; +#if defined(__bfin__) + fir->history[fir->curr_pos] = sample; + fir->history[fir->curr_pos + fir->taps] = sample; + y = dot_asm((int16_t *) fir->coeffs, &fir->history[fir->curr_pos], + fir->taps); +#else + int i; + int offset1; + int offset2; + + fir->history[fir->curr_pos] = sample; + + offset2 = fir->curr_pos; + offset1 = fir->taps - offset2; + y = 0; + for (i = fir->taps - 1; i >= offset1; i--) + y += fir->coeffs[i] * fir->history[i - offset1]; + for (; i >= 0; i--) + y += fir->coeffs[i] * fir->history[i + offset2]; +#endif + if (fir->curr_pos <= 0) + fir->curr_pos = fir->taps; + fir->curr_pos--; + return (int16_t) (y >> 15); +} + +static inline const int16_t *fir32_create(struct fir32_state_t *fir, + const int32_t *coeffs, int taps) +{ + fir->taps = taps; + fir->curr_pos = taps - 1; + fir->coeffs = coeffs; + fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL); + return fir->history; +} + +static inline void fir32_flush(struct fir32_state_t *fir) +{ + memset(fir->history, 0, fir->taps * sizeof(int16_t)); +} + +static inline void fir32_free(struct fir32_state_t *fir) +{ + kfree(fir->history); +} + +static inline int16_t fir32(struct fir32_state_t *fir, int16_t sample) +{ + int i; + int32_t y; + int offset1; + int offset2; + + fir->history[fir->curr_pos] = sample; + offset2 = fir->curr_pos; + offset1 = fir->taps - offset2; + y = 0; + for (i = fir->taps - 1; i >= offset1; i--) + y += fir->coeffs[i] * fir->history[i - offset1]; + for (; i >= 0; i--) + y += fir->coeffs[i] * fir->history[i + offset2]; + if (fir->curr_pos <= 0) + fir->curr_pos = fir->taps; + fir->curr_pos--; + return (int16_t) (y >> 15); +} + +#endif diff --git a/drivers/misc/echo/oslec.h b/drivers/misc/echo/oslec.h new file mode 
100644 index 000000000000..f4175360ce27 --- /dev/null +++ b/drivers/misc/echo/oslec.h @@ -0,0 +1,94 @@ +/* + * OSLEC - A line echo canceller. This code is being developed + * against and partially complies with G168. Using code from SpanDSP + * + * Written by Steve Underwood <steveu@coppice.org> + * and David Rowe <david_at_rowetel_dot_com> + * + * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __OSLEC_H +#define __OSLEC_H + +/* Mask bits for the adaption mode */ +#define ECHO_CAN_USE_ADAPTION 0x01 +#define ECHO_CAN_USE_NLP 0x02 +#define ECHO_CAN_USE_CNG 0x04 +#define ECHO_CAN_USE_CLIP 0x08 +#define ECHO_CAN_USE_TX_HPF 0x10 +#define ECHO_CAN_USE_RX_HPF 0x20 +#define ECHO_CAN_DISABLE 0x40 + +/** + * oslec_state: G.168 echo canceller descriptor. + * + * This defines the working state for a line echo canceller. + */ +struct oslec_state; + +/** + * oslec_create - Create a voice echo canceller context. + * @len: The length of the canceller, in samples. + * @return: The new canceller context, or NULL if the canceller could not be + * created. + */ +struct oslec_state *oslec_create(int len, int adaption_mode); + +/** + * oslec_free - Free a voice echo canceller context. + * @ec: The echo canceller context. + */ +void oslec_free(struct oslec_state *ec); + +/** + * oslec_flush - Flush (reinitialise) a voice echo canceller context. + * @ec: The echo canceller context. + */ +void oslec_flush(struct oslec_state *ec); + +/** + * oslec_adaption_mode - set the adaption mode of a voice echo canceller context. + * @ec The echo canceller context. + * @adaption_mode: The mode. + */ +void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode); + +void oslec_snapshot(struct oslec_state *ec); + +/** + * oslec_update: Process a sample through a voice echo canceller. + * @ec: The echo canceller context. + * @tx: The transmitted audio sample. + * @rx: The received audio sample. + * + * The return value is the clean (echo cancelled) received sample. + */ +int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx); + +/** + * oslec_hpf_tx: Process to high pass filter the tx signal. + * @ec: The echo canceller context. + * @tx: The transmitted auio sample. + * + * The return value is the HP filtered transmit sample, send this to your D/A. 
+ */ +int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx); + +#endif /* __OSLEC_H */ diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 4f3bca1003a1..634f72929e12 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -10,7 +10,6 @@ */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index f0fa4e8ca124..33f8673d23a6 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -17,7 +17,6 @@ */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/jiffies.h> diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 78e55b501c94..9ebeacdb8ec4 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -11,7 +11,6 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index e36157d5d3ab..580ff9df5529 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -27,7 +27,6 @@ */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c index 9c34e5704304..3f2b625b2032 100644 --- a/drivers/misc/eeprom/sunxi_sid.c +++ b/drivers/misc/eeprom/sunxi_sid.c @@ -21,7 +21,6 @@ #include <linux/err.h> #include <linux/export.h> #include <linux/fs.h> -#include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/kobject.h> @@ -96,7 +95,7 @@ static int sunxi_sid_remove(struct platform_device *pdev) } static const struct of_device_id sunxi_sid_of_match[] = { - { .compatible = "allwinner,sun4i-sid", .data = (void *)16}, + { .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16}, { .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512}, {/* sentinel */}, }; diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h index 5e4dbd21f89a..0e608a288603 100644 --- a/drivers/misc/genwqe/card_base.h +++ b/drivers/misc/genwqe/card_base.h @@ -337,6 +337,44 @@ enum genwqe_requ_state { }; /** + * struct genwqe_sgl - Scatter gather list describing user-space memory + * @sgl: scatter gather list needs to be 128 byte aligned + * @sgl_dma_addr: dma address of sgl + * @sgl_size: size of area used for sgl + * @user_addr: user-space address of memory area + * @user_size: size of user-space memory area + * @page: buffer for partial pages if needed + * @page_dma_addr: dma address partial pages + */ +struct genwqe_sgl { + dma_addr_t sgl_dma_addr; + struct sg_entry *sgl; + size_t sgl_size; /* size of sgl */ + + void __user *user_addr; /* user-space base-address */ + size_t user_size; /* size of memory area */ + + unsigned long nr_pages; + unsigned long fpage_offs; + size_t fpage_size; + size_t lpage_size; + + void *fpage; + dma_addr_t fpage_dma_addr; + + void *lpage; + dma_addr_t lpage_dma_addr; +}; + +int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + void __user *user_addr, size_t user_size); + +int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + dma_addr_t *dma_list); + +int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct 
genwqe_sgl *sgl); + +/** * struct ddcb_requ - Kernel internal representation of the DDCB request * @cmd: User space representation of the DDCB execution request */ @@ -347,9 +385,7 @@ struct ddcb_requ { struct ddcb_queue *queue; /* associated queue */ struct dma_mapping dma_mappings[DDCB_FIXUPS]; - struct sg_entry *sgl[DDCB_FIXUPS]; - dma_addr_t sgl_dma_addr[DDCB_FIXUPS]; - size_t sgl_size[DDCB_FIXUPS]; + struct genwqe_sgl sgls[DDCB_FIXUPS]; /* kernel/user shared content */ struct genwqe_ddcb_cmd cmd; /* ddcb_no for this request */ @@ -453,22 +489,6 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, struct ddcb_requ *req); -struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages, - dma_addr_t *dma_addr, size_t *sgl_size); - -void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, - dma_addr_t dma_addr, size_t size); - -int genwqe_setup_sgl(struct genwqe_dev *cd, - unsigned long offs, - unsigned long size, - struct sg_entry *sgl, /* genwqe sgl */ - dma_addr_t dma_addr, size_t sgl_size, - dma_addr_t *dma_list, int page_offs, int num_pages); - -int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, - int size); - static inline bool dma_mapping_used(struct dma_mapping *m) { if (!m) diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c index 6f1acc0ccf88..c8046db2d5a2 100644 --- a/drivers/misc/genwqe/card_ddcb.c +++ b/drivers/misc/genwqe/card_ddcb.c @@ -305,6 +305,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, break; new = (old | DDCB_NEXT_BE32); + + wmb(); icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new); if (icrc_hsi_shi == old) @@ -314,6 +316,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, /* Queue must be re-started by updating QUEUE_OFFSET */ ddcb_mark_tapped(pddcb); num = (u64)ddcb_no << 8; + + wmb(); __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ return RET_DDCB_TAPPED; @@ -1306,7 +1310,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd) */ int genwqe_finish_queue(struct genwqe_dev *cd) { - int i, rc, in_flight; + int i, rc = 0, in_flight; int waitmax = genwqe_ddcb_software_timeout; struct pci_dev *pci_dev = cd->pci_dev; struct ddcb_queue *queue = &cd->queue; diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c index 3bfdc07a7248..0a33ade64109 100644 --- a/drivers/misc/genwqe/card_debugfs.c +++ b/drivers/misc/genwqe/card_debugfs.c @@ -26,7 +26,6 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> @@ -349,7 +348,7 @@ int genwqe_init_debugfs(struct genwqe_dev *cd) char name[64]; unsigned int i; - sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx); + sprintf(card_name, "%s%d_card", GENWQE_DEVNAME, cd->card_idx); root = debugfs_create_dir(card_name, cd->debugfs_genwqe); if (!root) { @@ -455,7 +454,7 @@ int genwqe_init_debugfs(struct genwqe_dev *cd) } for (i = 0; i < GENWQE_MAX_VFS; i++) { - sprintf(name, "vf%d_jobtimeout_msec", i); + sprintf(name, "vf%u_jobtimeout_msec", i); file = debugfs_create_u32(name, 0666, root, &cd->vf_jobtimeout_msec[i]); diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 2c2c9cc75231..1d2f163a1906 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -531,7 +531,9 @@ static int do_flash_update(struct 
genwqe_file *cfile, case '1': cmdopts = 0x1C; break; /* download/erase_first/part_1 */ - case 'v': /* cmdopts = 0x0c (VPD) */ + case 'v': + cmdopts = 0x0C; + break; /* download/erase_first/vpd */ default: return -EINVAL; } @@ -665,6 +667,8 @@ static int do_flash_read(struct genwqe_file *cfile, cmdopts = 0x1A; break; /* upload/part_1 */ case 'v': + cmdopts = 0x0A; + break; /* upload/vpd */ default: return -EINVAL; } @@ -836,15 +840,8 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req) __genwqe_del_mapping(cfile, dma_map); genwqe_user_vunmap(cd, dma_map, req); } - if (req->sgl[i] != NULL) { - genwqe_free_sgl(cd, req->sgl[i], - req->sgl_dma_addr[i], - req->sgl_size[i]); - req->sgl[i] = NULL; - req->sgl_dma_addr[i] = 0x0; - req->sgl_size[i] = 0; - } - + if (req->sgls[i].sgl != NULL) + genwqe_free_sync_sgl(cd, &req->sgls[i]); } return 0; } @@ -913,7 +910,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) case ATS_TYPE_SGL_RDWR: case ATS_TYPE_SGL_RD: { - int page_offs, nr_pages, offs; + int page_offs; u_addr = be64_to_cpu(*((__be64 *) &cmd->asiv[asiv_offs])); @@ -951,27 +948,18 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) page_offs = 0; } - offs = offset_in_page(u_addr); - nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE); - /* create genwqe style scatter gather list */ - req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages, - &req->sgl_dma_addr[i], - &req->sgl_size[i]); - if (req->sgl[i] == NULL) { - rc = -ENOMEM; + rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i], + (void __user *)u_addr, + u_size); + if (rc != 0) goto err_out; - } - genwqe_setup_sgl(cd, offs, u_size, - req->sgl[i], - req->sgl_dma_addr[i], - req->sgl_size[i], - m->dma_list, - page_offs, - nr_pages); + + genwqe_setup_sgl(cd, &req->sgls[i], + &m->dma_list[page_offs]); *((__be64 *)&cmd->asiv[asiv_offs]) = - cpu_to_be64(req->sgl_dma_addr[i]); + cpu_to_be64(req->sgls[i].sgl_dma_addr); break; } diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index 6b1a6ef9f1a8..62cc6bb3f62e 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -275,67 +275,107 @@ static int genwqe_sgl_size(int num_pages) return roundup(len, PAGE_SIZE); } -struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages, - dma_addr_t *dma_addr, size_t *sgl_size) +/** + * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages + * + * Allocates memory for sgl and overlapping pages. Pages which might + * overlap other user-space memory blocks are being cached for DMAs, + * such that we do not run into syncronization issues. Data is copied + * from user-space into the cached pages. 
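
 * As a worked example of the bookkeeping done below (the numbers are
 * hypothetical): for a user buffer starting 0x300 bytes into its first page,
 * with user_size = 0x1500 and PAGE_SIZE = 0x1000:
 *
 *     fpage_offs = 0x300
 *     fpage_size = min(PAGE_SIZE - 0x300, 0x1500)          = 0xd00
 *     nr_pages   = DIV_ROUND_UP(0x300 + 0x1500, PAGE_SIZE) = 2
 *     lpage_size = (0x1500 - 0xd00) % PAGE_SIZE            = 0x800
 *
 * Both the first and the last page are incomplete, so both are copied into
 * the fpage/lpage bounce buffers before the DMA and copied back to
 * user-space in genwqe_free_sync_sgl().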
+ */ +int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + void __user *user_addr, size_t user_size) { + int rc; struct pci_dev *pci_dev = cd->pci_dev; - struct sg_entry *sgl; - *sgl_size = genwqe_sgl_size(num_pages); - if (get_order(*sgl_size) > MAX_ORDER) { + sgl->fpage_offs = offset_in_page((unsigned long)user_addr); + sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size); + sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE); + sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE; + + dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld " + "fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n", + __func__, user_addr, user_size, sgl->nr_pages, + sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size); + + sgl->user_addr = user_addr; + sgl->user_size = user_size; + sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages); + + if (get_order(sgl->sgl_size) > MAX_ORDER) { dev_err(&pci_dev->dev, "[%s] err: too much memory requested!\n", __func__); - return NULL; + return -ENOMEM; } - sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr); - if (sgl == NULL) { + sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size, + &sgl->sgl_dma_addr); + if (sgl->sgl == NULL) { dev_err(&pci_dev->dev, "[%s] err: no memory available!\n", __func__); - return NULL; + return -ENOMEM; } - return sgl; + /* Only use buffering on incomplete pages */ + if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) { + sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE, + &sgl->fpage_dma_addr); + if (sgl->fpage == NULL) + goto err_out; + + /* Sync with user memory */ + if (copy_from_user(sgl->fpage + sgl->fpage_offs, + user_addr, sgl->fpage_size)) { + rc = -EFAULT; + goto err_out; + } + } + if (sgl->lpage_size != 0) { + sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE, + &sgl->lpage_dma_addr); + if (sgl->lpage == NULL) + goto err_out1; + + /* Sync with user memory */ + if (copy_from_user(sgl->lpage, user_addr + user_size - + sgl->lpage_size, sgl->lpage_size)) { + rc = -EFAULT; + goto err_out1; + } + } + return 0; + + err_out1: + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, + sgl->fpage_dma_addr); + err_out: + __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, + sgl->sgl_dma_addr); + return -ENOMEM; } -int genwqe_setup_sgl(struct genwqe_dev *cd, - unsigned long offs, - unsigned long size, - struct sg_entry *sgl, - dma_addr_t dma_addr, size_t sgl_size, - dma_addr_t *dma_list, int page_offs, int num_pages) +int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, + dma_addr_t *dma_list) { int i = 0, j = 0, p; unsigned long dma_offs, map_offs; - struct pci_dev *pci_dev = cd->pci_dev; dma_addr_t prev_daddr = 0; struct sg_entry *s, *last_s = NULL; - - /* sanity checks */ - if (offs > PAGE_SIZE) { - dev_err(&pci_dev->dev, - "[%s] too large start offs %08lx\n", __func__, offs); - return -EFAULT; - } - if (sgl_size < genwqe_sgl_size(num_pages)) { - dev_err(&pci_dev->dev, - "[%s] sgl_size too small %08lx for %d pages\n", - __func__, sgl_size, num_pages); - return -EFAULT; - } + size_t size = sgl->user_size; dma_offs = 128; /* next block if needed/dma_offset */ - map_offs = offs; /* offset in first page */ + map_offs = sgl->fpage_offs; /* offset in first page */ - s = &sgl[0]; /* first set of 8 entries */ + s = &sgl->sgl[0]; /* first set of 8 entries */ p = 0; /* page */ - while (p < num_pages) { + while (p < sgl->nr_pages) { dma_addr_t daddr; unsigned int size_to_map; /* always write the chaining entry, cleanup is done later */ j = 0; - 
s[j].target_addr = cpu_to_be64(dma_addr + dma_offs); + s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs); s[j].len = cpu_to_be32(128); s[j].flags = cpu_to_be32(SG_CHAINED); j++; @@ -343,7 +383,17 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, while (j < 8) { /* DMA mapping for requested page, offs, size */ size_to_map = min(size, PAGE_SIZE - map_offs); - daddr = dma_list[page_offs + p] + map_offs; + + if ((p == 0) && (sgl->fpage != NULL)) { + daddr = sgl->fpage_dma_addr + map_offs; + + } else if ((p == sgl->nr_pages - 1) && + (sgl->lpage != NULL)) { + daddr = sgl->lpage_dma_addr; + } else { + daddr = dma_list[p] + map_offs; + } + size -= size_to_map; map_offs = 0; @@ -358,7 +408,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, size_to_map); p++; /* process next page */ - if (p == num_pages) + if (p == sgl->nr_pages) goto fixup; /* nothing to do */ prev_daddr = daddr + size_to_map; @@ -374,7 +424,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, j++; p++; /* process next page */ - if (p == num_pages) + if (p == sgl->nr_pages) goto fixup; /* nothing to do */ } dma_offs += 128; @@ -395,10 +445,50 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, return 0; } -void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, - dma_addr_t dma_addr, size_t size) +/** + * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages + * + * After the DMA transfer has been completed we free the memory for + * the sgl and the cached pages. Data is being transfered from cached + * pages into user-space buffers. + */ +int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl) { - __genwqe_free_consistent(cd, size, sg_list, dma_addr); + int rc = 0; + struct pci_dev *pci_dev = cd->pci_dev; + + if (sgl->fpage) { + if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs, + sgl->fpage_size)) { + dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n", + __func__); + rc = -EFAULT; + } + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage, + sgl->fpage_dma_addr); + sgl->fpage = NULL; + sgl->fpage_dma_addr = 0; + } + if (sgl->lpage) { + if (copy_to_user(sgl->user_addr + sgl->user_size - + sgl->lpage_size, sgl->lpage, + sgl->lpage_size)) { + dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n", + __func__); + rc = -EFAULT; + } + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage, + sgl->lpage_dma_addr); + sgl->lpage = NULL; + sgl->lpage_dma_addr = 0; + } + __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl, + sgl->sgl_dma_addr); + + sgl->sgl = NULL; + sgl->sgl_dma_addr = 0x0; + sgl->sgl_size = 0; + return rc; } /** @@ -628,7 +718,7 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) int rc; struct pci_dev *pci_dev = cd->pci_dev; - rc = pci_enable_msi_block(pci_dev, count); + rc = pci_enable_msi_exact(pci_dev, count); if (rc == 0) cd->flags |= GENWQE_FLAG_MSI_ENABLED; return rc; diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h index 46e916b36c70..cd5263163a6e 100644 --- a/drivers/misc/genwqe/genwqe_driver.h +++ b/drivers/misc/genwqe/genwqe_driver.h @@ -36,7 +36,7 @@ #include <asm/byteorder.h> #include <linux/genwqe/genwqe_card.h> -#define DRV_VERS_STRING "2.0.0" +#define DRV_VERS_STRING "2.0.15" /* * Static minor number assignement, until we decide/implement diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c index 170bd3daf336..90520d76633f 100644 --- a/drivers/misc/hmc6352.c +++ b/drivers/misc/hmc6352.c @@ -22,7 +22,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/slab.h> #include 
<linux/i2c.h> #include <linux/err.h> diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c index e3183f26216b..12c30b486b27 100644 --- a/drivers/misc/isl29003.c +++ b/drivers/misc/isl29003.c @@ -26,7 +26,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index b7f84dacf822..4a9c50a43afb 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c @@ -23,7 +23,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/err.h> diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 61fbe6acabef..0a1565e63c71 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -12,7 +12,6 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/spi/spi.h> #include <linux/platform_device.h> #include <linux/delay.h> diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index 036effe9a795..3ef4627f9cb1 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -23,7 +23,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> -#include <linux/init.h> #include <linux/dmi.h> #include <linux/module.h> #include <linux/types.h> diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index 7c97550240f1..d324f8a97b88 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -26,7 +26,6 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/pm_runtime.h> diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c index 9aa2bd2a71ae..bd06d0cfac45 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c @@ -10,7 +10,6 @@ #include <linux/module.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/err.h> #include <linux/input.h> #include <linux/interrupt.h> diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 49c7a23f02fc..d66a2f24f6b3 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c @@ -30,6 +30,7 @@ * * See Documentation/fault-injection/provoke-crashes.txt for instructions */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/fs.h> @@ -45,6 +46,7 @@ #include <linux/debugfs.h> #include <linux/vmalloc.h> #include <linux/mman.h> +#include <asm/cacheflush.h> #ifdef CONFIG_IDE #include <linux/ide.h> @@ -101,6 +103,7 @@ enum ctype { CT_EXEC_USERSPACE, CT_ACCESS_USERSPACE, CT_WRITE_RO, + CT_WRITE_KERN, }; static char* cp_name[] = { @@ -137,6 +140,7 @@ static char* cp_type[] = { "EXEC_USERSPACE", "ACCESS_USERSPACE", "WRITE_RO", + "WRITE_KERN", }; static struct jprobe lkdtm; @@ -316,6 +320,13 @@ static void do_nothing(void) return; } +/* Must immediately follow do_nothing for size calculuations to work out. */ +static void do_overwritten(void) +{ + pr_info("do_overwritten wasn't overwritten!\n"); + return; +} + static noinline void corrupt_stack(void) { /* Use default char array length that triggers stack protection. 
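The new CT_WRITE_KERN crash point sizes its memcpy() from the distance between do_nothing() and do_overwritten(), which is why the comment insists that the two functions stay adjacent. A hedged user-space illustration of the same trick (function names here are made up; an optimizing compiler may reorder or pad the functions, which is exactly the fragility the kernel comment warns about):

#include <stdio.h>
#include <stdint.h>

/* Two trivially adjacent functions; their distance approximates the
 * size of the first one, provided the compiler emits them in source
 * order and does not merge or reorder them. */
static __attribute__((noinline)) void first(void)
{
	asm volatile("");	/* keep the body from being optimised away */
}

static __attribute__((noinline)) void second(void)
{
	asm volatile("");
}

int main(void)
{
	size_t size = (uintptr_t)second - (uintptr_t)first;

	/* a nonsensically huge value here means the compiler moved things */
	printf("first() is roughly %zu bytes long\n", size);
	return 0;
}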
*/ @@ -328,7 +339,12 @@ static void execute_location(void *dst) { void (*func)(void) = dst; + pr_info("attempting ok execution at %p\n", do_nothing); + do_nothing(); + memcpy(dst, do_nothing, EXEC_SIZE); + flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); + pr_info("attempting bad execution at %p\n", func); func(); } @@ -337,8 +353,13 @@ static void execute_user_location(void *dst) /* Intentionally crossing kernel/user memory boundary. */ void (*func)(void) = dst; + pr_info("attempting ok execution at %p\n", do_nothing); + do_nothing(); + if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) return; + flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); + pr_info("attempting bad execution at %p\n", func); func(); } @@ -463,8 +484,12 @@ static void lkdtm_do_action(enum ctype which) } ptr = (unsigned long *)user_addr; + + pr_info("attempting bad read at %p\n", ptr); tmp = *ptr; tmp += 0xc0dec0de; + + pr_info("attempting bad write at %p\n", ptr); *ptr = tmp; vm_munmap(user_addr, PAGE_SIZE); @@ -475,10 +500,28 @@ static void lkdtm_do_action(enum ctype which) unsigned long *ptr; ptr = (unsigned long *)&rodata; + + pr_info("attempting bad write at %p\n", ptr); *ptr ^= 0xabcd1234; break; } + case CT_WRITE_KERN: { + size_t size; + unsigned char *ptr; + + size = (unsigned long)do_overwritten - + (unsigned long)do_nothing; + ptr = (unsigned char *)do_overwritten; + + pr_info("attempting bad %zu byte write at %p\n", size, ptr); + memcpy(ptr, (unsigned char *)do_nothing, size); + flush_icache_range((unsigned long)ptr, + (unsigned long)(ptr + size)); + + do_overwritten(); + break; + } case CT_NONE: default: break; @@ -493,8 +536,8 @@ static void lkdtm_handler(void) spin_lock_irqsave(&count_lock, flags); count--; - printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", - cp_name_to_str(cpoint), cp_type_to_str(cptype), count); + pr_info("Crash point %s of type %s hit, trigger in %d rounds\n", + cp_name_to_str(cpoint), cp_type_to_str(cptype), count); if (count == 0) { do_it = true; @@ -551,18 +594,18 @@ static int lkdtm_register_cpoint(enum cname which) lkdtm.kp.symbol_name = "generic_ide_ioctl"; lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl; #else - printk(KERN_INFO "lkdtm: Crash point not available\n"); + pr_info("Crash point not available\n"); return -EINVAL; #endif break; default: - printk(KERN_INFO "lkdtm: Invalid Crash Point\n"); + pr_info("Invalid Crash Point\n"); return -EINVAL; } cpoint = which; if ((ret = register_jprobe(&lkdtm)) < 0) { - printk(KERN_INFO "lkdtm: Couldn't register jprobe\n"); + pr_info("Couldn't register jprobe\n"); cpoint = CN_INVALID; } @@ -709,8 +752,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf, if (type == CT_NONE) return -EINVAL; - printk(KERN_INFO "lkdtm: Performing direct entry %s\n", - cp_type_to_str(type)); + pr_info("Performing direct entry %s\n", cp_type_to_str(type)); lkdtm_do_action(type); *off += count; @@ -772,7 +814,7 @@ static int __init lkdtm_module_init(void) /* Register debugfs interface */ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL); if (!lkdtm_debugfs_root) { - printk(KERN_ERR "lkdtm: creating root dir failed\n"); + pr_err("creating root dir failed\n"); return -ENODEV; } @@ -787,28 +829,26 @@ static int __init lkdtm_module_init(void) de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, NULL, &cur->fops); if (de == NULL) { - printk(KERN_ERR "lkdtm: could not create %s\n", - cur->name); + pr_err("could not create %s\n", 
cur->name); goto out_err; } } if (lkdtm_parse_commandline() == -EINVAL) { - printk(KERN_INFO "lkdtm: Invalid command\n"); + pr_info("Invalid command\n"); goto out_err; } if (cpoint != CN_INVALID && cptype != CT_NONE) { ret = lkdtm_register_cpoint(cpoint); if (ret < 0) { - printk(KERN_INFO "lkdtm: Invalid crash point %d\n", - cpoint); + pr_info("Invalid crash point %d\n", cpoint); goto out_err; } - printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n", - cpoint_name, cpoint_type); + pr_info("Crash point %s of type %s registered\n", + cpoint_name, cpoint_type); } else { - printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n"); + pr_info("No crash points registered, enable through debugfs\n"); } return 0; @@ -823,7 +863,7 @@ static void __exit lkdtm_module_exit(void) debugfs_remove_recursive(lkdtm_debugfs_root); unregister_jprobe(&lkdtm); - printk(KERN_INFO "lkdtm: Crash point unregistered\n"); + pr_info("Crash point unregistered\n"); } module_init(lkdtm_module_init); diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index c76fa31e9bf6..d23384dde73b 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -34,3 +34,12 @@ config INTEL_MEI_ME 82Q33 Express 82X38/X48 Express +config INTEL_MEI_TXE + tristate "Intel Trusted Execution Environment with ME Interface" + select INTEL_MEI + depends on X86 && PCI && WATCHDOG_CORE + help + MEI Support for Trusted Execution Environment device on Intel SoCs + + Supported SoCs: + Intel Bay Trail diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 08698a466268..8ebc6cda1373 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -1,6 +1,6 @@ # # Makefile - Intel Management Engine Interface (Intel MEI) Linux driver -# Copyright (c) 2010-2011, Intel Corporation. +# Copyright (c) 2010-2014, Intel Corporation. 
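For reference, the pr_*-converted lkdtm debugfs interface above is exercised by writing a crash-type name into the provoke-crash directory. A minimal user-space trigger, assuming debugfs is mounted in its usual place and the lkdtm module is loaded (expect the machine to oops as soon as the write lands):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

/* Path assumes debugfs at /sys/kernel/debug and lkdtm loaded. */
#define LKDTM_DIRECT "/sys/kernel/debug/provoke-crash/DIRECT"

int main(int argc, char **argv)
{
	const char *type = argc > 1 ? argv[1] : "WRITE_KERN";
	int fd = open(LKDTM_DIRECT, O_WRONLY);

	if (fd < 0) {
		perror(LKDTM_DIRECT);
		return 1;
	}
	/* Writing a crash-type name runs lkdtm_do_action() immediately,
	 * so the kernel is expected to oops or panic right here. */
	if (write(fd, type, strlen(type)) < 0)
		perror("write");
	close(fd);
	return 0;
}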
# obj-$(CONFIG_INTEL_MEI) += mei.o mei-objs := init.o @@ -17,3 +17,7 @@ mei-$(CONFIG_DEBUG_FS) += debugfs.o obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o mei-me-objs := pci-me.o mei-me-objs += hw-me.o + +obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o +mei-txe-objs := pci-txe.o +mei-txe-objs += hw-txe.o diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index 2fad84432829..0d6234db00fa 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c @@ -21,7 +21,6 @@ #include <linux/fcntl.h> #include <linux/aio.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/ioctl.h> #include <linux/cdev.h> #include <linux/list.h> @@ -35,7 +34,6 @@ #include "mei_dev.h" #include "hbm.h" -#include "hw-me.h" #include "client.h" const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, @@ -79,10 +77,9 @@ int mei_amthif_host_init(struct mei_device *dev) i = mei_me_cl_by_uuid(dev, &mei_amthif_guid); if (i < 0) { - ret = i; dev_info(&dev->pdev->dev, - "amthif: failed to find the client %d\n", ret); - return ret; + "amthif: failed to find the client %d\n", i); + return -ENOTTY; } cl->me_client_id = dev->me_clients[i].client_id; @@ -114,16 +111,11 @@ int mei_amthif_host_init(struct mei_device *dev) return ret; } - cl->state = MEI_FILE_CONNECTING; + ret = mei_cl_connect(cl, NULL); - if (mei_hbm_cl_connect_req(dev, cl)) { - dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n"); - cl->state = MEI_FILE_DISCONNECTED; - cl->host_client_id = 0; - } else { - cl->timer_count = MEI_CONNECT_TIMEOUT; - } - return 0; + dev->iamthif_state = MEI_IAMTHIF_IDLE; + + return ret; } /** @@ -137,14 +129,12 @@ int mei_amthif_host_init(struct mei_device *dev) struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, struct file *file) { - struct mei_cl_cb *pos = NULL; - struct mei_cl_cb *next = NULL; + struct mei_cl_cb *cb; - list_for_each_entry_safe(pos, next, - &dev->amthif_rd_complete_list.list, list) { - if (pos->cl && pos->cl == &dev->iamthif_cl && - pos->file_object == file) - return pos; + list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) { + if (cb->cl && cb->cl == &dev->iamthif_cl && + cb->file_object == file) + return cb; } return NULL; } @@ -180,14 +170,13 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, /* Only possible if we are in timeout */ if (!cl || cl != &dev->iamthif_cl) { dev_dbg(&dev->pdev->dev, "bad file ext.\n"); - return -ETIMEDOUT; + return -ETIME; } i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id); - if (i < 0) { dev_dbg(&dev->pdev->dev, "amthif client not found.\n"); - return -ENODEV; + return -ENOTTY; } dev_dbg(&dev->pdev->dev, "checking amthif data\n"); cb = mei_amthif_find_read_list_entry(dev, file); @@ -228,7 +217,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, dev_dbg(&dev->pdev->dev, "amthif Time out\n"); /* 15 sec for the message has expired */ list_del(&cb->list); - rets = -ETIMEDOUT; + rets = -ETIME; goto free; } } @@ -253,9 +242,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, * the buf_idx may point beyond */ length = min_t(size_t, length, (cb->buf_idx - *offset)); - if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) + if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { + dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); rets = -EFAULT; - else { + } else { rets = length; if ((*offset + length) < cb->buf_idx) { *offset += length; @@ -302,9 +292,8 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct 
mei_cl_cb *cb) if (ret < 0) return ret; - if (ret && dev->hbuf_is_ready) { + if (ret && mei_hbuf_acquire(dev)) { ret = 0; - dev->hbuf_is_ready = false; if (cb->request_buffer.size > mei_hbuf_max_len(dev)) { mei_hdr.length = mei_hbuf_max_len(dev); mei_hdr.msg_complete = 0; @@ -336,10 +325,6 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) list_add_tail(&cb->list, &dev->write_list.list); } } else { - if (!dev->hbuf_is_ready) - dev_dbg(&dev->pdev->dev, "host buffer is not empty"); - - dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n"); list_add_tail(&cb->list, &dev->write_list.list); } return 0; @@ -365,7 +350,7 @@ int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb) if (ret) return ret; - cb->fop_type = MEI_FOP_IOCTL; + cb->fop_type = MEI_FOP_WRITE; if (!list_empty(&dev->amthif_cmd_list.list) || dev->iamthif_state != MEI_IAMTHIF_IDLE) { @@ -447,23 +432,23 @@ unsigned int mei_amthif_poll(struct mei_device *dev, /** - * mei_amthif_irq_write_completed - processes completed iamthif operation. + * mei_amthif_irq_write - write iamthif command in irq thread context. * * @dev: the device structure. - * @slots: free slots. * @cb_pos: callback block. * @cl: private data of the file object. * @cmpl_list: complete list. * * returns 0, OK; otherwise, error. */ -int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, - s32 *slots, struct mei_cl_cb *cmpl_list) +int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) { struct mei_device *dev = cl->dev; struct mei_msg_hdr mei_hdr; size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index; u32 msg_slots = mei_data2slots(len); + int slots; int rets; rets = mei_cl_flow_ctrl_creds(cl); @@ -480,13 +465,15 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, mei_hdr.reserved = 0; mei_hdr.internal = 0; - if (*slots >= msg_slots) { + slots = mei_hbuf_empty_slots(dev); + + if (slots >= msg_slots) { mei_hdr.length = len; mei_hdr.msg_complete = 1; /* Split the message only if we can write the whole host buffer */ - } else if (*slots == dev->hbuf_depth) { - msg_slots = *slots; - len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); + } else if (slots == dev->hbuf_depth) { + msg_slots = slots; + len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); mei_hdr.length = len; mei_hdr.msg_complete = 0; } else { @@ -496,7 +483,6 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr)); - *slots -= msg_slots; rets = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf + dev->iamthif_msg_buf_index); if (rets) { diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 4bc7d620d695..0e993ef28b94 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -26,7 +26,6 @@ #include <linux/mei_cl_bus.h> #include "mei_dev.h" -#include "hw-me.h" #include "client.h" #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) @@ -145,9 +144,9 @@ static struct device_type mei_cl_device_type = { static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev, uuid_le uuid) { - struct mei_cl *cl, *next; + struct mei_cl *cl; - list_for_each_entry_safe(cl, next, &dev->device_list, device_link) { + list_for_each_entry(cl, &dev->device_list, device_link) { if (!uuid_le_cmp(uuid, cl->device_uuid)) return cl; } @@ -248,7 +247,7 @@ static int ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t 
length, return id; if (length > dev->me_clients[id].props.max_msg_length) - return -EINVAL; + return -EFBIG; cb = mei_io_cb_init(cl, NULL); if (!cb) @@ -428,8 +427,6 @@ int mei_cl_enable_device(struct mei_cl_device *device) mutex_lock(&dev->device_lock); - cl->state = MEI_FILE_CONNECTING; - err = mei_cl_connect(cl, NULL); if (err < 0) { mutex_unlock(&dev->device_lock); @@ -524,6 +521,22 @@ void mei_cl_bus_rx_event(struct mei_cl *cl) schedule_work(&device->event_work); } +void mei_cl_bus_remove_devices(struct mei_device *dev) +{ + struct mei_cl *cl, *next; + + mutex_lock(&dev->device_lock); + list_for_each_entry_safe(cl, next, &dev->device_list, device_link) { + if (cl->device) + mei_cl_remove_device(cl->device); + + list_del(&cl->device_link); + mei_cl_unlink(cl); + kfree(cl); + } + mutex_unlock(&dev->device_lock); +} + int __init mei_cl_bus_init(void) { return bus_register(&mei_cl_bus_type); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 89a557972d1b..59d20c599b16 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -18,6 +18,7 @@ #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> #include <linux/mei.h> @@ -29,20 +30,21 @@ * mei_me_cl_by_uuid - locate index of me client * * @dev: mei device + * + * Locking: called under "dev->device_lock" lock + * * returns me client index or -ENOENT if not found */ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) { - int i, res = -ENOENT; + int i; for (i = 0; i < dev->me_clients_num; ++i) if (uuid_le_cmp(*uuid, - dev->me_clients[i].props.protocol_name) == 0) { - res = i; - break; - } + dev->me_clients[i].props.protocol_name) == 0) + return i; - return res; + return -ENOENT; } @@ -60,37 +62,79 @@ int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) { int i; + for (i = 0; i < dev->me_clients_num; i++) if (dev->me_clients[i].client_id == client_id) - break; - if (WARN_ON(dev->me_clients[i].client_id != client_id)) - return -ENOENT; - - if (i == dev->me_clients_num) - return -ENOENT; + return i; - return i; + return -ENOENT; } /** - * mei_io_list_flush - removes list entry belonging to cl. + * mei_cl_cmp_id - tells if the clients are the same * - * @list: An instance of our list structure - * @cl: host client + * @cl1: host client 1 + * @cl2: host client 2 + * + * returns true - if the clients has same host and me ids + * false - otherwise + */ +static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, + const struct mei_cl *cl2) +{ + return cl1 && cl2 && + (cl1->host_client_id == cl2->host_client_id) && + (cl1->me_client_id == cl2->me_client_id); +} + +/** + * mei_io_list_flush - removes cbs belonging to cl. + * + * @list: an instance of our list structure + * @cl: host client, can be NULL for flushing the whole list + * @free: whether to free the cbs */ -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +static void __mei_io_list_flush(struct mei_cl_cb *list, + struct mei_cl *cl, bool free) { struct mei_cl_cb *cb; struct mei_cl_cb *next; + /* enable removing everything if no cl is specified */ list_for_each_entry_safe(cb, next, &list->list, list) { - if (cb->cl && mei_cl_cmp_id(cl, cb->cl)) + if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) { list_del(&cb->list); + if (free) + mei_io_cb_free(cb); + } } } /** + * mei_io_list_flush - removes list entry belonging to cl. 
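Several hunks in this file (and more below) downgrade list_for_each_entry_safe() to plain list_for_each_entry() where the loop never unlinks anything, and funnel the unlink-and-free cases through the new __mei_io_list_flush() worker. A toy, MEI-free sketch of the underlying rule (plain C, home-grown singly linked list, error handling omitted):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int owner;
};

/* Read-only walk: nothing is unlinked, so plain iteration is enough
 * (the analogue of switching to list_for_each_entry()). */
static struct node *find(struct node *head, int owner)
{
	for (struct node *n = head; n; n = n->next)
		if (n->owner == owner)
			return n;
	return NULL;
}

/* Destructive walk: the node is unlinked before it is freed, so the
 * walk never touches freed memory -- the property that
 * list_for_each_entry_safe() (or the flush helper) provides.
 * owner < 0 means "flush everything", like cl == NULL above. */
static struct node *free_owned(struct node *head, int owner)
{
	struct node **pp = &head;

	while (*pp) {
		struct node *n = *pp;

		if (owner < 0 || n->owner == owner) {
			*pp = n->next;		/* unlink ... */
			free(n);		/* ... then free */
		} else {
			pp = &n->next;
		}
	}
	return head;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 6; i++) {
		struct node *n = malloc(sizeof(*n));

		n->owner = i % 2;
		n->next = head;
		head = n;
	}
	printf("client 1 queued: %s\n", find(head, 1) ? "yes" : "no");
	head = free_owned(head, 1);	/* flush one client's entries */
	head = free_owned(head, -1);	/* then flush the rest */
	return head ? 1 : 0;
}

Delist-only callers keep ownership of the cb elsewhere; the freeing variant is for queues whose entries have no other owner, which is why mei_cl_all_write_clear() further down collapses into two mei_io_list_free() calls.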
+ * + * @list: An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +{ + __mei_io_list_flush(list, cl, false); +} + + +/** + * mei_io_list_free - removes cb belonging to cl and free them + * + * @list: An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl) +{ + __mei_io_list_flush(list, cl, true); +} + +/** * mei_io_cb_free - free mei_cb_private related memory * * @cb: mei callback struct @@ -196,8 +240,8 @@ int mei_cl_flush_queues(struct mei_cl *cl) cl_dbg(dev, cl, "remove list entry belonging to cl\n"); mei_io_list_flush(&cl->dev->read_list, cl); - mei_io_list_flush(&cl->dev->write_list, cl); - mei_io_list_flush(&cl->dev->write_waiting_list, cl); + mei_io_list_free(&cl->dev->write_list, cl); + mei_io_list_free(&cl->dev->write_waiting_list, cl); mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); @@ -254,10 +298,9 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev) struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) { struct mei_device *dev = cl->dev; - struct mei_cl_cb *cb = NULL; - struct mei_cl_cb *next = NULL; + struct mei_cl_cb *cb; - list_for_each_entry_safe(cb, next, &dev->read_list.list, list) + list_for_each_entry(cb, &dev->read_list.list, list) if (mei_cl_cmp_id(cl, cb->cl)) return cb; return NULL; @@ -373,8 +416,35 @@ void mei_host_client_init(struct work_struct *work) dev->reset_count = 0; mutex_unlock(&dev->device_lock); + + pm_runtime_mark_last_busy(&dev->pdev->dev); + dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n"); + pm_runtime_autosuspend(&dev->pdev->dev); } +/** + * mei_hbuf_acquire: try to acquire host buffer + * + * @dev: the device structure + * returns true if host buffer was acquired + */ +bool mei_hbuf_acquire(struct mei_device *dev) +{ + if (mei_pg_state(dev) == MEI_PG_ON || + dev->pg_event == MEI_PG_EVENT_WAIT) { + dev_dbg(&dev->pdev->dev, "device is in pg\n"); + return false; + } + + if (!dev->hbuf_is_ready) { + dev_dbg(&dev->pdev->dev, "hbuf is not ready\n"); + return false; + } + + dev->hbuf_is_ready = false; + + return true; +} /** * mei_cl_disconnect - disconnect host client from the me one @@ -401,13 +471,21 @@ int mei_cl_disconnect(struct mei_cl *cl) if (cl->state != MEI_FILE_DISCONNECTING) return 0; + rets = pm_runtime_get(&dev->pdev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(&dev->pdev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + cb = mei_io_cb_init(cl, NULL); - if (!cb) - return -ENOMEM; + if (!cb) { + rets = -ENOMEM; + goto free; + } cb->fop_type = MEI_FOP_CLOSE; - if (dev->hbuf_is_ready) { - dev->hbuf_is_ready = false; + if (mei_hbuf_acquire(dev)) { if (mei_hbm_cl_disconnect_req(dev, cl)) { rets = -ENODEV; cl_err(dev, cl, "failed to disconnect.\n"); @@ -436,8 +514,7 @@ int mei_cl_disconnect(struct mei_cl *cl) cl_err(dev, cl, "wrong status client disconnect.\n"); if (err) - cl_dbg(dev, cl, "wait failed disconnect err=%08x\n", - err); + cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err); cl_err(dev, cl, "failed to disconnect from FW client.\n"); } @@ -445,6 +522,10 @@ int mei_cl_disconnect(struct mei_cl *cl) mei_io_list_flush(&dev->ctrl_rd_list, cl); mei_io_list_flush(&dev->ctrl_wr_list, cl); free: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(&dev->pdev->dev); + 
pm_runtime_put_autosuspend(&dev->pdev->dev); + mei_io_cb_free(cb); return rets; } @@ -461,17 +542,17 @@ free: bool mei_cl_is_other_connecting(struct mei_cl *cl) { struct mei_device *dev; - struct mei_cl *pos; - struct mei_cl *next; + struct mei_cl *ocl; /* the other client */ if (WARN_ON(!cl || !cl->dev)) return false; dev = cl->dev; - list_for_each_entry_safe(pos, next, &dev->file_list, link) { - if ((pos->state == MEI_FILE_CONNECTING) && - (pos != cl) && cl->me_client_id == pos->me_client_id) + list_for_each_entry(ocl, &dev->file_list, link) { + if (ocl->state == MEI_FILE_CONNECTING && + ocl != cl && + cl->me_client_id == ocl->me_client_id) return true; } @@ -499,17 +580,24 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) dev = cl->dev; + rets = pm_runtime_get(&dev->pdev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(&dev->pdev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + cb = mei_io_cb_init(cl, file); if (!cb) { rets = -ENOMEM; goto out; } - cb->fop_type = MEI_FOP_IOCTL; - - if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) { - dev->hbuf_is_ready = false; + cb->fop_type = MEI_FOP_CONNECT; + /* run hbuf acquire last so we don't have to undo */ + if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { + cl->state = MEI_FILE_CONNECTING; if (mei_hbm_cl_connect_req(dev, cl)) { rets = -ENODEV; goto out; @@ -521,23 +609,28 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) } mutex_unlock(&dev->device_lock); - rets = wait_event_timeout(dev->wait_recvd_msg, - (cl->state == MEI_FILE_CONNECTED || - cl->state == MEI_FILE_DISCONNECTED), - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + wait_event_timeout(dev->wait_recvd_msg, + (cl->state == MEI_FILE_CONNECTED || + cl->state == MEI_FILE_DISCONNECTED), + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); if (cl->state != MEI_FILE_CONNECTED) { - rets = -EFAULT; + /* something went really wrong */ + if (!cl->status) + cl->status = -EFAULT; mei_io_list_flush(&dev->ctrl_rd_list, cl); mei_io_list_flush(&dev->ctrl_wr_list, cl); - goto out; } rets = cl->status; out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(&dev->pdev->dev); + pm_runtime_put_autosuspend(&dev->pdev->dev); + mei_io_cb_free(cb); return rets; } @@ -554,7 +647,8 @@ out: int mei_cl_flow_ctrl_creds(struct mei_cl *cl) { struct mei_device *dev; - int i; + struct mei_me_client *me_cl; + int id; if (WARN_ON(!cl || !cl->dev)) return -EINVAL; @@ -567,19 +661,19 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) if (cl->mei_flow_ctrl_creds > 0) return 1; - for (i = 0; i < dev->me_clients_num; i++) { - struct mei_me_client *me_cl = &dev->me_clients[i]; - if (me_cl->client_id == cl->me_client_id) { - if (me_cl->mei_flow_ctrl_creds) { - if (WARN_ON(me_cl->props.single_recv_buf == 0)) - return -EINVAL; - return 1; - } else { - return 0; - } - } + id = mei_me_cl_by_id(dev, cl->me_client_id); + if (id < 0) { + cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); + return id; } - return -ENOENT; + + me_cl = &dev->me_clients[id]; + if (me_cl->mei_flow_ctrl_creds) { + if (WARN_ON(me_cl->props.single_recv_buf == 0)) + return -EINVAL; + return 1; + } + return 0; } /** @@ -595,32 +689,31 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) { struct mei_device *dev; - int i; + struct mei_me_client *me_cl; + int id; if (WARN_ON(!cl || !cl->dev)) return -EINVAL; dev = cl->dev; - if (!dev->me_clients_num) - return -ENOENT; + id 
= mei_me_cl_by_id(dev, cl->me_client_id); + if (id < 0) { + cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); + return id; + } - for (i = 0; i < dev->me_clients_num; i++) { - struct mei_me_client *me_cl = &dev->me_clients[i]; - if (me_cl->client_id == cl->me_client_id) { - if (me_cl->props.single_recv_buf != 0) { - if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) - return -EINVAL; - dev->me_clients[i].mei_flow_ctrl_creds--; - } else { - if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) - return -EINVAL; - cl->mei_flow_ctrl_creds--; - } - return 0; - } + me_cl = &dev->me_clients[id]; + if (me_cl->props.single_recv_buf != 0) { + if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) + return -EINVAL; + me_cl->mei_flow_ctrl_creds--; + } else { + if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) + return -EINVAL; + cl->mei_flow_ctrl_creds--; } - return -ENOENT; + return 0; } /** @@ -652,27 +745,34 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length) i = mei_me_cl_by_id(dev, cl->me_client_id); if (i < 0) { cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); - return -ENODEV; + return -ENOTTY; + } + + rets = pm_runtime_get(&dev->pdev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(&dev->pdev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; } cb = mei_io_cb_init(cl, NULL); - if (!cb) - return -ENOMEM; + if (!cb) { + rets = -ENOMEM; + goto out; + } /* always allocate at least client max message */ length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); rets = mei_io_cb_alloc_resp_buf(cb, length); if (rets) - goto err; + goto out; cb->fop_type = MEI_FOP_READ; - if (dev->hbuf_is_ready) { - dev->hbuf_is_ready = false; - if (mei_hbm_cl_flow_control_req(dev, cl)) { - cl_err(dev, cl, "flow control send failed\n"); - rets = -ENODEV; - goto err; - } + if (mei_hbuf_acquire(dev)) { + rets = mei_hbm_cl_flow_control_req(dev, cl); + if (rets < 0) + goto out; + list_add_tail(&cb->list, &dev->read_list.list); } else { list_add_tail(&cb->list, &dev->ctrl_wr_list.list); @@ -680,34 +780,38 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length) cl->read_cb = cb; - return rets; -err: - mei_io_cb_free(cb); +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(&dev->pdev->dev); + pm_runtime_put_autosuspend(&dev->pdev->dev); + + if (rets) + mei_io_cb_free(cb); + return rets; } /** - * mei_cl_irq_write_complete - write a message to device + * mei_cl_irq_write - write a message to device * from the interrupt thread context * * @cl: client * @cb: callback block. - * @slots: free slots. * @cmpl_list: complete list. * * returns 0, OK; otherwise error. 
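The disconnect, connect, read and (below) write paths all gain the same runtime-PM bracket around the actual work. Stripped of the MEI specifics it looks like this (kernel-style sketch; do_pm_bracketed_op() is a made-up name and the work itself is elided):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Generic shape of the runtime-PM bracket the patch adds around every
 * client operation: take a usage reference (an in-flight asynchronous
 * resume is fine, hence -EINPROGRESS is not an error), do the work,
 * then re-arm the autosuspend timer and drop the reference. */
static int do_pm_bracketed_op(struct device *dev)
{
	int ret;

	ret = pm_runtime_get(dev);
	if (ret < 0 && ret != -EINPROGRESS) {
		pm_runtime_put_noidle(dev);	/* rebalance the usage count */
		return ret;
	}

	ret = 0;	/* ... the actual client operation goes here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}

pm_runtime_get() may return -EINPROGRESS when a resume is already in flight, which is why only other negative values abort the operation; the put_noidle() in that branch undoes the reference that get() took without triggering an idle check.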
*/ -int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, - s32 *slots, struct mei_cl_cb *cmpl_list) +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) { struct mei_device *dev; struct mei_msg_data *buf; struct mei_msg_hdr mei_hdr; size_t len; u32 msg_slots; + int slots; int rets; - if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -720,10 +824,11 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, return rets; if (rets == 0) { - cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); return 0; } + slots = mei_hbuf_empty_slots(dev); len = buf->size - cb->buf_idx; msg_slots = mei_data2slots(len); @@ -732,13 +837,13 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, mei_hdr.reserved = 0; mei_hdr.internal = cb->internal; - if (*slots >= msg_slots) { + if (slots >= msg_slots) { mei_hdr.length = len; mei_hdr.msg_complete = 1; /* Split the message only if we can write the whole host buffer */ - } else if (*slots == dev->hbuf_depth) { - msg_slots = *slots; - len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); + } else if (slots == dev->hbuf_depth) { + msg_slots = slots; + len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); mei_hdr.length = len; mei_hdr.msg_complete = 0; } else { @@ -749,7 +854,6 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, cl_dbg(dev, cl, "buf: size = %d idx = %lu\n", cb->request_buffer.size, cb->buf_idx); - *slots -= msg_slots; rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); if (rets) { cl->status = rets; @@ -800,23 +904,37 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size); + rets = pm_runtime_get(&dev->pdev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(&dev->pdev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } cb->fop_type = MEI_FOP_WRITE; + cb->buf_idx = 0; + cl->writing_state = MEI_IDLE; + + mei_hdr.host_addr = cl->host_client_id; + mei_hdr.me_addr = cl->me_client_id; + mei_hdr.reserved = 0; + mei_hdr.msg_complete = 0; + mei_hdr.internal = cb->internal; rets = mei_cl_flow_ctrl_creds(cl); if (rets < 0) goto err; - /* Host buffer is not ready, we queue the request */ - if (rets == 0 || !dev->hbuf_is_ready) { - cb->buf_idx = 0; - /* unseting complete will enqueue the cb for write */ - mei_hdr.msg_complete = 0; + if (rets == 0) { + cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); + rets = buf->size; + goto out; + } + if (!mei_hbuf_acquire(dev)) { + cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); rets = buf->size; goto out; } - - dev->hbuf_is_ready = false; /* Check for a maximum length */ if (buf->size > mei_hbuf_max_len(dev)) { @@ -827,12 +945,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) mei_hdr.msg_complete = 1; } - mei_hdr.host_addr = cl->host_client_id; - mei_hdr.me_addr = cl->me_client_id; - mei_hdr.reserved = 0; - mei_hdr.internal = cb->internal; - - rets = mei_write_message(dev, &mei_hdr, buf->data); if (rets) goto err; @@ -840,13 +952,12 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) cl->writing_state = MEI_WRITING; cb->buf_idx = mei_hdr.length; - rets = buf->size; out: if (mei_hdr.msg_complete) { - if (mei_cl_flow_ctrl_reduce(cl)) { - rets = -ENODEV; + rets = mei_cl_flow_ctrl_reduce(cl); + if (rets < 0) goto err; - } 
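mei_amthif_irq_write() and mei_cl_irq_write() now read the free slot count themselves via mei_hbuf_empty_slots() instead of taking a *slots parameter, but the send/split/wait decision itself is unchanged. A plain-C sketch of that decision (the 4-byte header size mirrors sizeof(struct mei_msg_hdr) and is an assumption here):

#include <stdio.h>

#define MEI_HDR_SIZE 4u	/* assumed sizeof(struct mei_msg_hdr) */

/* Slots are 32-bit dwords; a message occupies header plus payload,
 * rounded up to whole dwords (mirrors mei_data2slots()). */
static unsigned int data2slots(unsigned int len)
{
	return (MEI_HDR_SIZE + len + 3) / 4;
}

/*
 * Decide how much of a 'len'-byte payload to push, given 'slots' free
 * dwords in a host buffer that is 'hbuf_depth' dwords deep:
 *  - everything fits            -> send it all, mark it complete
 *  - buffer is completely empty -> send a partial chunk that fills it
 *  - otherwise                  -> send nothing, wait for more room
 */
static unsigned int chunk_len(unsigned int len, int slots, int hbuf_depth)
{
	if (slots >= (int)data2slots(len))
		return len;
	if (slots == hbuf_depth)
		return slots * 4 - MEI_HDR_SIZE;
	return 0;
}

int main(void)
{
	printf("%u\n", chunk_len(100, 32, 128));	/* fits: 100 */
	printf("%u\n", chunk_len(6000, 128, 128));	/* empty buffer, split: 508 */
	printf("%u\n", chunk_len(6000, 60, 128));	/* wait: 0 */
	return 0;
}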
+ list_add_tail(&cb->list, &dev->write_waiting_list.list); } else { list_add_tail(&cb->list, &dev->write_list.list); @@ -856,16 +967,23 @@ out: if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { mutex_unlock(&dev->device_lock); - if (wait_event_interruptible(cl->tx_wait, - cl->writing_state == MEI_WRITE_COMPLETE)) { - if (signal_pending(current)) - rets = -EINTR; - else - rets = -ERESTARTSYS; - } + rets = wait_event_interruptible(cl->tx_wait, + cl->writing_state == MEI_WRITE_COMPLETE); mutex_lock(&dev->device_lock); + /* wait_event_interruptible returns -ERESTARTSYS */ + if (rets) { + if (signal_pending(current)) + rets = -EINTR; + goto err; + } } + + rets = buf->size; err: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(&dev->pdev->dev); + pm_runtime_put_autosuspend(&dev->pdev->dev); + return rets; } @@ -905,9 +1023,9 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) void mei_cl_all_disconnect(struct mei_device *dev) { - struct mei_cl *cl, *next; + struct mei_cl *cl; - list_for_each_entry_safe(cl, next, &dev->file_list, link) { + list_for_each_entry(cl, &dev->file_list, link) { cl->state = MEI_FILE_DISCONNECTED; cl->mei_flow_ctrl_creds = 0; cl->timer_count = 0; @@ -922,8 +1040,8 @@ void mei_cl_all_disconnect(struct mei_device *dev) */ void mei_cl_all_wakeup(struct mei_device *dev) { - struct mei_cl *cl, *next; - list_for_each_entry_safe(cl, next, &dev->file_list, link) { + struct mei_cl *cl; + list_for_each_entry(cl, &dev->file_list, link) { if (waitqueue_active(&cl->rx_wait)) { cl_dbg(dev, cl, "Waking up reading client!\n"); wake_up_interruptible(&cl->rx_wait); @@ -942,20 +1060,8 @@ void mei_cl_all_wakeup(struct mei_device *dev) */ void mei_cl_all_write_clear(struct mei_device *dev) { - struct mei_cl_cb *cb, *next; - struct list_head *list; - - list = &dev->write_list.list; - list_for_each_entry_safe(cb, next, list, list) { - list_del(&cb->list); - mei_io_cb_free(cb); - } - - list = &dev->write_waiting_list.list; - list_for_each_entry_safe(cb, next, list, list) { - list_del(&cb->list); - mei_io_cb_free(cb); - } + mei_io_list_free(&dev->write_list, NULL); + mei_io_list_free(&dev->write_waiting_list, NULL); } diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index c8396e582f1c..96d5de0389f9 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -45,8 +45,6 @@ static inline void mei_io_list_init(struct mei_cl_cb *list) { INIT_LIST_HEAD(&list->list); } -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl); - /* * MEI Host Client Functions */ @@ -61,22 +59,6 @@ int mei_cl_unlink(struct mei_cl *cl); int mei_cl_flush_queues(struct mei_cl *cl); struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl); -/** - * mei_cl_cmp_id - tells if file private data have same id - * - * @fe1: private data of 1. file object - * @fe2: private data of 2. 
file object - * - * returns true - if ids are the same and not NULL - */ -static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, - const struct mei_cl *cl2) -{ - return cl1 && cl2 && - (cl1->host_client_id == cl2->host_client_id) && - (cl1->me_client_id == cl2->me_client_id); -} - int mei_cl_flow_ctrl_creds(struct mei_cl *cl); @@ -86,15 +68,15 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl); */ static inline bool mei_cl_is_connected(struct mei_cl *cl) { - return (cl->dev && + return cl->dev && cl->dev->dev_state == MEI_DEV_ENABLED && - cl->state == MEI_FILE_CONNECTED); + cl->state == MEI_FILE_CONNECTED; } static inline bool mei_cl_is_transitioning(struct mei_cl *cl) { - return (MEI_FILE_INITIALIZING == cl->state || + return MEI_FILE_INITIALIZING == cl->state || MEI_FILE_DISCONNECTED == cl->state || - MEI_FILE_DISCONNECTING == cl->state); + MEI_FILE_DISCONNECTING == cl->state; } bool mei_cl_is_other_connecting(struct mei_cl *cl); @@ -102,8 +84,8 @@ int mei_cl_disconnect(struct mei_cl *cl); int mei_cl_connect(struct mei_cl *cl, struct file *file); int mei_cl_read_start(struct mei_cl *cl, size_t length); int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); -int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, - s32 *slots, struct mei_cl_cb *cmpl_list); +int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list); void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index a3ae154444b2..ced5b777c70f 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -75,6 +75,54 @@ static const struct file_operations mei_dbgfs_fops_meclients = { .llseek = generic_file_llseek, }; +static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct mei_device *dev = fp->private_data; + struct mei_cl *cl; + const size_t bufsz = 1024; + char *buf; + int i = 0; + int pos = 0; + int ret; + + if (!dev) + return -ENODEV; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, + " |me|host|state|rd|wr|\n"); + + mutex_lock(&dev->device_lock); + + /* if the driver is not enabled the list won't b consitent */ + if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + + list_for_each_entry(cl, &dev->file_list, link) { + + pos += scnprintf(buf + pos, bufsz - pos, + "%2d|%2d|%4d|%5d|%2d|%2d|\n", + i, cl->me_client_id, cl->host_client_id, cl->state, + cl->reading_state, cl->writing_state); + i++; + } +out: + mutex_unlock(&dev->device_lock); + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); + kfree(buf); + return ret; +} + +static const struct file_operations mei_dbgfs_fops_active = { + .open = simple_open, + .read = mei_dbgfs_read_active, + .llseek = generic_file_llseek, +}; + static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, size_t cnt, loff_t *ppos) { @@ -128,6 +176,12 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name) dev_err(&dev->pdev->dev, "meclients: registration failed\n"); goto err; } + f = debugfs_create_file("active", S_IRUSR, dir, + dev, &mei_dbgfs_fops_active); + if (!f) { + dev_err(&dev->pdev->dev, "meclients: registration failed\n"); + goto err; + } f = debugfs_create_file("devstate", S_IRUSR, dir, dev, &mei_dbgfs_fops_devstate); if (!f) { diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 28cd74c073b9..804106209d76 100644 --- a/drivers/misc/mei/hbm.c +++ 
b/drivers/misc/mei/hbm.c @@ -14,14 +14,78 @@ * */ +#include <linux/export.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/mei.h> +#include <linux/pm_runtime.h> #include "mei_dev.h" #include "hbm.h" -#include "hw-me.h" +#include "client.h" + +static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) +{ +#define MEI_CL_CS(status) case MEI_CL_CONN_##status: return #status + switch (status) { + MEI_CL_CS(SUCCESS); + MEI_CL_CS(NOT_FOUND); + MEI_CL_CS(ALREADY_STARTED); + MEI_CL_CS(OUT_OF_RESOURCES); + MEI_CL_CS(MESSAGE_SMALL); + default: return "unknown"; + } +#undef MEI_CL_CCS +} + +/** + * mei_cl_conn_status_to_errno - convert client connect response + * status to error code + * + * @status: client connect response status + * + * returns corresponding error code + */ +static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) +{ + switch (status) { + case MEI_CL_CONN_SUCCESS: return 0; + case MEI_CL_CONN_NOT_FOUND: return -ENOTTY; + case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY; + case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY; + case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL; + default: return -EINVAL; + } +} + +/** + * mei_hbm_idle - set hbm to idle state + * + * @dev: the device structure + */ +void mei_hbm_idle(struct mei_device *dev) +{ + dev->init_clients_timer = 0; + dev->hbm_state = MEI_HBM_IDLE; +} + +/** + * mei_hbm_reset - reset hbm counters and book keeping data structurs + * + * @dev: the device structure + */ +void mei_hbm_reset(struct mei_device *dev) +{ + dev->me_clients_num = 0; + dev->me_client_presentation_num = 0; + dev->me_client_index = 0; + + kfree(dev->me_clients); + dev->me_clients = NULL; + + mei_hbm_idle(dev); +} /** * mei_hbm_me_cl_allocate - allocates storage for me clients @@ -35,9 +99,7 @@ static int mei_hbm_me_cl_allocate(struct mei_device *dev) struct mei_me_client *clients; int b; - dev->me_clients_num = 0; - dev->me_client_presentation_num = 0; - dev->me_client_index = 0; + mei_hbm_reset(dev); /* count how many ME clients we have */ for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) @@ -46,9 +108,6 @@ static int mei_hbm_me_cl_allocate(struct mei_device *dev) if (dev->me_clients_num == 0) return 0; - kfree(dev->me_clients); - dev->me_clients = NULL; - dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%ld.\n", dev->me_clients_num * sizeof(struct mei_me_client)); /* allocate storage for ME clients representation */ @@ -99,44 +158,6 @@ bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) } -/** - * is_treat_specially_client - checks if the message belongs - * to the file private data. 
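mei_cl_conn_status_str() below uses a short-lived stringifying macro so the case labels and the returned strings cannot drift apart. The same pattern in self-contained form (toy enum, nothing MEI-specific):

#include <stdio.h>

enum conn_status { CONN_SUCCESS, CONN_NOT_FOUND, CONN_ALREADY_STARTED };

/* A local macro turns each enumerator into its own name via the
 * preprocessor # stringify operator, then is undefined again. */
static const char *conn_status_str(enum conn_status status)
{
#define CS(s) case CONN_##s: return #s
	switch (status) {
	CS(SUCCESS);
	CS(NOT_FOUND);
	CS(ALREADY_STARTED);
	default: return "unknown";
	}
#undef CS
}

int main(void)
{
	printf("%s\n", conn_status_str(CONN_NOT_FOUND));
	return 0;
}

Undefining the helper right after the switch keeps it from leaking into the rest of the file; note that the hunk below undefines MEI_CL_CCS while the macro it defines is MEI_CL_CS, so that #undef is a no-op as written.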
- * - * @cl: private data of the file object - * @rs: connect response bus message - * - */ -static bool is_treat_specially_client(struct mei_cl *cl, - struct hbm_client_connect_response *rs) -{ - if (mei_hbm_cl_addr_equal(cl, rs)) { - if (!rs->status) { - cl->state = MEI_FILE_CONNECTED; - cl->status = 0; - - } else { - cl->state = MEI_FILE_DISCONNECTED; - cl->status = -ENODEV; - } - cl->timer_count = 0; - - return true; - } - return false; -} - -/** - * mei_hbm_idle - set hbm to idle state - * - * @dev: the device structure - */ -void mei_hbm_idle(struct mei_device *dev) -{ - dev->init_clients_timer = 0; - dev->hbm_state = MEI_HBM_IDLE; -} - int mei_hbm_start_wait(struct mei_device *dev) { int ret; @@ -147,13 +168,13 @@ int mei_hbm_start_wait(struct mei_device *dev) ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, dev->hbm_state == MEI_HBM_IDLE || dev->hbm_state >= MEI_HBM_STARTED, - mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); + mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); mutex_lock(&dev->device_lock); if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) { dev->hbm_state = MEI_HBM_IDLE; dev_err(&dev->pdev->dev, "waiting for mei start failed\n"); - return -ETIMEDOUT; + return -ETIME; } return 0; } @@ -282,18 +303,47 @@ static int mei_hbm_prop_req(struct mei_device *dev) return 0; } +/* + * mei_hbm_pg - sends pg command + * + * @dev: the device structure + * @pg_cmd: the pg command code + * + * This function returns -EIO on write failure + */ +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_power_gate *req; + const size_t len = sizeof(struct hbm_power_gate); + int ret; + + mei_hbm_hdr(mei_hdr, len); + + req = (struct hbm_power_gate *)dev->wr_msg.data; + memset(req, 0, len); + req->hbm_cmd = pg_cmd; + + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) + dev_err(&dev->pdev->dev, "power gate command write failed.\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mei_hbm_pg); + /** - * mei_hbm_stop_req_prepare - prepare stop request message + * mei_hbm_stop_req - send stop request message * * @dev - mei device - * @mei_hdr - mei message header - * @data - hbm message body buffer + * @cl: client info + * + * This function returns -EIO on write failure */ -static void mei_hbm_stop_req_prepare(struct mei_device *dev, - struct mei_msg_hdr *mei_hdr, unsigned char *data) +static int mei_hbm_stop_req(struct mei_device *dev) { + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; struct hbm_host_stop_request *req = - (struct hbm_host_stop_request *)data; + (struct hbm_host_stop_request *)dev->wr_msg.data; const size_t len = sizeof(struct hbm_host_stop_request); mei_hbm_hdr(mei_hdr, len); @@ -301,6 +351,8 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev, memset(req, 0, len); req->hbm_cmd = HOST_STOP_REQ_CMD; req->reason = DRIVER_STOP_REQUEST; + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); } /** @@ -319,8 +371,7 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) mei_hbm_hdr(mei_hdr, len); mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len); - dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n", - cl->host_client_id, cl->me_client_id); + cl_dbg(dev, cl, "sending flow control\n"); return mei_write_message(dev, mei_hdr, dev->wr_msg.data); } @@ -330,27 +381,34 @@ int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) * * @dev: the device structure * @flow: flow control. 
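mei_hbm_pg() above and the reworked mei_hbm_stop_req() follow the same recipe for every simple HBM request: size the header, zero the scratch payload, put the command byte first and hand both to the single write routine. A toy model of that framing (plain C; the header layout and the 0x0a command value are placeholders, not the real MEI wire format):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Toy model of the dev->wr_msg scratch area: a header recording the
 * payload length and a byte buffer whose first byte is the command. */
struct scratch_msg {
	struct { uint32_t length; } hdr;
	uint8_t data[128];
};

static int write_message(const struct scratch_msg *msg)
{
	printf("sending cmd 0x%02x, %u payload bytes\n",
	       msg->data[0], (unsigned)msg->hdr.length);
	return 0;
}

static int send_simple_cmd(struct scratch_msg *msg, uint8_t cmd, size_t len)
{
	msg->hdr.length = (uint32_t)len;
	memset(msg->data, 0, len);
	msg->data[0] = cmd;	/* every HBM request starts with hbm_cmd */
	return write_message(msg);
}

int main(void)
{
	struct scratch_msg msg;

	return send_simple_cmd(&msg, 0x0a, 4);	/* arbitrary demo command */
}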
+ * + * return 0 on success, < 0 otherwise */ -static void mei_hbm_add_single_flow_creds(struct mei_device *dev, +static int mei_hbm_add_single_flow_creds(struct mei_device *dev, struct hbm_flow_control *flow) { - struct mei_me_client *client; - int i; - - for (i = 0; i < dev->me_clients_num; i++) { - client = &dev->me_clients[i]; - if (client && flow->me_addr == client->client_id) { - if (client->props.single_recv_buf) { - client->mei_flow_ctrl_creds++; - dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", - flow->me_addr); - dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", - client->mei_flow_ctrl_creds); - } else { - BUG(); /* error in flow control */ - } - } + struct mei_me_client *me_cl; + int id; + + id = mei_me_cl_by_id(dev, flow->me_addr); + if (id < 0) { + dev_err(&dev->pdev->dev, "no such me client %d\n", + flow->me_addr); + return id; + } + + me_cl = &dev->me_clients[id]; + if (me_cl->props.single_recv_buf) { + me_cl->mei_flow_ctrl_creds++; + dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n", + flow->me_addr); + dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n", + me_cl->mei_flow_ctrl_creds); + } else { + BUG(); /* error in flow control */ } + + return 0; } /** @@ -362,8 +420,7 @@ static void mei_hbm_add_single_flow_creds(struct mei_device *dev, static void mei_hbm_cl_flow_control_res(struct mei_device *dev, struct hbm_flow_control *flow_control) { - struct mei_cl *cl = NULL; - struct mei_cl *next = NULL; + struct mei_cl *cl; if (!flow_control->host_addr) { /* single receive buffer */ @@ -372,7 +429,7 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev, } /* normal connection */ - list_for_each_entry_safe(cl, next, &dev->file_list, link) { + list_for_each_entry(cl, &dev->file_list, link) { if (mei_hbm_cl_addr_equal(cl, flow_control)) { cl->mei_flow_ctrl_creds++; dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n", @@ -405,6 +462,25 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) } /** + * mei_hbm_cl_disconnect_rsp - sends disconnect respose to the FW + * + * @dev: the device structure + * @cl: a client to disconnect from + * + * This function returns -EIO on write failure + */ +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + const size_t len = sizeof(struct hbm_client_connect_response); + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, dev->wr_msg.data, len); + + return mei_write_message(dev, mei_hdr, dev->wr_msg.data); +} + +/** * mei_hbm_cl_disconnect_res - disconnect response from ME * * @dev: the device structure @@ -414,29 +490,23 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct hbm_client_connect_response *rs) { struct mei_cl *cl; - struct mei_cl_cb *pos = NULL, *next = NULL; - - dev_dbg(&dev->pdev->dev, - "disconnect_response:\n" - "ME Client = %d\n" - "Host Client = %d\n" - "Status = %d\n", - rs->me_addr, - rs->host_addr, - rs->status); - - list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) { - cl = pos->cl; - - if (!cl) { - list_del(&pos->list); + struct mei_cl_cb *cb, *next; + + dev_dbg(&dev->pdev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n", + rs->me_addr, rs->host_addr, rs->status); + + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { + cl = cb->cl; + + /* this should not happen */ + if (WARN_ON(!cl)) { + list_del(&cb->list); return; } - dev_dbg(&dev->pdev->dev, 
"list_for_each_entry_safe in ctrl_rd_list.\n"); if (mei_hbm_cl_addr_equal(cl, rs)) { - list_del(&pos->list); - if (!rs->status) + list_del(&cb->list); + if (rs->status == MEI_CL_DISCONN_SUCCESS) cl->state = MEI_FILE_DISCONNECTED; cl->status = 0; @@ -476,46 +546,41 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, { struct mei_cl *cl; - struct mei_cl_cb *pos = NULL, *next = NULL; + struct mei_cl_cb *cb, *next; - dev_dbg(&dev->pdev->dev, - "connect_response:\n" - "ME Client = %d\n" - "Host Client = %d\n" - "Status = %d\n", - rs->me_addr, - rs->host_addr, - rs->status); + dev_dbg(&dev->pdev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n", + rs->me_addr, rs->host_addr, + mei_cl_conn_status_str(rs->status)); - /* if WD or iamthif client treat specially */ + cl = NULL; - if (is_treat_specially_client(&dev->wd_cl, rs)) { - dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n"); - mei_watchdog_register(dev); + list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { - return; - } + cl = cb->cl; + /* this should not happen */ + if (WARN_ON(!cl)) { + list_del_init(&cb->list); + continue; + } - if (is_treat_specially_client(&dev->iamthif_cl, rs)) { - dev->iamthif_state = MEI_IAMTHIF_IDLE; - return; - } - list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) { + if (cb->fop_type != MEI_FOP_CONNECT) + continue; - cl = pos->cl; - if (!cl) { - list_del(&pos->list); - return; - } - if (pos->fop_type == MEI_FOP_IOCTL) { - if (is_treat_specially_client(cl, rs)) { - list_del(&pos->list); - cl->status = 0; - cl->timer_count = 0; - break; - } + if (mei_hbm_cl_addr_equal(cl, rs)) { + list_del(&cb->list); + break; } } + + if (!cl) + return; + + cl->timer_count = 0; + if (rs->status == MEI_CL_CONN_SUCCESS) + cl->state = MEI_FILE_CONNECTED; + else + cl->state = MEI_FILE_DISCONNECTED; + cl->status = mei_cl_conn_status_to_errno(rs->status); } @@ -525,32 +590,34 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, * * @dev: the device structure. 
* @disconnect_req: disconnect request bus message from the me + * + * returns -ENOMEM on allocation failure */ -static void mei_hbm_fw_disconnect_req(struct mei_device *dev, +static int mei_hbm_fw_disconnect_req(struct mei_device *dev, struct hbm_client_connect_request *disconnect_req) { - struct mei_cl *cl, *next; - const size_t len = sizeof(struct hbm_client_connect_response); + struct mei_cl *cl; + struct mei_cl_cb *cb; - list_for_each_entry_safe(cl, next, &dev->file_list, link) { + list_for_each_entry(cl, &dev->file_list, link) { if (mei_hbm_cl_addr_equal(cl, disconnect_req)) { dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n", disconnect_req->host_addr, disconnect_req->me_addr); cl->state = MEI_FILE_DISCONNECTED; cl->timer_count = 0; - if (cl == &dev->wd_cl) - dev->wd_pending = false; - else if (cl == &dev->iamthif_cl) - dev->iamthif_timer = 0; - - /* prepare disconnect response */ - mei_hbm_hdr(&dev->wr_ext_msg.hdr, len); - mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, - dev->wr_ext_msg.data, len); + + cb = mei_io_cb_init(cl, NULL); + if (!cb) + return -ENOMEM; + cb->fop_type = MEI_FOP_DISCONNECT_RSP; + cl_dbg(dev, cl, "add disconnect response as first\n"); + list_add(&cb->list, &dev->ctrl_wr_list.list); + break; } } + return 0; } @@ -629,10 +696,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); dev->hbm_state = MEI_HBM_STOPPED; - mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, - dev->wr_msg.data); - if (mei_write_message(dev, &dev->wr_msg.hdr, - dev->wr_msg.data)) { + if (mei_hbm_stop_req(dev)) { dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); return -EIO; } @@ -679,6 +743,27 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) mei_hbm_cl_flow_control_res(dev, flow_control); break; + case MEI_PG_ISOLATION_ENTRY_RES_CMD: + dev_dbg(&dev->pdev->dev, "power gate isolation entry response received\n"); + dev->pg_event = MEI_PG_EVENT_RECEIVED; + if (waitqueue_active(&dev->wait_pg)) + wake_up(&dev->wait_pg); + break; + + case MEI_PG_ISOLATION_EXIT_REQ_CMD: + dev_dbg(&dev->pdev->dev, "power gate isolation exit request received\n"); + dev->pg_event = MEI_PG_EVENT_RECEIVED; + if (waitqueue_active(&dev->wait_pg)) + wake_up(&dev->wait_pg); + else + /* + * If the driver is not waiting on this then + * this is HW initiated exit from PG. + * Start runtime pm resume sequence to exit from PG. 
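The two new power-gating cases in mei_hbm_dispatch() below are the interrupt-side half of a synchronous handshake: the requester arms dev->pg_event, sends an HBM message and sleeps; the dispatcher flips the event and wakes it, or kicks a runtime resume when nobody is waiting because the hardware initiated the exit on its own. A condensed, driver-agnostic sketch of the two halves (kernel-style; names are illustrative, locking and initialisation are omitted):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

enum hs_event { EV_IDLE, EV_WAIT, EV_RECEIVED };

struct handshake {
	wait_queue_head_t wq;	/* set up with init_waitqueue_head() */
	enum hs_event event;
};

/* process context: arm the event, send, sleep until answered */
static int handshake_request(struct handshake *hs, int (*send)(void))
{
	int ret;

	hs->event = EV_WAIT;
	ret = send();
	if (ret)
		return ret;

	wait_event_timeout(hs->wq, hs->event == EV_RECEIVED,
			   msecs_to_jiffies(1000));

	ret = (hs->event == EV_RECEIVED) ? 0 : -ETIME;
	hs->event = EV_IDLE;
	return ret;
}

/* interrupt-thread / dispatcher context: complete and wake the waiter */
static void handshake_complete(struct handshake *hs)
{
	hs->event = EV_RECEIVED;
	if (waitqueue_active(&hs->wq))
		wake_up(&hs->wq);
}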
+ */ + pm_request_resume(&dev->pdev->dev); + break; + case HOST_CLIENT_PROPERTIES_RES_CMD: dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); @@ -778,10 +863,11 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) case ME_STOP_REQ_CMD: dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); - dev->hbm_state = MEI_HBM_STOPPED; - mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, - dev->wr_ext_msg.data); + if (mei_hbm_stop_req(dev)) { + dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); + return -EIO; + } break; default: BUG(); diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 5f92188a5cd7..683eb2835cec 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -50,12 +50,15 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) } void mei_hbm_idle(struct mei_device *dev); +void mei_hbm_reset(struct mei_device *dev); int mei_hbm_start_req(struct mei_device *dev); int mei_hbm_start_wait(struct mei_device *dev); int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl); +int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl); int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); bool mei_hbm_version_is_supported(struct mei_device *dev); +int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd); #endif /* _MEI_HBM_H_ */ diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 66f411a6e8ea..a7856c0ac576 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -115,6 +115,11 @@ #define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ #define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ + +/* Host Firmware Status Registers in PCI Config Space */ +#define PCI_CFG_HFS_1 0x40 +#define PCI_CFG_HFS_2 0x48 + /* * MEI HW Section */ @@ -128,6 +133,8 @@ #define ME_CB_RW 8 /* ME_CSR_HA - ME Control Status Host Access register (read only) */ #define ME_CSR_HA 0xC +/* H_HGC_CSR - PGI register */ +#define H_HPG_CSR 0x10 /* register bits of H_CSR (Host Control Status register) */ @@ -157,6 +164,8 @@ access to ME_CBD */ #define ME_CBWP_HRA 0x00FF0000 /* ME CB Read Pointer HRA - host read only access to ME_CBRP */ #define ME_CBRP_HRA 0x0000FF00 +/* ME Power Gate Isolation Capability HRA - host ready only access */ +#define ME_PGIC_HRA 0x00000040 /* ME Reset HRA - host read only access to ME_RST */ #define ME_RST_HRA 0x00000010 /* ME Ready HRA - host read only access to ME_RDY */ @@ -168,4 +177,9 @@ access to ME_CBD */ /* ME Interrupt Enable HRA - host read only access to ME_IE */ #define ME_IE_HRA 0x00000001 + +/* register bits - H_HPG_CSR */ +#define H_HPG_CSR_PGIHEXR 0x00000001 +#define H_HPG_CSR_PGI 0x00000002 + #endif /* _MEI_HW_MEI_REGS_H_ */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 6f656c053b14..6a2d272cea43 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -20,10 +20,10 @@ #include <linux/interrupt.h> #include "mei_dev.h" -#include "hw-me.h" - #include "hbm.h" +#include "hw-me.h" +#include "hw-me-regs.h" /** * mei_me_reg_read - Reads 32bit data from the mei device @@ -109,10 +109,27 @@ static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) */ static void mei_me_hw_config(struct mei_device *dev) { + struct mei_me_hw *hw = to_me_hw(dev); u32 hcsr = mei_hcsr_read(to_me_hw(dev)); /* Doesn't change in runtime */ dev->hbuf_depth = (hcsr & H_CBD) >> 24; + + 
hw->pg_state = MEI_PG_OFF; } + +/** + * mei_me_pg_state - translate internal pg state + * to the mei power gating state + * + * @hw - me hardware + * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise + */ +static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + return hw->pg_state; +} + /** * mei_clear_interrupts - clear and stop interrupts * @@ -164,6 +181,9 @@ static void mei_me_hw_reset_release(struct mei_device *dev) hcsr |= H_IG; hcsr &= ~H_RST; mei_hcsr_set(hw, hcsr); + + /* complete this write before we set host ready on another CPU */ + mmiowb(); } /** * mei_me_hw_reset - resets fw via mei csr register. @@ -183,8 +203,21 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) else hcsr &= ~H_IE; + dev->recvd_hw_ready = false; mei_me_reg_write(hw, H_CSR, hcsr); + /* + * Host reads the H_CSR once to ensure that the + * posted write to H_CSR completes. + */ + hcsr = mei_hcsr_read(hw); + + if ((hcsr & H_RST) == 0) + dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr); + + if ((hcsr & H_RDY) == H_RDY) + dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr); + if (intr_enable == false) mei_me_hw_reset_release(dev); @@ -201,6 +234,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) static void mei_me_host_set_ready(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); + hw->host_hw_state = mei_hcsr_read(hw); hw->host_hw_state |= H_IE | H_IG | H_RDY; mei_hcsr_set(hw, hw->host_hw_state); } @@ -233,18 +267,15 @@ static bool mei_me_hw_is_ready(struct mei_device *dev) static int mei_me_hw_ready_wait(struct mei_device *dev) { int err; - if (mei_me_hw_is_ready(dev)) - return 0; - dev->recvd_hw_ready = false; mutex_unlock(&dev->device_lock); err = wait_event_interruptible_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, - mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); + mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); mutex_lock(&dev->device_lock); if (!err && !dev->recvd_hw_ready) { if (!err) - err = -ETIMEDOUT; + err = -ETIME; dev_err(&dev->pdev->dev, "wait hw ready failed. status = %d\n", err); return err; @@ -303,7 +334,7 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev) * * @dev: the device structure * - * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count + * returns -EOVERFLOW if overflow, otherwise empty slots count */ static int mei_me_hbuf_empty_slots(struct mei_device *dev) { @@ -326,7 +357,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev) /** - * mei_write_message - writes a message to mei device. + * mei_me_write_message - writes a message to mei device. * * @dev: the device structure * @header: mei HECI header of message @@ -354,7 +385,7 @@ static int mei_me_write_message(struct mei_device *dev, dw_cnt = mei_data2slots(length); if (empty_slots < 0 || dw_cnt > empty_slots) - return -EIO; + return -EMSGSIZE; mei_me_reg_write(hw, H_CB_WW, *((u32 *) header)); @@ -381,7 +412,7 @@ static int mei_me_write_message(struct mei_device *dev, * * @dev: the device structure * - * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count + * returns -EOVERFLOW if overflow, otherwise filled slots count */ static int mei_me_count_full_read_slots(struct mei_device *dev) { @@ -431,6 +462,144 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, } /** + * mei_me_pg_enter - write pg enter register to mei device. 
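The reset path above writes H_CSR and then immediately reads it back, so the posted MMIO write is known to have reached the device before the H_RST/H_RDY sanity checks run (the mmiowb() plays a similar ordering role before host ready is set from another CPU). A generic sketch of the read-back flush, using a hypothetical register pointer rather than the driver's accessors:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Write a control register and flush the posted write by reading the same
 * register back through the bus; the returned value can also be sanity-checked.
 */
static u32 demo_write_flush(void __iomem *reg, u32 val)
{
        iowrite32(val, reg);
        return ioread32(reg);   /* read-back forces the posted write to complete */
}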
+ * + * @dev: the device structure + */ +static void mei_me_pg_enter(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg = mei_me_reg_read(hw, H_HPG_CSR); + reg |= H_HPG_CSR_PGI; + mei_me_reg_write(hw, H_HPG_CSR, reg); +} + +/** + * mei_me_pg_enter - write pg enter register to mei device. + * + * @dev: the device structure + */ +static void mei_me_pg_exit(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg = mei_me_reg_read(hw, H_HPG_CSR); + + WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); + + reg |= H_HPG_CSR_PGIHEXR; + mei_me_reg_write(hw, H_HPG_CSR, reg); +} + +/** + * mei_me_pg_set_sync - perform pg entry procedure + * + * @dev: the device structure + * + * returns 0 on success an error code otherwise + */ +int mei_me_pg_set_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); + int ret; + + dev->pg_event = MEI_PG_EVENT_WAIT; + + ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); + if (ret) + return ret; + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { + mei_me_pg_enter(dev); + ret = 0; + } else { + ret = -ETIME; + } + + dev->pg_event = MEI_PG_EVENT_IDLE; + hw->pg_state = MEI_PG_ON; + + return ret; +} + +/** + * mei_me_pg_unset_sync - perform pg exit procedure + * + * @dev: the device structure + * + * returns 0 on success an error code otherwise + */ +int mei_me_pg_unset_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); + int ret; + + if (dev->pg_event == MEI_PG_EVENT_RECEIVED) + goto reply; + + dev->pg_event = MEI_PG_EVENT_WAIT; + + mei_me_pg_exit(dev); + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + mutex_lock(&dev->device_lock); + +reply: + if (dev->pg_event == MEI_PG_EVENT_RECEIVED) + ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); + else + ret = -ETIME; + + dev->pg_event = MEI_PG_EVENT_IDLE; + hw->pg_state = MEI_PG_OFF; + + return ret; +} + +/** + * mei_me_pg_is_enabled - detect if PG is supported by HW + * + * @dev: the device structure + * + * returns: true is pg supported, false otherwise + */ +static bool mei_me_pg_is_enabled(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg = mei_me_reg_read(hw, ME_CSR_HA); + + if ((reg & ME_PGIC_HRA) == 0) + goto notsupported; + + if (dev->version.major_version < HBM_MAJOR_VERSION_PGI) + goto notsupported; + + if (dev->version.major_version == HBM_MAJOR_VERSION_PGI && + dev->version.minor_version < HBM_MINOR_VERSION_PGI) + goto notsupported; + + return true; + +notsupported: + dev_dbg(&dev->pdev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n", + !!(reg & ME_PGIC_HRA), + dev->version.major_version, + dev->version.minor_version, + HBM_MAJOR_VERSION_PGI, + HBM_MINOR_VERSION_PGI); + + return false; +} + +/** * mei_me_irq_quick_handler - The ISR of the MEI device * * @irq: The irq number @@ -491,35 +660,48 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if we need to start the dev */ if (!mei_host_is_ready(dev)) { if (mei_hw_is_ready(dev)) { + mei_me_hw_reset_release(dev); dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); dev->recvd_hw_ready = true; wake_up_interruptible(&dev->wait_hw_ready); } else { - - 
dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); - mei_me_hw_reset_release(dev); + dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n"); } goto end; } /* check slots available for reading */ slots = mei_count_full_read_slots(dev); while (slots > 0) { - /* we have urgent data to send so break the read */ - if (dev->wr_ext_msg.hdr.length) - break; dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots); rets = mei_irq_read_handler(dev, &complete_list, &slots); + /* There is a race between ME write and interrupt delivery: + * Not all data is always available immediately after the + * interrupt, so try to read again on the next interrupt. + */ + if (rets == -ENODATA) + break; + if (rets && dev->dev_state != MEI_DEV_RESETTING) { + dev_err(&dev->pdev->dev, "mei_irq_read_handler ret = %d.\n", + rets); schedule_work(&dev->reset_work); goto end; } } - rets = mei_irq_write_handler(dev, &complete_list); - dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + /* + * During PG handshake only allowed write is the replay to the + * PG exit message, so block calling write function + * if the pg state is not idle + */ + if (dev->pg_event == MEI_PG_EVENT_IDLE) { + rets = mei_irq_write_handler(dev, &complete_list); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + } + mei_irq_compl_handler(dev, &complete_list); end: @@ -527,8 +709,65 @@ end: mutex_unlock(&dev->device_lock); return IRQ_HANDLED; } + +/** + * mei_me_fw_status - retrieve fw status from the pci config space + * + * @dev: the device structure + * @fw_status: fw status registers storage + * + * returns 0 on success an error code otherwise + */ +static int mei_me_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) +{ + const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2}; + int i; + + if (!fw_status) + return -EINVAL; + + switch (dev->pdev->device) { + case MEI_DEV_ID_IBXPK_1: + case MEI_DEV_ID_IBXPK_2: + case MEI_DEV_ID_CPT_1: + case MEI_DEV_ID_PBG_1: + case MEI_DEV_ID_PPT_1: + case MEI_DEV_ID_PPT_2: + case MEI_DEV_ID_PPT_3: + case MEI_DEV_ID_LPT_H: + case MEI_DEV_ID_LPT_W: + case MEI_DEV_ID_LPT_LP: + case MEI_DEV_ID_LPT_HR: + case MEI_DEV_ID_WPT_LP: + fw_status->count = 2; + break; + case MEI_DEV_ID_ICH10_1: + case MEI_DEV_ID_ICH10_2: + case MEI_DEV_ID_ICH10_3: + case MEI_DEV_ID_ICH10_4: + fw_status->count = 1; + break; + default: + fw_status->count = 0; + break; + } + + for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) { + int ret; + ret = pci_read_config_dword(dev->pdev, + pci_cfg_reg[i], &fw_status->status[i]); + if (ret) + return ret; + } + return 0; +} + static const struct mei_hw_ops mei_me_hw_ops = { + .pg_state = mei_me_pg_state, + + .fw_status = mei_me_fw_status, .host_is_ready = mei_me_host_is_ready, .hw_is_ready = mei_me_hw_is_ready, @@ -536,6 +775,8 @@ static const struct mei_hw_ops mei_me_hw_ops = { .hw_config = mei_me_hw_config, .hw_start = mei_me_hw_start, + .pg_is_enabled = mei_me_pg_is_enabled, + .intr_clear = mei_me_intr_clear, .intr_enable = mei_me_intr_enable, .intr_disable = mei_me_intr_disable, @@ -551,14 +792,81 @@ static const struct mei_hw_ops mei_me_hw_ops = { .read = mei_me_read_slots }; +static bool mei_me_fw_type_nm(struct pci_dev *pdev) +{ + u32 reg; + pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); + /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ + return (reg & 0x600) == 0x200; +} + +#define MEI_CFG_FW_NM \ + .quirk_probe = mei_me_fw_type_nm + +static bool mei_me_fw_type_sps(struct pci_dev *pdev) +{ + u32 reg; + /* Read ME FW Status check for SPS Firmware */ + 
pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); + /* if bits [19:16] = 15, running SPS Firmware */ + return (reg & 0xf0000) == 0xf0000; +} + +#define MEI_CFG_FW_SPS \ + .quirk_probe = mei_me_fw_type_sps + + +#define MEI_CFG_LEGACY_HFS \ + .fw_status.count = 0 + +#define MEI_CFG_ICH_HFS \ + .fw_status.count = 1, \ + .fw_status.status[0] = PCI_CFG_HFS_1 + +#define MEI_CFG_PCH_HFS \ + .fw_status.count = 2, \ + .fw_status.status[0] = PCI_CFG_HFS_1, \ + .fw_status.status[1] = PCI_CFG_HFS_2 + + +/* ICH Legacy devices */ +const struct mei_cfg mei_me_legacy_cfg = { + MEI_CFG_LEGACY_HFS, +}; + +/* ICH devices */ +const struct mei_cfg mei_me_ich_cfg = { + MEI_CFG_ICH_HFS, +}; + +/* PCH devices */ +const struct mei_cfg mei_me_pch_cfg = { + MEI_CFG_PCH_HFS, +}; + + +/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ +const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { + MEI_CFG_PCH_HFS, + MEI_CFG_FW_NM, +}; + +/* PCH Lynx Point with quirk for SPS Firmware exclusion */ +const struct mei_cfg mei_me_lpt_cfg = { + MEI_CFG_PCH_HFS, + MEI_CFG_FW_SPS, +}; + /** * mei_me_dev_init - allocates and initializes the mei device structure * * @pdev: The pci device structure + * @cfg: per device generation config * * returns The mei_device_device pointer on success, NULL on failure. */ -struct mei_device *mei_me_dev_init(struct pci_dev *pdev) +struct mei_device *mei_me_dev_init(struct pci_dev *pdev, + const struct mei_cfg *cfg) { struct mei_device *dev; @@ -567,7 +875,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev) if (!dev) return NULL; - mei_device_init(dev); + mei_device_init(dev, cfg); dev->ops = &mei_me_hw_ops; diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 80bd829fbd9a..12b0f4bbe1f1 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -20,9 +20,12 @@ #define _MEI_INTERFACE_H_ #include <linux/mei.h> +#include <linux/irqreturn.h> #include "mei_dev.h" #include "client.h" +#define MEI_ME_RPM_TIMEOUT 500 /* ms */ + struct mei_me_hw { void __iomem *mem_addr; /* @@ -30,11 +33,22 @@ struct mei_me_hw { */ u32 host_hw_state; u32 me_hw_state; + enum mei_pg_state pg_state; }; #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) -struct mei_device *mei_me_dev_init(struct pci_dev *pdev); +extern const struct mei_cfg mei_me_legacy_cfg; +extern const struct mei_cfg mei_me_ich_cfg; +extern const struct mei_cfg mei_me_pch_cfg; +extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg; +extern const struct mei_cfg mei_me_lpt_cfg; + +struct mei_device *mei_me_dev_init(struct pci_dev *pdev, + const struct mei_cfg *cfg); + +int mei_me_pg_set_sync(struct mei_device *dev); +int mei_me_pg_unset_sync(struct mei_device *dev); irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h new file mode 100644 index 000000000000..f19229c4e655 --- /dev/null +++ b/drivers/misc/mei/hw-txe-regs.h @@ -0,0 +1,294 @@ +/****************************************************************************** + * Intel Management Engine Interface (Intel MEI) Linux driver + * Intel MEI Interface Header + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 
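The per-generation mei_cfg tables above bundle the firmware-status register list with an optional quirk_probe hook, so a device whose firmware variant (Node Manager, SPS) should not be driven can be rejected early. A sketch of how such a hook might be consulted during probe; the reduced config structure and demo_probe() are hypothetical, only the quirk_probe idea is taken from the hunk above:

#include <linux/pci.h>
#include <linux/errno.h>

struct demo_cfg {
        /* returns false when this device/firmware combination must be skipped */
        bool (*quirk_probe)(struct pci_dev *pdev);
};

static int demo_probe(struct pci_dev *pdev, const struct demo_cfg *cfg)
{
        if (cfg->quirk_probe && !cfg->quirk_probe(pdev))
                return -ENODEV;         /* firmware variant owned by another agent */

        /* normal initialization would continue here */
        return 0;
}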
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef _MEI_HW_TXE_REGS_H_ +#define _MEI_HW_TXE_REGS_H_ + +#include "hw.h" + +#define SEC_ALIVENESS_TIMER_TIMEOUT (5 * MSEC_PER_SEC) +#define SEC_ALIVENESS_WAIT_TIMEOUT (1 * MSEC_PER_SEC) +#define SEC_RESET_WAIT_TIMEOUT (1 * MSEC_PER_SEC) +#define SEC_READY_WAIT_TIMEOUT (5 * MSEC_PER_SEC) +#define START_MESSAGE_RESPONSE_WAIT_TIMEOUT (5 * MSEC_PER_SEC) +#define RESET_CANCEL_WAIT_TIMEOUT (1 * MSEC_PER_SEC) + +enum { + SEC_BAR, + BRIDGE_BAR, + + NUM_OF_MEM_BARS +}; + +/* SeC FW Status Register + * + * FW uses this register in order to report its status to host. + * This register resides in PCI-E config space. 
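The SeC firmware status dword described here is decoded with the masks defined just below. As a small illustration, the helpers in this sketch extract a few of those fields; the shift amounts are derived from the masks themselves, and the field semantics are assumed only as far as the names suggest:

#include <linux/types.h>

#define DEMO_FW_STS0_WRK_ST_MSK         0x0000000F      /* working state, bits 3:0 */
#define DEMO_FW_STS0_ERR_CODE_MSK       0x0000F000      /* error code, bits 15:12 */
#define DEMO_FW_STS0_RST_CNT_MSK        0x00F00000      /* reset count, bits 23:20 */

static inline u32 demo_fw_wrk_state(u32 sts0)
{
        return sts0 & DEMO_FW_STS0_WRK_ST_MSK;
}

static inline u32 demo_fw_err_code(u32 sts0)
{
        return (sts0 & DEMO_FW_STS0_ERR_CODE_MSK) >> 12;
}

static inline u32 demo_fw_rst_cnt(u32 sts0)
{
        return (sts0 & DEMO_FW_STS0_RST_CNT_MSK) >> 20;
}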
+ */ +#define PCI_CFG_TXE_FW_STS0 0x40 +# define PCI_CFG_TXE_FW_STS0_WRK_ST_MSK 0x0000000F +# define PCI_CFG_TXE_FW_STS0_OP_ST_MSK 0x000001C0 +# define PCI_CFG_TXE_FW_STS0_FW_INIT_CMPLT 0x00000200 +# define PCI_CFG_TXE_FW_STS0_ERR_CODE_MSK 0x0000F000 +# define PCI_CFG_TXE_FW_STS0_OP_MODE_MSK 0x000F0000 +# define PCI_CFG_TXE_FW_STS0_RST_CNT_MSK 0x00F00000 +#define PCI_CFG_TXE_FW_STS1 0x48 + +#define IPC_BASE_ADDR 0x80400 /* SeC IPC Base Address */ + +/* IPC Input Doorbell Register */ +#define SEC_IPC_INPUT_DOORBELL_REG (0x0000 + IPC_BASE_ADDR) + +/* IPC Input Status Register + * This register indicates whether or not processing of + * the most recent command has been completed by the SEC + * New commands and payloads should not be written by the Host + * until this indicates that the previous command has been processed. + */ +#define SEC_IPC_INPUT_STATUS_REG (0x0008 + IPC_BASE_ADDR) +# define SEC_IPC_INPUT_STATUS_RDY BIT(0) + +/* IPC Host Interrupt Status Register */ +#define SEC_IPC_HOST_INT_STATUS_REG (0x0010 + IPC_BASE_ADDR) +#define SEC_IPC_HOST_INT_STATUS_OUT_DB BIT(0) +#define SEC_IPC_HOST_INT_STATUS_IN_RDY BIT(1) +#define SEC_IPC_HOST_INT_STATUS_HDCP_M0_RCVD BIT(5) +#define SEC_IPC_HOST_INT_STATUS_ILL_MEM_ACCESS BIT(17) +#define SEC_IPC_HOST_INT_STATUS_AES_HKEY_ERR BIT(18) +#define SEC_IPC_HOST_INT_STATUS_DES_HKEY_ERR BIT(19) +#define SEC_IPC_HOST_INT_STATUS_TMRMTB_OVERFLOW BIT(21) + +/* Convenient mask for pending interrupts */ +#define SEC_IPC_HOST_INT_STATUS_PENDING \ + (SEC_IPC_HOST_INT_STATUS_OUT_DB| \ + SEC_IPC_HOST_INT_STATUS_IN_RDY) + +/* IPC Host Interrupt Mask Register */ +#define SEC_IPC_HOST_INT_MASK_REG (0x0014 + IPC_BASE_ADDR) + +# define SEC_IPC_HOST_INT_MASK_OUT_DB BIT(0) /* Output Doorbell Int Mask */ +# define SEC_IPC_HOST_INT_MASK_IN_RDY BIT(1) /* Input Ready Int Mask */ + +/* IPC Input Payload RAM */ +#define SEC_IPC_INPUT_PAYLOAD_REG (0x0100 + IPC_BASE_ADDR) +/* IPC Shared Payload RAM */ +#define IPC_SHARED_PAYLOAD_REG (0x0200 + IPC_BASE_ADDR) + +/* SeC Address Translation Table Entry 2 - Ctrl + * + * This register resides also in SeC's PCI-E Memory space. + */ +#define SATT2_CTRL_REG 0x1040 +# define SATT2_CTRL_VALID_MSK BIT(0) +# define SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT 8 +# define SATT2_CTRL_BRIDGE_HOST_EN_MSK BIT(12) + +/* SATT Table Entry 2 SAP Base Address Register */ +#define SATT2_SAP_BA_REG 0x1044 +/* SATT Table Entry 2 SAP Size Register. */ +#define SATT2_SAP_SIZE_REG 0x1048 + /* SATT Table Entry 2 SAP Bridge Address - LSB Register */ +#define SATT2_BRG_BA_LSB_REG 0x104C + +/* Host High-level Interrupt Status Register */ +#define HHISR_REG 0x2020 +/* Host High-level Interrupt Enable Register + * + * Resides in PCI memory space. This is the top hierarchy for + * interrupts from SeC to host, aggregating both interrupts that + * arrive through HICR registers as well as interrupts + * that arrive via IPC. + */ +#define HHIER_REG 0x2024 +#define IPC_HHIER_SEC BIT(0) +#define IPC_HHIER_BRIDGE BIT(1) +#define IPC_HHIER_MSK (IPC_HHIER_SEC | IPC_HHIER_BRIDGE) + +/* Host High-level Interrupt Mask Register. + * + * Resides in PCI memory space. + * This is the top hierarchy for masking interrupts from SeC to host. + */ +#define HHIMR_REG 0x2028 +#define IPC_HHIMR_SEC BIT(0) +#define IPC_HHIMR_BRIDGE BIT(1) + +/* Host High-level IRQ Status Register */ +#define HHIRQSR_REG 0x202C + +/* Host Interrupt Cause Register 0 - SeC IPC Readiness + * + * This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. 
+ * This register is used by SeC's IPC driver in order + * to synchronize with host about IPC interface state. + */ +#define HICR_SEC_IPC_READINESS_REG 0x2040 +#define HICR_SEC_IPC_READINESS_HOST_RDY BIT(0) +#define HICR_SEC_IPC_READINESS_SEC_RDY BIT(1) +#define HICR_SEC_IPC_READINESS_SYS_RDY \ + (HICR_SEC_IPC_READINESS_HOST_RDY | \ + HICR_SEC_IPC_READINESS_SEC_RDY) +#define HICR_SEC_IPC_READINESS_RDY_CLR BIT(2) + +/* Host Interrupt Cause Register 1 - Aliveness Response */ +/* This register is both an ICR to Host from PCI Memory Space + * and it is also exposed in the SeC memory space. + * The register may be used by SeC to ACK a host request for aliveness. + */ +#define HICR_HOST_ALIVENESS_RESP_REG 0x2044 +#define HICR_HOST_ALIVENESS_RESP_ACK BIT(0) + +/* Host Interrupt Cause Register 2 - SeC IPC Output Doorbell */ +#define HICR_SEC_IPC_OUTPUT_DOORBELL_REG 0x2048 + +/* Host Interrupt Status Register. + * + * Resides in PCI memory space. + * This is the main register involved in generating interrupts + * from SeC to host via HICRs. + * The interrupt generation rules are as follows: + * An interrupt will be generated whenever for any i, + * there is a transition from a state where at least one of + * the following conditions did not hold, to a state where + * ALL the following conditions hold: + * A) HISR.INT[i]_STS == 1. + * B) HIER.INT[i]_EN == 1. + */ +#define HISR_REG 0x2060 +#define HISR_INT_0_STS BIT(0) +#define HISR_INT_1_STS BIT(1) +#define HISR_INT_2_STS BIT(2) +#define HISR_INT_3_STS BIT(3) +#define HISR_INT_4_STS BIT(4) +#define HISR_INT_5_STS BIT(5) +#define HISR_INT_6_STS BIT(6) +#define HISR_INT_7_STS BIT(7) +#define HISR_INT_STS_MSK \ + (HISR_INT_0_STS | HISR_INT_1_STS | HISR_INT_2_STS) + +/* Host Interrupt Enable Register. Resides in PCI memory space. */ +#define HIER_REG 0x2064 +#define HIER_INT_0_EN BIT(0) +#define HIER_INT_1_EN BIT(1) +#define HIER_INT_2_EN BIT(2) +#define HIER_INT_3_EN BIT(3) +#define HIER_INT_4_EN BIT(4) +#define HIER_INT_5_EN BIT(5) +#define HIER_INT_6_EN BIT(6) +#define HIER_INT_7_EN BIT(7) + +#define HIER_INT_EN_MSK \ + (HIER_INT_0_EN | HIER_INT_1_EN | HIER_INT_2_EN) + + +/* SEC Memory Space IPC output payload. + * + * This register is part of the output payload which SEC provides to host. + */ +#define BRIDGE_IPC_OUTPUT_PAYLOAD_REG 0x20C0 + +/* SeC Interrupt Cause Register - Host Aliveness Request + * This register is both an ICR to SeC and it is also exposed + * in the host-visible PCI memory space. + * The register is used by host to request SeC aliveness. + */ +#define SICR_HOST_ALIVENESS_REQ_REG 0x214C +#define SICR_HOST_ALIVENESS_REQ_REQUESTED BIT(0) + + +/* SeC Interrupt Cause Register - Host IPC Readiness + * + * This register is both an ICR to SeC and it is also exposed + * in the host-visible PCI memory space. + * This register is used by the host's SeC driver uses in order + * to synchronize with SeC about IPC interface state. + */ +#define SICR_HOST_IPC_READINESS_REQ_REG 0x2150 + + +#define SICR_HOST_IPC_READINESS_HOST_RDY BIT(0) +#define SICR_HOST_IPC_READINESS_SEC_RDY BIT(1) +#define SICR_HOST_IPC_READINESS_SYS_RDY \ + (SICR_HOST_IPC_READINESS_HOST_RDY | \ + SICR_HOST_IPC_READINESS_SEC_RDY) +#define SICR_HOST_IPC_READINESS_RDY_CLR BIT(2) + +/* SeC Interrupt Cause Register - SeC IPC Output Status + * + * This register indicates whether or not processing of the most recent + * command has been completed by the Host. 
+ * New commands and payloads should not be written by SeC until this + * register indicates that the previous command has been processed. + */ +#define SICR_SEC_IPC_OUTPUT_STATUS_REG 0x2154 +# define SEC_IPC_OUTPUT_STATUS_RDY BIT(0) + + + +/* MEI IPC Message payload size 64 bytes */ +#define PAYLOAD_SIZE 64 + +/* MAX size for SATT range 32MB */ +#define SATT_RANGE_MAX (32 << 20) + + +#endif /* _MEI_HW_TXE_REGS_H_ */ + diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c new file mode 100644 index 000000000000..93273783dec5 --- /dev/null +++ b/drivers/misc/mei/hw-txe.c @@ -0,0 +1,1190 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/pci.h> +#include <linux/jiffies.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/irqreturn.h> + +#include <linux/mei.h> + +#include "mei_dev.h" +#include "hw-txe.h" +#include "client.h" +#include "hbm.h" + +/** + * mei_txe_reg_read - Reads 32bit data from the device + * + * @base_addr: registers base address + * @offset: register offset + * + */ +static inline u32 mei_txe_reg_read(void __iomem *base_addr, + unsigned long offset) +{ + return ioread32(base_addr + offset); +} + +/** + * mei_txe_reg_write - Writes 32bit data to the device + * + * @base_addr: registers base address + * @offset: register offset + * @value: the value to write + */ +static inline void mei_txe_reg_write(void __iomem *base_addr, + unsigned long offset, u32 value) +{ + iowrite32(value, base_addr + offset); +} + +/** + * mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR + * + * @dev: the device structure + * @offset: register offset + * + * Doesn't check for aliveness while Reads 32bit data from the SeC BAR + */ +static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw, + unsigned long offset) +{ + return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset); +} + +/** + * mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR + * + * @dev: the device structure + * @offset: register offset + * + * Reads 32bit data from the SeC BAR and shout loud if aliveness is not set + */ +static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw, + unsigned long offset) +{ + WARN(!hw->aliveness, "sec read: aliveness not asserted\n"); + return mei_txe_sec_reg_read_silent(hw, offset); +} +/** + * mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR + * doesn't check for aliveness + * + * @dev: the device structure + * @offset: register offset + * @value: value to write + * + * Doesn't check for aliveness while writes 32bit data from to the SeC BAR + */ +static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw, + unsigned long offset, u32 value) +{ + mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value); +} + +/** + * mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR + * + * @dev: the device structure + * @offset: register offset + * @value: value to write + * + * Writes 32bit data from the SeC BAR and shout loud if aliveness is not set + */ +static inline void 
mei_txe_sec_reg_write(struct mei_txe_hw *hw, + unsigned long offset, u32 value) +{ + WARN(!hw->aliveness, "sec write: aliveness not asserted\n"); + mei_txe_sec_reg_write_silent(hw, offset, value); +} +/** + * mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR + * + * @hw: the device structure + * @offset: offset from which to read the data + * + */ +static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw, + unsigned long offset) +{ + return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset); +} + +/** + * mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR + * + * @hw: the device structure + * @offset: offset from which to write the data + * @value: the byte to write + */ +static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw, + unsigned long offset, u32 value) +{ + mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value); +} + +/** + * mei_txe_aliveness_set - request for aliveness change + * + * @dev: the device structure + * @req: requested aliveness value + * + * Request for aliveness change and returns true if the change is + * really needed and false if aliveness is already + * in the requested state + * Requires device lock to be held + */ +static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req) +{ + + struct mei_txe_hw *hw = to_txe_hw(dev); + bool do_req = hw->aliveness != req; + + dev_dbg(&dev->pdev->dev, "Aliveness current=%d request=%d\n", + hw->aliveness, req); + if (do_req) { + dev->pg_event = MEI_PG_EVENT_WAIT; + mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req); + } + return do_req; +} + + +/** + * mei_txe_aliveness_req_get - get aliveness requested register value + * + * @dev: the device structure + * + * Extract HICR_HOST_ALIVENESS_RESP_ACK bit from + * from HICR_HOST_ALIVENESS_REQ register value + */ +static u32 mei_txe_aliveness_req_get(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 reg; + reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG); + return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED; +} + +/** + * mei_txe_aliveness_get - get aliveness response register value + * @dev: the device structure + * + * Extract HICR_HOST_ALIVENESS_RESP_ACK bit + * from HICR_HOST_ALIVENESS_RESP register value + */ +static u32 mei_txe_aliveness_get(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 reg; + reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG); + return reg & HICR_HOST_ALIVENESS_RESP_ACK; +} + +/** + * mei_txe_aliveness_poll - waits for aliveness to settle + * + * @dev: the device structure + * @expected: expected aliveness value + * + * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set + * returns > 0 if the expected value was received, -ETIME otherwise + */ +static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + int t = 0; + + do { + hw->aliveness = mei_txe_aliveness_get(dev); + if (hw->aliveness == expected) { + dev->pg_event = MEI_PG_EVENT_IDLE; + dev_dbg(&dev->pdev->dev, + "aliveness settled after %d msecs\n", t); + return t; + } + mutex_unlock(&dev->device_lock); + msleep(MSEC_PER_SEC / 5); + mutex_lock(&dev->device_lock); + t += MSEC_PER_SEC / 5; + } while (t < SEC_ALIVENESS_WAIT_TIMEOUT); + + dev->pg_event = MEI_PG_EVENT_IDLE; + dev_err(&dev->pdev->dev, "aliveness timed out\n"); + return -ETIME; +} + +/** + * mei_txe_aliveness_wait - waits for aliveness to settle + * + * @dev: the device structure + * @expected: expected aliveness value + * + * Waits for 
HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set + * returns returns 0 on success and < 0 otherwise + */ +static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + const unsigned long timeout = + msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT); + long err; + int ret; + + hw->aliveness = mei_txe_aliveness_get(dev); + if (hw->aliveness == expected) + return 0; + + mutex_unlock(&dev->device_lock); + err = wait_event_timeout(hw->wait_aliveness_resp, + dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + mutex_lock(&dev->device_lock); + + hw->aliveness = mei_txe_aliveness_get(dev); + ret = hw->aliveness == expected ? 0 : -ETIME; + + if (ret) + dev_warn(&dev->pdev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n", + err, hw->aliveness, dev->pg_event); + else + dev_dbg(&dev->pdev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n", + jiffies_to_msecs(timeout - err), + hw->aliveness, dev->pg_event); + + dev->pg_event = MEI_PG_EVENT_IDLE; + return ret; +} + +/** + * mei_txe_aliveness_set_sync - sets an wait for aliveness to complete + * + * @dev: the device structure + * + * returns returns 0 on success and < 0 otherwise + */ +int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req) +{ + if (mei_txe_aliveness_set(dev, req)) + return mei_txe_aliveness_wait(dev, req); + return 0; +} + +/** + * mei_txe_pg_is_enabled - detect if PG is supported by HW + * + * @dev: the device structure + * + * returns: true is pg supported, false otherwise + */ +static bool mei_txe_pg_is_enabled(struct mei_device *dev) +{ + return true; +} + +/** + * mei_txe_pg_state - translate aliveness register value + * to the mei power gating state + * + * @dev: the device structure + * + * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise + */ +static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON; +} + +/** + * mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt + * + * @dev: the device structure + */ +static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 hintmsk; + /* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */ + hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG); + hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY; + mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk); +} + +/** + * mei_txe_input_doorbell_set + * - Sets bit 0 in SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL. 
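The aliveness helpers above boil down to a request/acknowledge handshake: write the request bit, then sleep (or poll, when interrupts are off) until the interrupt handler records the response and wakes the waiter. A condensed sketch of the sleeping variant; locking and the actual register write are omitted, and the acked flag stands in for the state the ISR updates:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_handshake {
        wait_queue_head_t wq;   /* initialized elsewhere, woken by the ISR */
        bool acked;             /* set to true by the interrupt handler */
};

/* Issue a request and wait up to @timeout_ms for the hardware to acknowledge. */
static int demo_handshake_wait(struct demo_handshake *hs, unsigned int timeout_ms)
{
        long left;

        hs->acked = false;
        /* ... write the request bit to the device here ... */

        left = wait_event_timeout(hs->wq, hs->acked,
                                  msecs_to_jiffies(timeout_ms));
        return left ? 0 : -ETIME;
}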
+ * @dev: the device structure + */ +static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw) +{ + /* Clear the interrupt cause */ + clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause); + mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1); +} + +/** + * mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1 + * + * @dev: the device structure + */ +static void mei_txe_output_ready_set(struct mei_txe_hw *hw) +{ + mei_txe_br_reg_write(hw, + SICR_SEC_IPC_OUTPUT_STATUS_REG, + SEC_IPC_OUTPUT_STATUS_RDY); +} + +/** + * mei_txe_is_input_ready - check if TXE is ready for receiving data + * + * @dev: the device structure + */ +static bool mei_txe_is_input_ready(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 status; + status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG); + return !!(SEC_IPC_INPUT_STATUS_RDY & status); +} + +/** + * mei_txe_intr_clear - clear all interrupts + * + * @dev: the device structure + */ +static inline void mei_txe_intr_clear(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG, + SEC_IPC_HOST_INT_STATUS_PENDING); + mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK); + mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK); +} + +/** + * mei_txe_intr_disable - disable all interrupts + * + * @dev: the device structure + */ +static void mei_txe_intr_disable(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + mei_txe_br_reg_write(hw, HHIER_REG, 0); + mei_txe_br_reg_write(hw, HIER_REG, 0); +} +/** + * mei_txe_intr_disable - enable all interrupts + * + * @dev: the device structure + */ +static void mei_txe_intr_enable(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK); + mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK); +} + +/** + * mei_txe_pending_interrupts - check if there are pending interrupts + * only Aliveness, Input ready, and output doorbell are of relevance + * + * @dev: the device structure + * + * Checks if there are pending interrupts + * only Aliveness, Readiness, Input ready, and Output doorbell are relevant + */ +static bool mei_txe_pending_interrupts(struct mei_device *dev) +{ + + struct mei_txe_hw *hw = to_txe_hw(dev); + bool ret = (hw->intr_cause & (TXE_INTR_READINESS | + TXE_INTR_ALIVENESS | + TXE_INTR_IN_READY | + TXE_INTR_OUT_DB)); + + if (ret) { + dev_dbg(&dev->pdev->dev, + "Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n", + !!(hw->intr_cause & TXE_INTR_IN_READY), + !!(hw->intr_cause & TXE_INTR_READINESS), + !!(hw->intr_cause & TXE_INTR_ALIVENESS), + !!(hw->intr_cause & TXE_INTR_OUT_DB)); + } + return ret; +} + +/** + * mei_txe_input_payload_write - write a dword to the host buffer + * at offset idx + * + * @dev: the device structure + * @idx: index in the host buffer + * @value: value + */ +static void mei_txe_input_payload_write(struct mei_device *dev, + unsigned long idx, u32 value) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG + + (idx * sizeof(u32)), value); +} + +/** + * mei_txe_out_data_read - read dword from the device buffer + * at offset idx + * + * @dev: the device structure + * @idx: index in the device buffer + * + * returns register value at index + */ +static u32 mei_txe_out_data_read(const struct mei_device *dev, + unsigned long idx) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + return mei_txe_br_reg_read(hw, + 
BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32))); +} + +/* Readiness */ + +/** + * mei_txe_readiness_set_host_rdy + * + * @dev: the device structure + */ +static void mei_txe_readiness_set_host_rdy(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + mei_txe_br_reg_write(hw, + SICR_HOST_IPC_READINESS_REQ_REG, + SICR_HOST_IPC_READINESS_HOST_RDY); +} + +/** + * mei_txe_readiness_clear + * + * @dev: the device structure + */ +static void mei_txe_readiness_clear(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG, + SICR_HOST_IPC_READINESS_RDY_CLR); +} +/** + * mei_txe_readiness_get - Reads and returns + * the HICR_SEC_IPC_READINESS register value + * + * @dev: the device structure + */ +static u32 mei_txe_readiness_get(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); +} + + +/** + * mei_txe_readiness_is_sec_rdy - check readiness + * for HICR_SEC_IPC_READINESS_SEC_RDY + * + * @readiness - cached readiness state + */ +static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness) +{ + return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY); +} + +/** + * mei_txe_hw_is_ready - check if the hw is ready + * + * @dev: the device structure + */ +static bool mei_txe_hw_is_ready(struct mei_device *dev) +{ + u32 readiness = mei_txe_readiness_get(dev); + return mei_txe_readiness_is_sec_rdy(readiness); +} + +/** + * mei_txe_host_is_ready - check if the host is ready + * + * @dev: the device structure + */ +static inline bool mei_txe_host_is_ready(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG); + return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY); +} + +/** + * mei_txe_readiness_wait - wait till readiness settles + * + * @dev: the device structure + * + * returns 0 on success and -ETIME on timeout + */ +static int mei_txe_readiness_wait(struct mei_device *dev) +{ + if (mei_txe_hw_is_ready(dev)) + return 0; + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, + msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT)); + mutex_lock(&dev->device_lock); + if (!dev->recvd_hw_ready) { + dev_err(&dev->pdev->dev, "wait for readiness failed\n"); + return -ETIME; + } + + dev->recvd_hw_ready = false; + return 0; +} + +/** + * mei_txe_hw_config - configure hardware at the start of the devices + * + * @dev: the device structure + * + * Configure hardware at the start of the device should be done only + * once at the device probe time + */ +static void mei_txe_hw_config(struct mei_device *dev) +{ + + struct mei_txe_hw *hw = to_txe_hw(dev); + /* Doesn't change in runtime */ + dev->hbuf_depth = PAYLOAD_SIZE / 4; + + hw->aliveness = mei_txe_aliveness_get(dev); + hw->readiness = mei_txe_readiness_get(dev); + + dev_dbg(&dev->pdev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n", + hw->aliveness, hw->readiness); +} + + +/** + * mei_txe_write - writes a message to device. + * + * @dev: the device structure + * @header: header of message + * @buf: message buffer will be written + * returns 1 if success, 0 - otherwise. 
+ */ + +static int mei_txe_write(struct mei_device *dev, + struct mei_msg_hdr *header, unsigned char *buf) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + unsigned long rem; + unsigned long length; + int slots = dev->hbuf_depth; + u32 *reg_buf = (u32 *)buf; + u32 dw_cnt; + int i; + + if (WARN_ON(!header || !buf)) + return -EINVAL; + + length = header->length; + + dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); + + dw_cnt = mei_data2slots(length); + if (dw_cnt > slots) + return -EMSGSIZE; + + if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n")) + return -EAGAIN; + + /* Enable Input Ready Interrupt. */ + mei_txe_input_ready_interrupt_enable(dev); + + if (!mei_txe_is_input_ready(dev)) { + struct mei_fw_status fw_status; + mei_fw_status(dev, &fw_status); + dev_err(&dev->pdev->dev, "Input is not ready " FW_STS_FMT "\n", + FW_STS_PRM(fw_status)); + return -EAGAIN; + } + + mei_txe_input_payload_write(dev, 0, *((u32 *)header)); + + for (i = 0; i < length / 4; i++) + mei_txe_input_payload_write(dev, i + 1, reg_buf[i]); + + rem = length & 0x3; + if (rem > 0) { + u32 reg = 0; + memcpy(®, &buf[length - rem], rem); + mei_txe_input_payload_write(dev, i + 1, reg); + } + + /* after each write the whole buffer is consumed */ + hw->slots = 0; + + /* Set Input-Doorbell */ + mei_txe_input_doorbell_set(hw); + + return 0; +} + +/** + * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer + * + * @dev: the device structure + * + * returns the PAYLOAD_SIZE - 4 + */ +static size_t mei_txe_hbuf_max_len(const struct mei_device *dev) +{ + return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr); +} + +/** + * mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer + * + * @dev: the device structure + * + * returns always hbuf_depth + */ +static int mei_txe_hbuf_empty_slots(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + return hw->slots; +} + +/** + * mei_txe_count_full_read_slots - mimics the me device circular buffer + * + * @dev: the device structure + * + * returns always buffer size in dwords count + */ +static int mei_txe_count_full_read_slots(struct mei_device *dev) +{ + /* read buffers has static size */ + return PAYLOAD_SIZE / 4; +} + +/** + * mei_txe_read_hdr - read message header which is always in 4 first bytes + * + * @dev: the device structure + * + * returns mei message header + */ + +static u32 mei_txe_read_hdr(const struct mei_device *dev) +{ + return mei_txe_out_data_read(dev, 0); +} +/** + * mei_txe_read - reads a message from the txe device. + * + * @dev: the device structure + * @buf: message buffer will be written + * @len: message size will be read + * + * returns -EINVAL on error wrong argument and 0 on success + */ +static int mei_txe_read(struct mei_device *dev, + unsigned char *buf, unsigned long len) +{ + + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 i; + u32 *reg_buf = (u32 *)buf; + u32 rem = len & 0x3; + + if (WARN_ON(!buf || !len)) + return -EINVAL; + + dev_dbg(&dev->pdev->dev, + "buffer-length = %lu buf[0]0x%08X\n", + len, mei_txe_out_data_read(dev, 0)); + + for (i = 0; i < len / 4; i++) { + /* skip header: index starts from 1 */ + u32 reg = mei_txe_out_data_read(dev, i + 1); + dev_dbg(&dev->pdev->dev, "buf[%d] = 0x%08X\n", i, reg); + *reg_buf++ = reg; + } + + if (rem) { + u32 reg = mei_txe_out_data_read(dev, i + 1); + memcpy(reg_buf, ®, rem); + } + + mei_txe_output_ready_set(hw); + return 0; +} + +/** + * mei_txe_hw_reset - resets host and fw. 
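The write and read paths above move data in 32-bit payload slots and handle a trailing 1-3 bytes by bouncing them through a zero-padded u32. A standalone sketch of that packing step; demo_slot_write() and the slot array are stand-ins for the device payload registers:

#include <linux/string.h>
#include <linux/types.h>

static u32 demo_slots[64];      /* stand-in for the IPC payload registers */

static void demo_slot_write(unsigned long idx, u32 value)
{
        demo_slots[idx] = value;
}

/*
 * Copy @len bytes into consecutive 32-bit slots starting at @first;
 * the caller guarantees the data fits into the payload area.
 */
static void demo_payload_copy(unsigned long first, const unsigned char *buf,
                              size_t len)
{
        const u32 *dw = (const u32 *)buf;
        size_t i, rem = len & 0x3;

        for (i = 0; i < len / 4; i++)
                demo_slot_write(first + i, dw[i]);

        if (rem) {                      /* pad the last partial dword with zeros */
                u32 last = 0;

                memcpy(&last, buf + len - rem, rem);
                demo_slot_write(first + i, last);
        }
}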
+ * + * @dev: the device structure + * @intr_enable: if interrupt should be enabled after reset. + * + * returns 0 on success and < 0 in case of error + */ +static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + u32 aliveness_req; + /* + * read input doorbell to ensure consistency between Bridge and SeC + * return value might be garbage return + */ + (void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG); + + aliveness_req = mei_txe_aliveness_req_get(dev); + hw->aliveness = mei_txe_aliveness_get(dev); + + /* Disable interrupts in this stage we will poll */ + mei_txe_intr_disable(dev); + + /* + * If Aliveness Request and Aliveness Response are not equal then + * wait for them to be equal + * Since we might have interrupts disabled - poll for it + */ + if (aliveness_req != hw->aliveness) + if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) { + dev_err(&dev->pdev->dev, + "wait for aliveness settle failed ... bailing out\n"); + return -EIO; + } + + /* + * If Aliveness Request and Aliveness Response are set then clear them + */ + if (aliveness_req) { + mei_txe_aliveness_set(dev, 0); + if (mei_txe_aliveness_poll(dev, 0) < 0) { + dev_err(&dev->pdev->dev, + "wait for aliveness failed ... bailing out\n"); + return -EIO; + } + } + + /* + * Set rediness RDY_CLR bit + */ + mei_txe_readiness_clear(dev); + + return 0; +} + +/** + * mei_txe_hw_start - start the hardware after reset + * + * @dev: the device structure + * + * returns 0 on success and < 0 in case of error + */ +static int mei_txe_hw_start(struct mei_device *dev) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + int ret; + + u32 hisr; + + /* bring back interrupts */ + mei_txe_intr_enable(dev); + + ret = mei_txe_readiness_wait(dev); + if (ret < 0) { + dev_err(&dev->pdev->dev, "wating for readiness failed\n"); + return ret; + } + + /* + * If HISR.INT2_STS interrupt status bit is set then clear it. + */ + hisr = mei_txe_br_reg_read(hw, HISR_REG); + if (hisr & HISR_INT_2_STS) + mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS); + + /* Clear the interrupt cause of OutputDoorbell */ + clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause); + + ret = mei_txe_aliveness_set_sync(dev, 1); + if (ret < 0) { + dev_err(&dev->pdev->dev, "wait for aliveness failed ... 
bailing out\n"); + return ret; + } + + /* enable input ready interrupts: + * SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK + */ + mei_txe_input_ready_interrupt_enable(dev); + + + /* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */ + mei_txe_output_ready_set(hw); + + /* Set bit SICR_HOST_IPC_READINESS.HOST_RDY + */ + mei_txe_readiness_set_host_rdy(dev); + + return 0; +} + +/** + * mei_txe_check_and_ack_intrs - translate multi BAR interrupt into + * single bit mask and acknowledge the interrupts + * + * @dev: the device structure + * @do_ack: acknowledge interrupts + */ +static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + u32 hisr; + u32 hhisr; + u32 ipc_isr; + u32 aliveness; + bool generated; + + /* read interrupt registers */ + hhisr = mei_txe_br_reg_read(hw, HHISR_REG); + generated = (hhisr & IPC_HHIER_MSK); + if (!generated) + goto out; + + hisr = mei_txe_br_reg_read(hw, HISR_REG); + + aliveness = mei_txe_aliveness_get(dev); + if (hhisr & IPC_HHIER_SEC && aliveness) + ipc_isr = mei_txe_sec_reg_read_silent(hw, + SEC_IPC_HOST_INT_STATUS_REG); + else + ipc_isr = 0; + + generated = generated || + (hisr & HISR_INT_STS_MSK) || + (ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING); + + if (generated && do_ack) { + /* Save the interrupt causes */ + hw->intr_cause |= hisr & HISR_INT_STS_MSK; + if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY) + hw->intr_cause |= TXE_INTR_IN_READY; + + + mei_txe_intr_disable(dev); + /* Clear the interrupts in hierarchy: + * IPC and Bridge, than the High Level */ + mei_txe_sec_reg_write_silent(hw, + SEC_IPC_HOST_INT_STATUS_REG, ipc_isr); + mei_txe_br_reg_write(hw, HISR_REG, hisr); + mei_txe_br_reg_write(hw, HHISR_REG, hhisr); + } + +out: + return generated; +} + +/** + * mei_txe_irq_quick_handler - The ISR of the MEI device + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * returns irqreturn_t + */ +irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id) +{ + struct mei_device *dev = dev_id; + + if (mei_txe_check_and_ack_intrs(dev, true)) + return IRQ_WAKE_THREAD; + return IRQ_NONE; +} + + +/** + * mei_txe_irq_thread_handler - txe interrupt thread + * + * @irq: The irq number + * @dev_id: pointer to the device structure + * + * returns irqreturn_t + * + */ +irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id) +{ + struct mei_device *dev = (struct mei_device *) dev_id; + struct mei_txe_hw *hw = to_txe_hw(dev); + struct mei_cl_cb complete_list; + s32 slots; + int rets = 0; + + dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n", + mei_txe_br_reg_read(hw, HHISR_REG), + mei_txe_br_reg_read(hw, HISR_REG), + mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG)); + + + /* initialize our complete list */ + mutex_lock(&dev->device_lock); + mei_io_list_init(&complete_list); + + if (pci_dev_msi_enabled(dev->pdev)) + mei_txe_check_and_ack_intrs(dev, true); + + /* show irq events */ + mei_txe_pending_interrupts(dev); + + hw->aliveness = mei_txe_aliveness_get(dev); + hw->readiness = mei_txe_readiness_get(dev); + + /* Readiness: + * Detection of TXE driver going through reset + * or TXE driver resetting the HECI interface. 
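The interrupt path above is split in two: a quick handler that acknowledges the hardware and folds the separate sources into a single intr_cause bitmask, and a threaded handler that services each cause with test_and_clear_bit() where sleeping is allowed. A reduced sketch of that split; all names here are illustrative:

#include <linux/interrupt.h>
#include <linux/bitops.h>

#define DEMO_INTR_READY_BIT     0

struct demo_irq_dev {
        unsigned long intr_cause;       /* causes flattened by the quick handler */
};

/* Quick handler: runs in hard-irq context, only acks and records the cause. */
static irqreturn_t demo_irq_quick(int irq, void *dev_id)
{
        struct demo_irq_dev *dev = dev_id;

        /* read/acknowledge the hardware status registers here */
        set_bit(DEMO_INTR_READY_BIT, &dev->intr_cause);
        return IRQ_WAKE_THREAD;         /* defer the heavy lifting to the thread */
}

/* Threaded handler: may sleep, take locks and run the protocol work. */
static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
        struct demo_irq_dev *dev = dev_id;

        if (test_and_clear_bit(DEMO_INTR_READY_BIT, &dev->intr_cause)) {
                /* service the "ready" cause */
        }
        return IRQ_HANDLED;
}

/* registered e.g. with:
 * request_threaded_irq(irq, demo_irq_quick, demo_irq_thread,
 *                      0 /+ or IRQF flags as required +/, "demo", dev);
 */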
+ */ + if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) { + dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n"); + + /* Check if SeC is going through reset */ + if (mei_txe_readiness_is_sec_rdy(hw->readiness)) { + dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); + dev->recvd_hw_ready = true; + } else { + dev->recvd_hw_ready = false; + if (dev->dev_state != MEI_DEV_RESETTING) { + + dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); + schedule_work(&dev->reset_work); + goto end; + + } + } + wake_up(&dev->wait_hw_ready); + } + + /************************************************************/ + /* Check interrupt cause: + * Aliveness: Detection of SeC acknowledge of host request that + * it remain alive or host cancellation of that request. + */ + + if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) { + /* Clear the interrupt cause */ + dev_dbg(&dev->pdev->dev, + "Aliveness Interrupt: Status: %d\n", hw->aliveness); + dev->pg_event = MEI_PG_EVENT_RECEIVED; + if (waitqueue_active(&hw->wait_aliveness_resp)) + wake_up(&hw->wait_aliveness_resp); + } + + + /* Output Doorbell: + * Detection of SeC having sent output to host + */ + slots = mei_count_full_read_slots(dev); + if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) { + /* Read from TXE */ + rets = mei_irq_read_handler(dev, &complete_list, &slots); + if (rets && dev->dev_state != MEI_DEV_RESETTING) { + dev_err(&dev->pdev->dev, + "mei_irq_read_handler ret = %d.\n", rets); + + schedule_work(&dev->reset_work); + goto end; + } + } + /* Input Ready: Detection if host can write to SeC */ + if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) { + dev->hbuf_is_ready = true; + hw->slots = dev->hbuf_depth; + } + + if (hw->aliveness && dev->hbuf_is_ready) { + /* get the real register value */ + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + rets = mei_irq_write_handler(dev, &complete_list); + if (rets && rets != -EMSGSIZE) + dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n", + rets); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + } + + mei_irq_compl_handler(dev, &complete_list); + +end: + dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); + + mutex_unlock(&dev->device_lock); + + mei_enable_interrupts(dev); + return IRQ_HANDLED; +} + + +/** + * mei_txe_fw_status - retrieve fw status from the pci config space + * + * @dev: the device structure + * @fw_status: fw status registers storage + * + * returns: 0 on success an error code otherwise + */ +static int mei_txe_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) +{ + const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1}; + int i; + + if (!fw_status) + return -EINVAL; + + fw_status->count = 2; + + for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) { + int ret; + ret = pci_read_config_dword(dev->pdev, + pci_cfg_reg[i], &fw_status->status[i]); + if (ret) + return ret; + } + + return 0; +} + +static const struct mei_hw_ops mei_txe_hw_ops = { + + .fw_status = mei_txe_fw_status, + .host_is_ready = mei_txe_host_is_ready, + + .pg_state = mei_txe_pg_state, + + .hw_is_ready = mei_txe_hw_is_ready, + .hw_reset = mei_txe_hw_reset, + .hw_config = mei_txe_hw_config, + .hw_start = mei_txe_hw_start, + + .pg_is_enabled = mei_txe_pg_is_enabled, + + .intr_clear = mei_txe_intr_clear, + .intr_enable = mei_txe_intr_enable, + .intr_disable = mei_txe_intr_disable, + + .hbuf_free_slots = mei_txe_hbuf_empty_slots, + .hbuf_is_ready = mei_txe_is_input_ready, + .hbuf_max_len = 
mei_txe_hbuf_max_len, + + .write = mei_txe_write, + + .rdbuf_full_slots = mei_txe_count_full_read_slots, + .read_hdr = mei_txe_read_hdr, + + .read = mei_txe_read, + +}; + +#define MEI_CFG_TXE_FW_STS \ + .fw_status.count = 2, \ + .fw_status.status[0] = PCI_CFG_TXE_FW_STS0, \ + .fw_status.status[1] = PCI_CFG_TXE_FW_STS1 + +const struct mei_cfg mei_txe_cfg = { + MEI_CFG_TXE_FW_STS, +}; + + +/** + * mei_txe_dev_init - allocates and initializes txe hardware specific structure + * + * @pdev - pci device + * @cfg - per device generation config + * + * returns struct mei_device * on success or NULL; + * + */ +struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, + const struct mei_cfg *cfg) +{ + struct mei_device *dev; + struct mei_txe_hw *hw; + + dev = kzalloc(sizeof(struct mei_device) + + sizeof(struct mei_txe_hw), GFP_KERNEL); + if (!dev) + return NULL; + + mei_device_init(dev, cfg); + + hw = to_txe_hw(dev); + + init_waitqueue_head(&hw->wait_aliveness_resp); + + dev->ops = &mei_txe_hw_ops; + + dev->pdev = pdev; + return dev; +} + +/** + * mei_txe_setup_satt2 - SATT2 configuration for DMA support. + * + * @dev: the device structure + * @addr: physical address start of the range + * @range: physical range size + */ +int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range) +{ + struct mei_txe_hw *hw = to_txe_hw(dev); + + u32 lo32 = lower_32_bits(addr); + u32 hi32 = upper_32_bits(addr); + u32 ctrl; + + /* SATT is limited to 36 Bits */ + if (hi32 & ~0xF) + return -EINVAL; + + /* SATT has to be 16Byte aligned */ + if (lo32 & 0xF) + return -EINVAL; + + /* SATT range has to be 4Bytes aligned */ + if (range & 0x4) + return -EINVAL; + + /* SATT is limited to 32 MB range*/ + if (range > SATT_RANGE_MAX) + return -EINVAL; + + ctrl = SATT2_CTRL_VALID_MSK; + ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT; + + mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range); + mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32); + mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl); + dev_dbg(&dev->pdev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n", + range, lo32, ctrl); + + return 0; +} diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h new file mode 100644 index 000000000000..e244af79167f --- /dev/null +++ b/drivers/misc/mei/hw-txe.h @@ -0,0 +1,77 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
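mei_txe_dev_init() above allocates the generic device structure and the TXE-specific mei_txe_hw in a single kzalloc() and reaches the private part through the to_txe_hw() cast, with container_of() available for the reverse direction. A reduced sketch of that layout; the structure names are illustrative and the generic struct is assumed to end in a trailing hw area, as the casts imply:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_hw {
        u32 aliveness;
};

struct demo_dev {
        int common_state;
        char hw[0];             /* hardware-specific area allocated right after */
};

#define to_demo_hw(dev) ((struct demo_hw *)((dev)->hw))

static struct demo_dev *demo_dev_init(void)
{
        struct demo_dev *dev;

        /* one allocation covers the generic part plus the hw-specific tail */
        dev = kzalloc(sizeof(*dev) + sizeof(struct demo_hw), GFP_KERNEL);
        if (!dev)
                return NULL;

        to_demo_hw(dev)->aliveness = 0;
        return dev;
}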
+ * + */ + +#ifndef _MEI_HW_TXE_H_ +#define _MEI_HW_TXE_H_ + +#include <linux/irqreturn.h> + +#include "hw.h" +#include "hw-txe-regs.h" + +#define MEI_TXI_RPM_TIMEOUT 500 /* ms */ + +/* Flatten Hierarchy interrupt cause */ +#define TXE_INTR_READINESS_BIT 0 /* HISR_INT_0_STS */ +#define TXE_INTR_READINESS HISR_INT_0_STS +#define TXE_INTR_ALIVENESS_BIT 1 /* HISR_INT_1_STS */ +#define TXE_INTR_ALIVENESS HISR_INT_1_STS +#define TXE_INTR_OUT_DB_BIT 2 /* HISR_INT_2_STS */ +#define TXE_INTR_OUT_DB HISR_INT_2_STS +#define TXE_INTR_IN_READY_BIT 8 /* beyond HISR */ +#define TXE_INTR_IN_READY BIT(8) + +/** + * struct mei_txe_hw - txe hardware specifics + * + * @mem_addr: SeC and BRIDGE bars + * @aliveness: aliveness (power gating) state of the hardware + * @readiness: readiness state of the hardware + * @wait_aliveness_resp: aliveness wait queue + * @intr_cause: translated interrupt cause + */ +struct mei_txe_hw { + void __iomem *mem_addr[NUM_OF_MEM_BARS]; + u32 aliveness; + u32 readiness; + u32 slots; + + wait_queue_head_t wait_aliveness_resp; + + unsigned long intr_cause; +}; + +#define to_txe_hw(dev) (struct mei_txe_hw *)((dev)->hw) + +static inline struct mei_device *hw_txe_to_mei(struct mei_txe_hw *hw) +{ + return container_of((void *)hw, struct mei_device, hw); +} + +extern const struct mei_cfg mei_txe_cfg; + +struct mei_device *mei_txe_dev_init(struct pci_dev *pdev, + const struct mei_cfg *cfg); + +irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id); +irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id); + +int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req); + +int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range); + + +#endif /* _MEI_HW_TXE_H_ */ diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index dd44e33ad2b6..dd448e58cc87 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -22,7 +22,7 @@ /* * Timeouts in Seconds */ -#define MEI_INTEROP_TIMEOUT 7 /* Timeout on ready message */ +#define MEI_HW_READY_TIMEOUT 2 /* Timeout on ready message */ #define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */ #define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ @@ -31,13 +31,20 @@ #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ #define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ +#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ +#define MEI_HBM_TIMEOUT 1 /* 1 second */ /* * MEI Version */ -#define HBM_MINOR_VERSION 0 +#define HBM_MINOR_VERSION 1 #define HBM_MAJOR_VERSION 1 -#define HBM_TIMEOUT 1 /* 1 second */ + +/* + * MEI version with PGI support + */ +#define HBM_MINOR_VERSION_PGI 1 +#define HBM_MAJOR_VERSION_PGI 1 /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f @@ -69,6 +76,11 @@ #define MEI_FLOW_CONTROL_CMD 0x08 +#define MEI_PG_ISOLATION_ENTRY_REQ_CMD 0x0a +#define MEI_PG_ISOLATION_ENTRY_RES_CMD 0x8a +#define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b +#define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b + /* * MEI Stop Reason * used by hbm_host_stop_request.reason @@ -89,19 +101,19 @@ enum mei_stop_reason_types { * Client Connect Status * used by hbm_client_connect_response.status */ -enum client_connect_status_types { - CCS_SUCCESS = 0x00, - CCS_NOT_FOUND = 0x01, - CCS_ALREADY_STARTED = 0x02, - CCS_OUT_OF_RESOURCES = 0x03, - CCS_MESSAGE_SMALL = 0x04 +enum mei_cl_connect_status { + MEI_CL_CONN_SUCCESS = 0x00, + MEI_CL_CONN_NOT_FOUND = 0x01, + MEI_CL_CONN_ALREADY_STARTED = 0x02, + MEI_CL_CONN_OUT_OF_RESOURCES = 0x03, + MEI_CL_CONN_MESSAGE_SMALL = 0x04 }; /* * Client Disconnect Status */ -enum 
client_disconnect_status_types { - CDS_SUCCESS = 0x00 +enum mei_cl_disconnect_status { + MEI_CL_DISCONN_SUCCESS = 0x00 }; /* @@ -208,6 +220,17 @@ struct hbm_props_response { } __packed; /** + * struct hbm_power_gate - power gate request/response + * + * @hbm_cmd - bus message command header + * @reserved[3] + */ +struct hbm_power_gate { + u8 hbm_cmd; + u8 reserved[3]; +} __packed; + +/** * struct hbm_client_connect_request - connect/disconnect request * * @hbm_cmd - bus message command header diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index cdd31c2a2a2b..006929222481 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -74,9 +74,13 @@ int mei_reset(struct mei_device *dev) if (state != MEI_DEV_INITIALIZING && state != MEI_DEV_DISABLED && state != MEI_DEV_POWER_DOWN && - state != MEI_DEV_POWER_UP) - dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", - mei_dev_state_str(state)); + state != MEI_DEV_POWER_UP) { + struct mei_fw_status fw_status; + mei_fw_status(dev, &fw_status); + dev_warn(&dev->pdev->dev, + "unexpected reset: dev_state = %s " FW_STS_FMT "\n", + mei_dev_state_str(state), FW_STS_PRM(fw_status)); + } /* we're already in reset, cancel the init timer * if the reset was called due the hbm protocol error @@ -116,17 +120,15 @@ int mei_reset(struct mei_device *dev) mei_cl_unlink(&dev->wd_cl); mei_cl_unlink(&dev->iamthif_cl); mei_amthif_reset_params(dev); - memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); } + mei_hbm_reset(dev); - dev->me_clients_num = 0; dev->rd_msg_hdr = 0; dev->wd_pending = false; if (ret) { dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); - dev->dev_state = MEI_DEV_DISABLED; return ret; } @@ -139,7 +141,6 @@ int mei_reset(struct mei_device *dev) ret = mei_hw_start(dev); if (ret) { dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); - dev->dev_state = MEI_DEV_DISABLED; return ret; } @@ -149,7 +150,7 @@ int mei_reset(struct mei_device *dev) ret = mei_hbm_start_req(dev); if (ret) { dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); - dev->dev_state = MEI_DEV_DISABLED; + dev->dev_state = MEI_DEV_RESETTING; return ret; } @@ -166,6 +167,7 @@ EXPORT_SYMBOL_GPL(mei_reset); */ int mei_start(struct mei_device *dev) { + int ret; mutex_lock(&dev->device_lock); /* acknowledge interrupt and stop interrupts */ @@ -175,10 +177,18 @@ int mei_start(struct mei_device *dev) dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); - dev->dev_state = MEI_DEV_INITIALIZING; dev->reset_count = 0; - mei_reset(dev); + do { + dev->dev_state = MEI_DEV_INITIALIZING; + ret = mei_reset(dev); + + if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "reset failed ret = %d", ret); + goto err; + } + } while (ret); + /* we cannot start the device w/o hbm start message completed */ if (dev->dev_state == MEI_DEV_DISABLED) { dev_err(&dev->pdev->dev, "reset failed"); goto err; @@ -238,27 +248,40 @@ int mei_restart(struct mei_device *dev) mutex_unlock(&dev->device_lock); - if (err || dev->dev_state == MEI_DEV_DISABLED) + if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "device disabled = %d\n", err); return -ENODEV; + } + + /* try to start again */ + if (err) + schedule_work(&dev->reset_work); + return 0; } EXPORT_SYMBOL_GPL(mei_restart); - static void mei_reset_work(struct work_struct *work) { struct mei_device *dev = container_of(work, struct mei_device, reset_work); + int ret; mutex_lock(&dev->device_lock); - mei_reset(dev); + ret = 
mei_reset(dev); mutex_unlock(&dev->device_lock); - if (dev->dev_state == MEI_DEV_DISABLED) - dev_err(&dev->pdev->dev, "reset failed"); + if (dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "device disabled = %d\n", ret); + return; + } + + /* retry reset in case of failure */ + if (ret) + schedule_work(&dev->reset_work); } void mei_stop(struct mei_device *dev) @@ -269,6 +292,8 @@ void mei_stop(struct mei_device *dev) mei_nfc_host_exit(dev); + mei_cl_bus_remove_devices(dev); + mutex_lock(&dev->device_lock); mei_wd_stop(dev); @@ -282,15 +307,58 @@ void mei_stop(struct mei_device *dev) } EXPORT_SYMBOL_GPL(mei_stop); +/** + * mei_write_is_idle - check if the write queues are idle + * + * @dev: the device structure + * + * returns true of there is no pending write + */ +bool mei_write_is_idle(struct mei_device *dev) +{ + bool idle = (dev->dev_state == MEI_DEV_ENABLED && + list_empty(&dev->ctrl_wr_list.list) && + list_empty(&dev->write_list.list)); + dev_dbg(&dev->pdev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n", + idle, + mei_dev_state_str(dev->dev_state), + list_empty(&dev->ctrl_wr_list.list), + list_empty(&dev->write_list.list)); -void mei_device_init(struct mei_device *dev) + return idle; +} +EXPORT_SYMBOL_GPL(mei_write_is_idle); + +int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) +{ + int i; + const struct mei_fw_status *fw_src = &dev->cfg->fw_status; + + if (!fw_status) + return -EINVAL; + + fw_status->count = fw_src->count; + for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { + int ret; + ret = pci_read_config_dword(dev->pdev, + fw_src->status[i], &fw_status->status[i]); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mei_fw_status); + +void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg) { /* setup our list array */ INIT_LIST_HEAD(&dev->file_list); INIT_LIST_HEAD(&dev->device_list); mutex_init(&dev->device_lock); init_waitqueue_head(&dev->wait_hw_ready); + init_waitqueue_head(&dev->wait_pg); init_waitqueue_head(&dev->wait_recvd_msg); init_waitqueue_head(&dev->wait_stop_wd); dev->dev_state = MEI_DEV_INITIALIZING; @@ -319,6 +387,9 @@ void mei_device_init(struct mei_device *dev) * 0: Reserved for MEI Bus Message communications */ bitmap_set(dev->host_clients_map, 0, 1); + + dev->pg_event = MEI_PG_EVENT_IDLE; + dev->cfg = cfg; } EXPORT_SYMBOL_GPL(mei_device_init); diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index f0fbb5179f80..4e3cba6da3f5 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -26,7 +26,6 @@ #include "mei_dev.h" #include "hbm.h" -#include "hw-me.h" #include "client.h" @@ -161,29 +160,63 @@ static int mei_cl_irq_read_msg(struct mei_device *dev, } /** + * mei_cl_irq_disconnect_rsp - send disconnection response message + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * returns 0, OK; otherwise, error. 
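mei_reset_work() above now reschedules itself when the reset fails instead of giving up after the first attempt; the cap on consecutive resets is tracked via dev->reset_count in code outside this hunk. A generic sketch of that self-rescheduling pattern, with an explicit retry limit added purely for illustration:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

#define EX_MAX_RETRIES	3	/* illustrative cap */

struct ex_ctx {
	struct work_struct reset_work;
	atomic_t retries;
};

static int ex_do_reset(struct ex_ctx *ctx)
{
	/* stand-in for the real reset sequence */
	return 0;
}

static void ex_reset_work(struct work_struct *work)
{
	struct ex_ctx *ctx = container_of(work, struct ex_ctx, reset_work);

	if (!ex_do_reset(ctx)) {
		atomic_set(&ctx->retries, 0);
		return;
	}

	/* reset failed: try again later, but give up after a few attempts */
	if (atomic_inc_return(&ctx->retries) < EX_MAX_RETRIES)
		schedule_work(&ctx->reset_work);
}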
+ */ +static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + + slots = mei_hbuf_empty_slots(dev); + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response)); + + if (slots < msg_slots) + return -EMSGSIZE; + + ret = mei_hbm_cl_disconnect_rsp(dev, cl); + + cl->state = MEI_FILE_DISCONNECTED; + cl->status = 0; + list_del(&cb->list); + mei_io_cb_free(cb); + + return ret; +} + + + +/** * mei_cl_irq_close - processes close related operation from * interrupt thread context - send disconnect request * * @cl: client * @cb: callback block. - * @slots: free slots. * @cmpl_list: complete list. * * returns 0, OK; otherwise, error. */ static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb, - s32 *slots, struct mei_cl_cb *cmpl_list) + struct mei_cl_cb *cmpl_list) { struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; - u32 msg_slots = - mei_data2slots(sizeof(struct hbm_client_connect_request)); + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); - if (*slots < msg_slots) + if (slots < msg_slots) return -EMSGSIZE; - *slots -= msg_slots; - if (mei_hbm_cl_disconnect_req(dev, cl)) { cl->status = 0; cb->buf_idx = 0; @@ -207,27 +240,23 @@ static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb, * * @cl: client * @cb: callback block. - * @slots: free slots. * @cmpl_list: complete list. * * returns 0, OK; otherwise, error. */ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, - s32 *slots, struct mei_cl_cb *cmpl_list) + struct mei_cl_cb *cmpl_list) { struct mei_device *dev = cl->dev; - u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); - + u32 msg_slots; + int slots; int ret; + msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); + slots = mei_hbuf_empty_slots(dev); - if (*slots < msg_slots) { - /* return the cancel routine */ - list_del(&cb->list); + if (slots < msg_slots) return -EMSGSIZE; - } - - *slots -= msg_slots; ret = mei_hbm_cl_flow_control_req(dev, cl); if (ret) { @@ -244,32 +273,30 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, /** - * mei_cl_irq_ioctl - processes client ioctl related operation from the - * interrupt thread context - send connection request + * mei_cl_irq_connect - send connect request in irq_thread context * * @cl: client * @cb: callback block. - * @slots: free slots. * @cmpl_list: complete list. * * returns 0, OK; otherwise, error. 
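The reworked irq helpers above no longer pass a shared slots counter around; each one recomputes the free space with mei_hbuf_empty_slots() and compares it against the slot count of the message it is about to send, bailing out with -EMSGSIZE when the host buffer is too small. A hedged sketch of that check, assuming 4-byte slots and a hypothetical 4-byte message header:

#include <linux/kernel.h>
#include <linux/errno.h>

#define EX_SLOT_SIZE	4	/* assumption: one slot carries 4 bytes      */
#define EX_HDR_SIZE	4	/* assumption: fixed 4-byte message header   */

/* number of slots a payload of @len bytes occupies, header included */
static u32 ex_data2slots(size_t len)
{
	return DIV_ROUND_UP(EX_HDR_SIZE + len, EX_SLOT_SIZE);
}

static int ex_try_send_ctrl(int free_slots, size_t payload_len)
{
	u32 msg_slots = ex_data2slots(payload_len);

	if (free_slots < 0 || (u32)free_slots < msg_slots)
		return -EMSGSIZE;	/* not enough room, retry later */

	/* ... compose and write the control message here ... */
	return 0;
}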
*/ -static int mei_cl_irq_ioctl(struct mei_cl *cl, struct mei_cl_cb *cb, - s32 *slots, struct mei_cl_cb *cmpl_list) +static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) { struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; int ret; - u32 msg_slots = - mei_data2slots(sizeof(struct hbm_client_connect_request)); + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); - if (*slots < msg_slots) { - /* return the cancel routine */ - list_del(&cb->list); - return -EMSGSIZE; - } + if (mei_cl_is_other_connecting(cl)) + return 0; - *slots -= msg_slots; + if (slots < msg_slots) + return -EMSGSIZE; cl->state = MEI_FILE_CONNECTING; @@ -323,7 +350,7 @@ int mei_irq_read_handler(struct mei_device *dev, dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", *slots); /* we can't read the message */ - ret = -ERANGE; + ret = -ENODATA; goto end; } @@ -409,10 +436,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) s32 slots; int ret; - if (!mei_hbuf_is_ready(dev)) { - dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n"); + + if (!mei_hbuf_acquire(dev)) return 0; - } + slots = mei_hbuf_empty_slots(dev); if (slots <= 0) return -EMSGSIZE; @@ -428,8 +455,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) cl->status = 0; list_del(&cb->list); - if (MEI_WRITING == cl->writing_state && - cb->fop_type == MEI_FOP_WRITE && + if (cb->fop_type == MEI_FOP_WRITE && cl != &dev->iamthif_cl) { cl_dbg(dev, cl, "MEI WRITE COMPLETE\n"); cl->writing_state = MEI_WRITE_COMPLETE; @@ -447,29 +473,16 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) if (dev->wd_state == MEI_WD_STOPPING) { dev->wd_state = MEI_WD_IDLE; - wake_up_interruptible(&dev->wait_stop_wd); + wake_up(&dev->wait_stop_wd); } - if (dev->wr_ext_msg.hdr.length) { - mei_write_message(dev, &dev->wr_ext_msg.hdr, - dev->wr_ext_msg.data); - slots -= mei_data2slots(dev->wr_ext_msg.hdr.length); - dev->wr_ext_msg.hdr.length = 0; - } - if (dev->dev_state == MEI_DEV_ENABLED) { + if (mei_cl_is_connected(&dev->wd_cl)) { if (dev->wd_pending && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { - if (mei_wd_send(dev)) - dev_dbg(&dev->pdev->dev, "wd send failed.\n"); - else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) - return -ENODEV; - + ret = mei_wd_send(dev); + if (ret) + return ret; dev->wd_pending = false; - - if (dev->wd_state == MEI_WD_RUNNING) - slots -= mei_data2slots(MEI_WD_START_MSG_SIZE); - else - slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE); } } @@ -484,28 +497,31 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) switch (cb->fop_type) { case MEI_FOP_CLOSE: /* send disconnect message */ - ret = mei_cl_irq_close(cl, cb, &slots, cmpl_list); + ret = mei_cl_irq_close(cl, cb, cmpl_list); if (ret) return ret; break; case MEI_FOP_READ: /* send flow control message */ - ret = mei_cl_irq_read(cl, cb, &slots, cmpl_list); + ret = mei_cl_irq_read(cl, cb, cmpl_list); if (ret) return ret; break; - case MEI_FOP_IOCTL: + case MEI_FOP_CONNECT: /* connect message */ - if (mei_cl_is_other_connecting(cl)) - continue; - ret = mei_cl_irq_ioctl(cl, cb, &slots, cmpl_list); + ret = mei_cl_irq_connect(cl, cb, cmpl_list); if (ret) return ret; break; - + case MEI_FOP_DISCONNECT_RSP: + /* send disconnect resp */ + ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list); + if (ret) + return ret; + break; default: BUG(); } @@ -518,11 +534,9 @@ int 
mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) if (cl == NULL) continue; if (cl == &dev->iamthif_cl) - ret = mei_amthif_irq_write_complete(cl, cb, - &slots, cmpl_list); + ret = mei_amthif_irq_write(cl, cb, cmpl_list); else - ret = mei_cl_irq_write_complete(cl, cb, - &slots, cmpl_list); + ret = mei_cl_irq_write(cl, cb, cmpl_list); if (ret) return ret; } @@ -541,8 +555,7 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler); void mei_timer(struct work_struct *work) { unsigned long timeout; - struct mei_cl *cl_pos = NULL; - struct mei_cl *cl_next = NULL; + struct mei_cl *cl; struct mei_cl_cb *cb_pos = NULL; struct mei_cl_cb *cb_next = NULL; @@ -570,9 +583,9 @@ void mei_timer(struct work_struct *work) goto out; /*** connect/disconnect timeouts ***/ - list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { - if (cl_pos->timer_count) { - if (--cl_pos->timer_count == 0) { + list_for_each_entry(cl, &dev->file_list, link) { + if (cl->timer_count) { + if (--cl->timer_count == 0) { dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); mei_reset(dev); goto out; @@ -580,6 +593,9 @@ void mei_timer(struct work_struct *work) } } + if (!mei_cl_is_connected(&dev->iamthif_cl)) + goto out; + if (dev->iamthif_stall_timer) { if (--dev->iamthif_stall_timer == 0) { dev_err(&dev->pdev->dev, "timer: amthif hanged.\n"); @@ -619,10 +635,10 @@ void mei_timer(struct work_struct *work) list_for_each_entry_safe(cb_pos, cb_next, &dev->amthif_rd_complete_list.list, list) { - cl_pos = cb_pos->file_object->private_data; + cl = cb_pos->file_object->private_data; /* Finding the AMTHI entry. */ - if (cl_pos == &dev->iamthif_cl) + if (cl == &dev->iamthif_cl) list_del(&cb_pos->list); } mei_io_cb_free(dev->iamthif_current_cb); diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 5424f8ff3f7f..66f0a1a06451 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -13,9 +13,6 @@ * more details. 
* */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> @@ -40,7 +37,6 @@ #include <linux/mei.h> #include "mei_dev.h" -#include "hw-me.h" #include "client.h" /** @@ -129,17 +125,11 @@ static int mei_release(struct inode *inode, struct file *file) } if (cl->state == MEI_FILE_CONNECTED) { cl->state = MEI_FILE_DISCONNECTING; - dev_dbg(&dev->pdev->dev, - "disconnecting client host client = %d, " - "ME client = %d\n", - cl->host_client_id, - cl->me_client_id); + cl_dbg(dev, cl, "disconnecting\n"); rets = mei_cl_disconnect(cl); } mei_cl_flush_queues(cl); - dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n", - cl->host_client_id, - cl->me_client_id); + cl_dbg(dev, cl, "removing\n"); mei_cl_unlink(cl); @@ -284,6 +274,7 @@ copy_buffer: length = min_t(size_t, length, cb->buf_idx - *offset); if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { + dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n"); rets = -EFAULT; goto free; } @@ -340,7 +331,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, id = mei_me_cl_by_id(dev, cl->me_client_id); if (id < 0) { - rets = -ENODEV; + rets = -ENOTTY; goto out; } @@ -404,7 +395,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); if (rets) { - dev_err(&dev->pdev->dev, "failed to copy data from userland\n"); + dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); rets = -EFAULT; goto out; } @@ -471,12 +462,11 @@ static int mei_ioctl_connect_client(struct file *file, if (i < 0 || dev->me_clients[i].props.fixed_address) { dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n", &data->in_client_uuid); - rets = -ENODEV; + rets = -ENOTTY; goto end; } cl->me_client_id = dev->me_clients[i].client_id; - cl->state = MEI_FILE_CONNECTING; dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", cl->me_client_id); @@ -569,7 +559,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) dev_dbg(&dev->pdev->dev, "copy connect data from user\n"); if (copy_from_user(connect_data, (char __user *)data, sizeof(struct mei_connect_client_data))) { - dev_err(&dev->pdev->dev, "failed to copy data from userland\n"); + dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n"); rets = -EFAULT; goto out; } @@ -653,8 +643,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait) goto out; } - if (MEI_WRITE_COMPLETE == cl->writing_state) - mask |= (POLLIN | POLLRDNORM); + mask |= (POLLIN | POLLRDNORM); out: mutex_unlock(&dev->device_lock); diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index f7de95b4cdd9..5c7e990e2f22 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -24,7 +24,6 @@ #include <linux/mei_cl_bus.h> #include "hw.h" -#include "hw-me-regs.h" #include "hbm.h" /* @@ -130,16 +129,18 @@ enum mei_wd_states { /** * enum mei_cb_file_ops - file operation associated with the callback - * @MEI_FOP_READ - read - * @MEI_FOP_WRITE - write - * @MEI_FOP_IOCTL - ioctl - * @MEI_FOP_OPEN - open - * @MEI_FOP_CLOSE - close + * @MEI_FOP_READ - read + * @MEI_FOP_WRITE - write + * @MEI_FOP_CONNECT - connect + * @MEI_FOP_DISCONNECT_RSP - disconnect response + * @MEI_FOP_OPEN - open + * @MEI_FOP_CLOSE - close */ enum mei_cb_file_ops { MEI_FOP_READ = 0, MEI_FOP_WRITE, - MEI_FOP_IOCTL, + MEI_FOP_CONNECT, + MEI_FOP_DISCONNECT_RSP, MEI_FOP_OPEN, 
MEI_FOP_CLOSE }; @@ -152,6 +153,20 @@ struct mei_msg_data { unsigned char *data; }; +/* Maximum number of processed FW status registers */ +#define MEI_FW_STATUS_MAX 2 + +/* + * struct mei_fw_status - storage of FW status data + * + * @count - number of actually available elements in array + * @status - FW status registers + */ +struct mei_fw_status { + int count; + u32 status[MEI_FW_STATUS_MAX]; +}; + /** * struct mei_me_client - representation of me (fw) client * @@ -212,6 +227,7 @@ struct mei_cl { /** struct mei_hw_ops * + * @fw_status - read FW status from PCI config space * @host_is_ready - query for host readiness * @hw_is_ready - query if hw is ready @@ -219,6 +235,9 @@ struct mei_cl { * @hw_start - start hw after reset * @hw_config - configure hw + * @pg_state - power gating state of the device + * @pg_is_enabled - is power gating enabled + * @intr_clear - clear pending interrupts * @intr_enable - enable interrupts * @intr_disable - disable interrupts @@ -236,20 +255,25 @@ struct mei_cl { */ struct mei_hw_ops { - bool (*host_is_ready) (struct mei_device *dev); + int (*fw_status)(struct mei_device *dev, + struct mei_fw_status *fw_status); + bool (*host_is_ready)(struct mei_device *dev); + + bool (*hw_is_ready)(struct mei_device *dev); + int (*hw_reset)(struct mei_device *dev, bool enable); + int (*hw_start)(struct mei_device *dev); + void (*hw_config)(struct mei_device *dev); - bool (*hw_is_ready) (struct mei_device *dev); - int (*hw_reset) (struct mei_device *dev, bool enable); - int (*hw_start) (struct mei_device *dev); - void (*hw_config) (struct mei_device *dev); + enum mei_pg_state (*pg_state)(struct mei_device *dev); + bool (*pg_is_enabled)(struct mei_device *dev); - void (*intr_clear) (struct mei_device *dev); - void (*intr_enable) (struct mei_device *dev); - void (*intr_disable) (struct mei_device *dev); + void (*intr_clear)(struct mei_device *dev); + void (*intr_enable)(struct mei_device *dev); + void (*intr_disable)(struct mei_device *dev); - int (*hbuf_free_slots) (struct mei_device *dev); - bool (*hbuf_is_ready) (struct mei_device *dev); - size_t (*hbuf_max_len) (const struct mei_device *dev); + int (*hbuf_free_slots)(struct mei_device *dev); + bool (*hbuf_is_ready)(struct mei_device *dev); + size_t (*hbuf_max_len)(const struct mei_device *dev); int (*write)(struct mei_device *dev, struct mei_msg_hdr *hdr, @@ -258,7 +282,7 @@ struct mei_hw_ops { int (*rdbuf_full_slots)(struct mei_device *dev); u32 (*read_hdr)(const struct mei_device *dev); - int (*read) (struct mei_device *dev, + int (*read)(struct mei_device *dev, unsigned char *buf, unsigned long len); }; @@ -294,6 +318,7 @@ int __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length); int __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length); int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); void mei_cl_bus_rx_event(struct mei_cl *cl); +void mei_cl_bus_remove_devices(struct mei_device *dev); int mei_cl_bus_init(void); void mei_cl_bus_exit(void); @@ -329,17 +354,61 @@ struct mei_cl_device { void *priv_data; }; + + /** + * enum mei_pg_event - power gating transition events + * + * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition + * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete + * @MEI_PG_EVENT_RECEIVED: the driver received pg event + */ +enum mei_pg_event { + MEI_PG_EVENT_IDLE, + MEI_PG_EVENT_WAIT, + MEI_PG_EVENT_RECEIVED, +}; + +/** + * enum mei_pg_state - device internal power gating state + * + * @MEI_PG_OFF: device is not power gated - it is active + * 
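struct mei_hw_ops above grows fw_status, pg_state and pg_is_enabled callbacks, so each hardware backend (ME, TXE) supplies its own table and the core only ever dispatches through dev->ops. A toy sketch of that vtable pattern, with ex_* names standing in for the real types:

#include <linux/types.h>

struct ex_dev;

struct ex_hw_ops {
	bool (*pg_is_enabled)(struct ex_dev *dev);
	int  (*fw_status)(struct ex_dev *dev, u32 *status);
};

struct ex_dev {
	const struct ex_hw_ops *ops;
};

/* one backend's implementation */
static bool ex_backend_pg_is_enabled(struct ex_dev *dev)
{
	return true;
}

static int ex_backend_fw_status(struct ex_dev *dev, u32 *status)
{
	*status = 0;
	return 0;
}

static const struct ex_hw_ops ex_backend_ops = {
	.pg_is_enabled = ex_backend_pg_is_enabled,
	.fw_status     = ex_backend_fw_status,
};

/* the core never knows which backend it is talking to */
static inline bool ex_pg_is_enabled(struct ex_dev *dev)
{
	return dev->ops->pg_is_enabled(dev);
}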
@MEI_PG_ON: device is power gated - it is in lower power state + */ +enum mei_pg_state { + MEI_PG_OFF = 0, + MEI_PG_ON = 1, +}; + +/* + * mei_cfg + * + * @fw_status - FW status + * @quirk_probe - device exclusion quirk + */ +struct mei_cfg { + const struct mei_fw_status fw_status; + bool (*quirk_probe)(struct pci_dev *pdev); +}; + + +#define MEI_PCI_DEVICE(dev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ + .driver_data = (kernel_ulong_t)&(cfg) + + /** * struct mei_device - MEI private device struct * @reset_count - limits the number of consecutive resets * @hbm_state - state of host bus message protocol + * @pg_event - power gating event * @mem_addr - mem mapped base register address * @hbuf_depth - depth of hardware host/write buffer is slots * @hbuf_is_ready - query if the host host/write buffer is ready * @wr_msg - the buffer for hbm control messages - * @wr_ext_msg - the buffer for hbm control responses (set in read cycle) + * @cfg - per device generation config and ops */ struct mei_device { struct pci_dev *pdev; /* pointer to pci device struct */ @@ -370,6 +439,7 @@ struct mei_device { * waiting queue for receive message from FW */ wait_queue_head_t wait_hw_ready; + wait_queue_head_t wait_pg; wait_queue_head_t wait_recvd_msg; wait_queue_head_t wait_stop_wd; @@ -381,6 +451,14 @@ struct mei_device { enum mei_hbm_state hbm_state; u16 init_clients_timer; + /* + * Power Gating support + */ + enum mei_pg_event pg_event; +#ifdef CONFIG_PM_RUNTIME + struct dev_pm_domain pg_domain; +#endif /* CONFIG_PM_RUNTIME */ + unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */ u32 rd_msg_hdr; @@ -394,11 +472,6 @@ struct mei_device { unsigned char data[128]; } wr_msg; - struct { - struct mei_msg_hdr hdr; - unsigned char data[4]; /* All HBM messages are 4 bytes */ - } wr_ext_msg; /* for control responses */ - struct hbm_version version; struct mei_me_client *me_clients; /* Note: memory has to be allocated */ @@ -446,6 +519,7 @@ struct mei_device { const struct mei_hw_ops *ops; + const struct mei_cfg *cfg; char hw[0] __aligned(sizeof(void *)); }; @@ -478,7 +552,7 @@ static inline u32 mei_slots2data(int slots) /* * mei init function prototypes */ -void mei_device_init(struct mei_device *dev); +void mei_device_init(struct mei_device *dev, const struct mei_cfg *cfg); int mei_reset(struct mei_device *dev); int mei_start(struct mei_device *dev); int mei_restart(struct mei_device *dev); @@ -518,8 +592,8 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, void mei_amthif_run_next_cmd(struct mei_device *dev); -int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb, - s32 *slots, struct mei_cl_cb *cmpl_list); +int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list); void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb); int mei_amthif_irq_read_msg(struct mei_device *dev, @@ -546,7 +620,7 @@ int mei_wd_host_init(struct mei_device *dev); * once we got connection to the WD Client * @dev - mei device */ -void mei_watchdog_register(struct mei_device *dev); +int mei_watchdog_register(struct mei_device *dev); /* * mei_watchdog_unregister - Unregistering watchdog interface * @dev - mei device @@ -557,10 +631,22 @@ void mei_watchdog_unregister(struct mei_device *dev); * Register Access Function */ + static inline void mei_hw_config(struct mei_device *dev) { dev->ops->hw_config(dev); } + +static inline enum mei_pg_state mei_pg_state(struct 
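The MEI_PCI_DEVICE() macro above stashes a pointer to the per-generation struct mei_cfg in the PCI table's driver_data field, and the probe routines later cast it back (see pci-me.c and pci-txe.c below). A minimal sketch of that driver_data round trip, using a hypothetical device ID and config:

#include <linux/pci.h>
#include <linux/errno.h>

struct ex_cfg {
	bool has_pg;
};

static const struct ex_cfg ex_gen1_cfg = {
	.has_pg = false,
};

/* same shape as MEI_PCI_DEVICE(); 0x1234 is a made-up device ID */
#define EX_PCI_DEVICE(dev_id, cfg) \
	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev_id), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
	.driver_data = (kernel_ulong_t)&(cfg)

static const struct pci_device_id ex_pci_tbl[] = {
	{EX_PCI_DEVICE(0x1234, ex_gen1_cfg)},
	{0, }
};

static int ex_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ex_cfg *cfg = (const struct ex_cfg *)ent->driver_data;

	if (!cfg)
		return -ENODEV;

	/* generation-specific behaviour is selected via cfg from here on */
	return 0;
}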
mei_device *dev) +{ + return dev->ops->pg_state(dev); +} + +static inline bool mei_pg_is_enabled(struct mei_device *dev) +{ + return dev->ops->pg_is_enabled(dev); +} + static inline int mei_hw_reset(struct mei_device *dev, bool enable) { return dev->ops->hw_reset(dev, enable); @@ -633,6 +719,17 @@ static inline int mei_count_full_read_slots(struct mei_device *dev) return dev->ops->rdbuf_full_slots(dev); } +int mei_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status); + +#define FW_STS_FMT "%08X %08X" +#define FW_STS_PRM(fw_status) \ + (fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \ + (fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF + +bool mei_hbuf_acquire(struct mei_device *dev); + +bool mei_write_is_idle(struct mei_device *dev); + #if IS_ENABLED(CONFIG_DEBUG_FS) int mei_dbgfs_register(struct mei_device *dev, const char *name); void mei_dbgfs_deregister(struct mei_device *dev); diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c index a58320c0c049..3095fc514a65 100644 --- a/drivers/misc/mei/nfc.c +++ b/drivers/misc/mei/nfc.c @@ -364,7 +364,7 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) if (!wait_event_interruptible_timeout(ndev->send_wq, ndev->recv_req_id == ndev->req_id, HZ)) { dev_err(&dev->pdev->dev, "NFC MEI command timeout\n"); - err = -ETIMEDOUT; + err = -ETIME; } else { ndev->req_id++; } @@ -502,7 +502,7 @@ int mei_nfc_host_init(struct mei_device *dev) i = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); if (i < 0) { dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); - ret = -ENOENT; + ret = -ENOTTY; goto err; } @@ -520,7 +520,7 @@ int mei_nfc_host_init(struct mei_device *dev) i = mei_me_cl_by_uuid(dev, &mei_nfc_guid); if (i < 0) { dev_info(&dev->pdev->dev, "nfc: failed to find the client\n"); - ret = -ENOENT; + ret = -ENOTTY; goto err; } @@ -552,13 +552,7 @@ err: void mei_nfc_host_exit(struct mei_device *dev) { struct mei_nfc_dev *ndev = &nfc_dev; - cancel_work_sync(&ndev->init_work); +} - mutex_lock(&dev->device_lock); - if (ndev->cl && ndev->cl->device) - mei_cl_remove_device(ndev->cl->device); - mei_nfc_free(ndev); - mutex_unlock(&dev->device_lock); -} diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index ddadd08956f4..1b46c64a649f 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -13,9 +13,6 @@ * more details. 
* */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> @@ -27,7 +24,6 @@ #include <linux/aio.h> #include <linux/pci.h> #include <linux/poll.h> -#include <linux/init.h> #include <linux/ioctl.h> #include <linux/cdev.h> #include <linux/sched.h> @@ -37,50 +33,55 @@ #include <linux/interrupt.h> #include <linux/miscdevice.h> +#include <linux/pm_runtime.h> + #include <linux/mei.h> #include "mei_dev.h" -#include "hw-me.h" #include "client.h" +#include "hw-me-regs.h" +#include "hw-me.h" /* mei_pci_tbl - PCI Device ID Table */ -static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_H)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_W)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_HR)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_WPT_LP)}, +static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)}, + 
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)}, /* required last entry */ {0, } @@ -88,28 +89,33 @@ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = { MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl); +#ifdef CONFIG_PM_RUNTIME +static inline void mei_me_set_pm_domain(struct mei_device *dev); +static inline void mei_me_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_me_set_pm_domain(struct mei_device *dev) {} +static inline void mei_me_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM_RUNTIME */ + /** * mei_quirk_probe - probe for devices that doesn't valid ME interface * * @pdev: PCI device structure - * @ent: entry into pci_device_table + * @cfg: per generation config * * returns true if ME Interface is valid, false otherwise */ static bool mei_me_quirk_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct mei_cfg *cfg) { - u32 reg; - if (ent->device == MEI_DEV_ID_PBG_1) { - pci_read_config_dword(pdev, 0x48, ®); - /* make sure that bit 9 is up and bit 10 is down */ - if ((reg & 0x600) == 0x200) { - dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); - return false; - } + if (cfg->quirk_probe && cfg->quirk_probe(pdev)) { + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); + return false; } + return true; } + /** * mei_probe - Device Initialization Routine * @@ -120,15 +126,14 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev, */ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); struct mei_device *dev; struct mei_me_hw *hw; int err; - if (!mei_me_quirk_probe(pdev, ent)) { - err = -ENODEV; - goto end; - } + if (!mei_me_quirk_probe(pdev, cfg)) + return -ENODEV; /* enable pci dev */ err = pci_enable_device(pdev); @@ -160,7 +165,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* allocates and initializes the mei dev structure */ - dev = mei_me_dev_init(pdev); + dev = mei_me_dev_init(pdev, cfg); if (!dev) { err = -ENOMEM; goto release_regions; @@ -199,6 +204,9 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto release_irq; } + pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT); + 
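mei_me_quirk_probe() above is reduced to calling an optional cfg->quirk_probe() hook, so the PBG-specific test that used to be open-coded here (config dword 0x48, bit 9 set and bit 10 clear means no usable ME interface) can move into the per-generation config. A sketch of what such a hook could look like; the actual callback lives in hw-me.c, which is not part of this hunk:

#include <linux/pci.h>

/* returns true when the device should be skipped (no valid ME interface) */
static bool ex_quirk_probe(struct pci_dev *pdev)
{
	u32 reg;

	pci_read_config_dword(pdev, 0x48, &reg);
	/* bit 9 up and bit 10 down: the ME interface is not usable */
	return (reg & 0x600) == 0x200;
}

static const struct ex_cfg_with_quirk {
	bool (*quirk_probe)(struct pci_dev *pdev);
} ex_quirk_cfg = {
	.quirk_probe = ex_quirk_probe,
};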
pm_runtime_use_autosuspend(&pdev->dev); + err = mei_register(dev); if (err) goto release_irq; @@ -207,6 +215,17 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) schedule_delayed_work(&dev->timer_work, HZ); + /* + * For not wake-able HW runtime pm framework + * can't be used on pci device level. + * Use domain runtime pm callbacks instead. + */ + if (!pci_dev_run_wake(pdev)) + mei_me_set_pm_domain(dev); + + if (mei_pg_is_enabled(dev)) + pm_runtime_put_noidle(&pdev->dev); + dev_dbg(&pdev->dev, "initialization successful.\n"); return 0; @@ -246,12 +265,18 @@ static void mei_me_remove(struct pci_dev *pdev) if (!dev) return; + if (mei_pg_is_enabled(dev)) + pm_runtime_get_noresume(&pdev->dev); + hw = to_me_hw(dev); dev_dbg(&pdev->dev, "stop\n"); mei_stop(dev); + if (!pci_dev_run_wake(pdev)) + mei_me_unset_pm_domain(dev); + /* disable interrupts */ mei_disable_interrupts(dev); @@ -270,7 +295,7 @@ static void mei_me_remove(struct pci_dev *pdev) } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int mei_me_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); @@ -330,7 +355,116 @@ static int mei_me_pci_resume(struct device *device) return 0; } -static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int mei_me_pm_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + + dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + if (mei_write_is_idle(dev)) + pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2); + + return -EBUSY; +} + +static int mei_me_pm_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int ret; + + dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (mei_write_is_idle(dev)) + ret = mei_me_pg_set_sync(dev); + else + ret = -EAGAIN; + + mutex_unlock(&dev->device_lock); + + dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret); + + return ret; +} + +static int mei_me_pm_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int ret; + + dev_dbg(&pdev->dev, "rpm: me: runtime resume\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + ret = mei_me_pg_unset_sync(dev); + + mutex_unlock(&dev->device_lock); + + dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret); + + return ret; +} + +/** + * mei_me_set_pm_domain - fill and set pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_me_set_pm_domain(struct mei_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + if (pdev->dev.bus && pdev->dev.bus->pm) { + dev->pg_domain.ops = *pdev->dev.bus->pm; + + dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend; + dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume; + dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle; + + pdev->dev.pm_domain = &dev->pg_domain; + } +} + +/** + * mei_me_unset_pm_domain - clean pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_me_unset_pm_domain(struct mei_device *dev) +{ + /* stop using pm callbacks if any */ + dev->pdev->dev.pm_domain = NULL; +} +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM +static const struct dev_pm_ops 
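The probe path above only arms runtime PM (autosuspend delay, pm_runtime_use_autosuspend(), and dropping the initial usage count with pm_runtime_put_noidle() when power gating is supported); the matching get/put calls around actual I/O live in parts of the driver not shown in this hunk. A generic sketch of how such an I/O path is usually bracketed:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int ex_do_io(struct device *dev)
{
	int ret;

	/* resume the device, or just bump the usage count if already active */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... talk to the hardware here ... */

	/* restart the autosuspend timer and drop our reference */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}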
mei_me_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, + mei_me_pci_resume) + SET_RUNTIME_PM_OPS( + mei_me_pm_runtime_suspend, + mei_me_pm_runtime_resume, + mei_me_pm_runtime_idle) +}; + #define MEI_ME_PM_OPS (&mei_me_pm_ops) #else #define MEI_ME_PM_OPS NULL diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c new file mode 100644 index 000000000000..2343c6236df9 --- /dev/null +++ b/drivers/misc/mei/pci-txe.c @@ -0,0 +1,436 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2013-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/uuid.h> +#include <linux/jiffies.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/pm_runtime.h> + +#include <linux/mei.h> + + +#include "mei_dev.h" +#include "hw-txe.h" + +static const struct pci_device_id mei_txe_pci_tbl[] = { + {MEI_PCI_DEVICE(0x0F18, mei_txe_cfg)}, /* Baytrail */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl); + +#ifdef CONFIG_PM_RUNTIME +static inline void mei_txe_set_pm_domain(struct mei_device *dev); +static inline void mei_txe_unset_pm_domain(struct mei_device *dev); +#else +static inline void mei_txe_set_pm_domain(struct mei_device *dev) {} +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} +#endif /* CONFIG_PM_RUNTIME */ + +static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw) +{ + int i; + for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { + if (hw->mem_addr[i]) { + pci_iounmap(pdev, hw->mem_addr[i]); + hw->mem_addr[i] = NULL; + } + } +} +/** + * mei_probe - Device Initialization Routine + * + * @pdev: PCI device structure + * @ent: entry in mei_txe_pci_tbl + * + * returns 0 on success, <0 on failure. 
+ */ +static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); + struct mei_device *dev; + struct mei_txe_hw *hw; + int err; + int i; + + /* enable pci dev */ + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device.\n"); + goto end; + } + /* set PCI host mastering */ + pci_set_master(pdev); + /* pci request regions for mei driver */ + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "failed to get pci regions.\n"); + goto disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No suitable DMA available.\n"); + goto release_regions; + } + } + + /* allocates and initializes the mei dev structure */ + dev = mei_txe_dev_init(pdev, cfg); + if (!dev) { + err = -ENOMEM; + goto release_regions; + } + hw = to_txe_hw(dev); + + /* mapping IO device memory */ + for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) { + hw->mem_addr[i] = pci_iomap(pdev, i, 0); + if (!hw->mem_addr[i]) { + dev_err(&pdev->dev, "mapping I/O device memory failure.\n"); + err = -ENOMEM; + goto free_device; + } + } + + + pci_enable_msi(pdev); + + /* clear spurious interrupts */ + mei_clear_interrupts(dev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_txe_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_txe_irq_quick_handler, + mei_txe_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n", + pdev->irq); + goto free_device; + } + + if (mei_start(dev)) { + dev_err(&pdev->dev, "init hw failure.\n"); + err = -ENODEV; + goto release_irq; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + + err = mei_register(dev); + if (err) + goto release_irq; + + pci_set_drvdata(pdev, dev); + + /* + * For not wake-able HW runtime pm framework + * can't be used on pci device level. + * Use domain runtime pm callbacks instead. + */ + if (!pci_dev_run_wake(pdev)) + mei_txe_set_pm_domain(dev); + + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +release_irq: + + mei_cancel_work(dev); + + /* disable interrupts */ + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + +free_device: + mei_txe_pci_iounmap(pdev, hw); + + kfree(dev); +release_regions: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +end: + dev_err(&pdev->dev, "initialization failed.\n"); + return err; +} + +/** + * mei_remove - Device Removal Routine + * + * @pdev: PCI device structure + * + * mei_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
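mei_txe_probe() above requests the interrupt in two flavours: with MSI the quick handler is NULL and IRQF_ONESHOT keeps the line masked until the thread finishes, while the shared legacy case supplies a quick handler that filters the source before waking the thread. A minimal sketch of such a handler pair; the ex_* names and the "is it ours" check are placeholders:

#include <linux/types.h>
#include <linux/interrupt.h>

static irqreturn_t ex_irq_quick_handler(int irq, void *dev_id)
{
	/* on a shared line: cheap check whether the interrupt is ours;
	 * a real driver reads an interrupt cause register here */
	bool ours = true;	/* placeholder */

	if (!ours)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;	/* defer the real work to the thread */
}

static irqreturn_t ex_irq_thread_handler(int irq, void *dev_id)
{
	/* heavier processing runs here, in sleepable context */
	return IRQ_HANDLED;
}

static int ex_request_irq(int irq, bool msi_enabled, void *dev_id)
{
	if (msi_enabled)
		/* no quick handler: IRQF_ONESHOT is mandatory */
		return request_threaded_irq(irq, NULL, ex_irq_thread_handler,
					    IRQF_ONESHOT, "ex", dev_id);

	return request_threaded_irq(irq, ex_irq_quick_handler,
				    ex_irq_thread_handler,
				    IRQF_SHARED, "ex", dev_id);
}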
+ */ +static void mei_txe_remove(struct pci_dev *pdev) +{ + struct mei_device *dev; + struct mei_txe_hw *hw; + + dev = pci_get_drvdata(pdev); + if (!dev) { + dev_err(&pdev->dev, "mei: dev =NULL\n"); + return; + } + + pm_runtime_get_noresume(&pdev->dev); + + hw = to_txe_hw(dev); + + mei_stop(dev); + + if (!pci_dev_run_wake(pdev)) + mei_txe_unset_pm_domain(dev); + + /* disable interrupts */ + mei_disable_interrupts(dev); + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + pci_set_drvdata(pdev, NULL); + + mei_txe_pci_iounmap(pdev, hw); + + mei_deregister(dev); + + kfree(dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + + +#ifdef CONFIG_PM_SLEEP +static int mei_txe_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev = pci_get_drvdata(pdev); + + if (!dev) + return -ENODEV; + + dev_dbg(&pdev->dev, "suspend\n"); + + mei_stop(dev); + + mei_disable_interrupts(dev); + + free_irq(pdev->irq, dev); + pci_disable_msi(pdev); + + return 0; +} + +static int mei_txe_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int err; + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + pci_enable_msi(pdev); + + mei_clear_interrupts(dev); + + /* request and enable interrupt */ + if (pci_dev_msi_enabled(pdev)) + err = request_threaded_irq(pdev->irq, + NULL, + mei_txe_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + else + err = request_threaded_irq(pdev->irq, + mei_txe_irq_quick_handler, + mei_txe_irq_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", + pdev->irq); + return err; + } + + err = mei_restart(dev); + + return err; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int mei_txe_pm_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + + dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + if (mei_write_is_idle(dev)) + pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2); + + return -EBUSY; +} +static int mei_txe_pm_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int ret; + + dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + if (mei_write_is_idle(dev)) + ret = mei_txe_aliveness_set_sync(dev, 0); + else + ret = -EAGAIN; + + /* + * If everything is okay we're about to enter PCI low + * power state (D3) therefor we need to disable the + * interrupts towards host. 
+ * However if device is not wakeable we do not enter + * D-low state and we need to keep the interrupt kicking + */ + if (!ret && pci_dev_run_wake(pdev)) + mei_disable_interrupts(dev); + + dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); + + mutex_unlock(&dev->device_lock); + return ret; +} + +static int mei_txe_pm_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct mei_device *dev; + int ret; + + dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); + + dev = pci_get_drvdata(pdev); + if (!dev) + return -ENODEV; + + mutex_lock(&dev->device_lock); + + mei_enable_interrupts(dev); + + ret = mei_txe_aliveness_set_sync(dev, 1); + + mutex_unlock(&dev->device_lock); + + dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); + + return ret; +} + +/** + * mei_txe_set_pm_domain - fill and set pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_txe_set_pm_domain(struct mei_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + if (pdev->dev.bus && pdev->dev.bus->pm) { + dev->pg_domain.ops = *pdev->dev.bus->pm; + + dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend; + dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume; + dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle; + + pdev->dev.pm_domain = &dev->pg_domain; + } +} + +/** + * mei_txe_unset_pm_domain - clean pm domian stucture for device + * + * @dev: mei_device + */ +static inline void mei_txe_unset_pm_domain(struct mei_device *dev) +{ + /* stop using pm callbacks if any */ + dev->pdev->dev.pm_domain = NULL; +} +#endif /* CONFIG_PM_RUNTIME */ + +#ifdef CONFIG_PM +static const struct dev_pm_ops mei_txe_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend, + mei_txe_pci_resume) + SET_RUNTIME_PM_OPS( + mei_txe_pm_runtime_suspend, + mei_txe_pm_runtime_resume, + mei_txe_pm_runtime_idle) +}; + +#define MEI_TXE_PM_OPS (&mei_txe_pm_ops) +#else +#define MEI_TXE_PM_OPS NULL +#endif /* CONFIG_PM */ + +/* + * PCI driver structure + */ +static struct pci_driver mei_txe_driver = { + .name = KBUILD_MODNAME, + .id_table = mei_txe_pci_tbl, + .probe = mei_txe_probe, + .remove = mei_txe_remove, + .shutdown = mei_txe_remove, + .driver.pm = MEI_TXE_PM_OPS, +}; + +module_pci_driver(mei_txe_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c index f70945ed96f6..a84a664dfccb 100644 --- a/drivers/misc/mei/wd.c +++ b/drivers/misc/mei/wd.c @@ -25,7 +25,6 @@ #include "mei_dev.h" #include "hbm.h" -#include "hw-me.h" #include "client.h" static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; @@ -53,7 +52,7 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) * * @dev: the device structure * - * returns -ENENT if wd client cannot be found + * returns -ENOTTY if wd client cannot be found * -EIO if write has failed * 0 on success */ @@ -73,7 +72,7 @@ int mei_wd_host_init(struct mei_device *dev) id = mei_me_cl_by_uuid(dev, &mei_wd_guid); if (id < 0) { dev_info(&dev->pdev->dev, "wd: failed to find the client\n"); - return id; + return -ENOTTY; } cl->me_client_id = dev->me_clients[id].client_id; @@ -85,17 +84,20 @@ int mei_wd_host_init(struct mei_device *dev) return ret; } - cl->state = MEI_FILE_CONNECTING; + ret = mei_cl_connect(cl, NULL); - if (mei_hbm_cl_connect_req(dev, cl)) { - dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n"); - cl->state = 
MEI_FILE_DISCONNECTED; - cl->host_client_id = 0; - return -EIO; + if (ret) { + dev_err(&dev->pdev->dev, "wd: failed to connect = %d\n", ret); + mei_cl_unlink(cl); + return ret; } - cl->timer_count = MEI_CONNECT_TIMEOUT; - return 0; + ret = mei_watchdog_register(dev); + if (ret) { + mei_cl_disconnect(cl); + mei_cl_unlink(cl); + } + return ret; } /** @@ -106,13 +108,16 @@ int mei_wd_host_init(struct mei_device *dev) * returns 0 if success, * -EIO when message send fails * -EINVAL when invalid message is to be sent + * -ENODEV on flow control failure */ int mei_wd_send(struct mei_device *dev) { + struct mei_cl *cl = &dev->wd_cl; struct mei_msg_hdr hdr; + int ret; - hdr.host_addr = dev->wd_cl.host_client_id; - hdr.me_addr = dev->wd_cl.me_client_id; + hdr.host_addr = cl->host_client_id; + hdr.me_addr = cl->me_client_id; hdr.msg_complete = 1; hdr.reserved = 0; hdr.internal = 0; @@ -121,10 +126,24 @@ int mei_wd_send(struct mei_device *dev) hdr.length = MEI_WD_START_MSG_SIZE; else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) hdr.length = MEI_WD_STOP_MSG_SIZE; - else + else { + dev_err(&dev->pdev->dev, "wd: invalid message is to be sent, aborting\n"); return -EINVAL; + } + + ret = mei_write_message(dev, &hdr, dev->wd_data); + if (ret) { + dev_err(&dev->pdev->dev, "wd: write message failed\n"); + return ret; + } - return mei_write_message(dev, &hdr, dev->wd_data); + ret = mei_cl_flow_ctrl_reduce(cl); + if (ret) { + dev_err(&dev->pdev->dev, "wd: flow_ctrl_reduce failed.\n"); + return ret; + } + + return 0; } /** @@ -133,9 +152,11 @@ int mei_wd_send(struct mei_device *dev) * @dev: the device structure * @preserve: indicate if to keep the timeout value * - * returns 0 if success, - * -EIO when message send fails + * returns 0 if success + * on error: + * -EIO when message send fails * -EINVAL when invalid message is to be sent + * -ETIME on message timeout */ int mei_wd_stop(struct mei_device *dev) { @@ -151,20 +172,12 @@ int mei_wd_stop(struct mei_device *dev) ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); if (ret < 0) - goto out; - - if (ret && dev->hbuf_is_ready) { - ret = 0; - dev->hbuf_is_ready = false; - - if (!mei_wd_send(dev)) { - ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl); - if (ret) - goto out; - } else { - dev_err(&dev->pdev->dev, "wd: send stop failed\n"); - } + goto err; + if (ret && mei_hbuf_acquire(dev)) { + ret = mei_wd_send(dev); + if (ret) + goto err; dev->wd_pending = false; } else { dev->wd_pending = true; @@ -172,21 +185,21 @@ int mei_wd_stop(struct mei_device *dev) mutex_unlock(&dev->device_lock); - ret = wait_event_interruptible_timeout(dev->wait_stop_wd, - dev->wd_state == MEI_WD_IDLE, - msecs_to_jiffies(MEI_WD_STOP_TIMEOUT)); + ret = wait_event_timeout(dev->wait_stop_wd, + dev->wd_state == MEI_WD_IDLE, + msecs_to_jiffies(MEI_WD_STOP_TIMEOUT)); mutex_lock(&dev->device_lock); - if (dev->wd_state == MEI_WD_IDLE) { - dev_dbg(&dev->pdev->dev, "wd: stop completed ret=%d.\n", ret); - ret = 0; - } else { - if (!ret) - ret = -ETIMEDOUT; + if (dev->wd_state != MEI_WD_IDLE) { + /* timeout */ + ret = -ETIME; dev_warn(&dev->pdev->dev, "wd: stop failed to complete ret=%d.\n", ret); + goto err; } - -out: + dev_dbg(&dev->pdev->dev, "wd: stop completed after %u msec\n", + MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret)); + return 0; +err: return ret; } @@ -260,8 +273,8 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev) */ static int mei_wd_ops_ping(struct watchdog_device *wd_dev) { - int ret = 0; struct mei_device *dev; + int ret; dev = watchdog_get_drvdata(wd_dev); if 
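mei_wd_stop() above switches from the interruptible wait to wait_event_timeout() and re-checks the watchdog state under the lock; the helper returns 0 when the timeout elapsed and the remaining jiffies otherwise, which is how the "stop completed after N msec" message is derived. A small sketch of that return-value handling, with an illustrative timeout:

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define EX_STOP_TIMEOUT_MS	10000	/* illustrative timeout */

static int ex_wait_for_stop(wait_queue_head_t *wq, bool *stopped)
{
	long ret;

	ret = wait_event_timeout(*wq, *stopped,
				 msecs_to_jiffies(EX_STOP_TIMEOUT_MS));
	if (!*stopped)
		return -ETIME;		/* ret is 0 here: we timed out */

	/* ret holds the jiffies left when the condition became true */
	pr_debug("stop completed after %u msec\n",
		 EX_STOP_TIMEOUT_MS - jiffies_to_msecs(ret));
	return 0;
}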
(!dev) @@ -277,25 +290,18 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev) dev->wd_state = MEI_WD_RUNNING; + ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); + if (ret < 0) + goto end; /* Check if we can send the ping to HW*/ - if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { + if (ret && mei_hbuf_acquire(dev)) { - dev->hbuf_is_ready = false; dev_dbg(&dev->pdev->dev, "wd: sending ping\n"); - if (mei_wd_send(dev)) { - dev_err(&dev->pdev->dev, "wd: send failed.\n"); - ret = -EIO; - goto end; - } - - if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) { - dev_err(&dev->pdev->dev, - "wd: mei_cl_flow_ctrl_reduce() failed.\n"); - ret = -EIO; + ret = mei_wd_send(dev); + if (ret) goto end; - } - + dev->wd_pending = false; } else { dev->wd_pending = true; } @@ -363,17 +369,25 @@ static struct watchdog_device amt_wd_dev = { }; -void mei_watchdog_register(struct mei_device *dev) +int mei_watchdog_register(struct mei_device *dev) { - if (watchdog_register_device(&amt_wd_dev)) { - dev_err(&dev->pdev->dev, - "wd: unable to register watchdog device.\n"); - return; + + int ret; + + /* unlock to perserve correct locking order */ + mutex_unlock(&dev->device_lock); + ret = watchdog_register_device(&amt_wd_dev); + mutex_lock(&dev->device_lock); + if (ret) { + dev_err(&dev->pdev->dev, "wd: unable to register watchdog device = %d.\n", + ret); + return ret; } dev_dbg(&dev->pdev->dev, "wd: successfully register watchdog interface.\n"); watchdog_set_drvdata(&amt_wd_dev, dev); + return 0; } void mei_watchdog_unregister(struct mei_device *dev) diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig index e42b331edbc6..462a5b1d8651 100644 --- a/drivers/misc/mic/Kconfig +++ b/drivers/misc/mic/Kconfig @@ -4,7 +4,6 @@ config INTEL_MIC_HOST tristate "Intel MIC Host Driver" depends on 64BIT && PCI && X86 select VHOST_RING - default N help This enables Host Driver support for the Intel Many Integrated Core (MIC) family of PCIe form factor coprocessor devices that @@ -25,7 +24,6 @@ config INTEL_MIC_CARD tristate "Intel MIC Card Driver" depends on 64BIT && X86 select VIRTIO - default N help This enables card driver support for the Intel Many Integrated Core (MIC) device family. The card driver communicates shutdown/ diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h index 347b9b3b7916..306f502be95e 100644 --- a/drivers/misc/mic/card/mic_device.h +++ b/drivers/misc/mic/card/mic_device.h @@ -29,6 +29,7 @@ #include <linux/workqueue.h> #include <linux/io.h> +#include <linux/irqreturn.h> /** * struct mic_intr_info - Contains h/w specific interrupt sources info diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h index 1a6edce2ecde..0398c696d257 100644 --- a/drivers/misc/mic/host/mic_device.h +++ b/drivers/misc/mic/host/mic_device.h @@ -24,6 +24,7 @@ #include <linux/cdev.h> #include <linux/idr.h> #include <linux/notifier.h> +#include <linux/irqreturn.h> #include "mic_intr.h" diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c index f9c29bc918bc..dbc5afde1392 100644 --- a/drivers/misc/mic/host/mic_intr.c +++ b/drivers/misc/mic/host/mic_intr.c @@ -194,7 +194,7 @@ static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev) for (i = 0; i < MIC_MIN_MSIX; i++) mdev->irq_info.msix_entries[i].entry = i; - rc = pci_enable_msix(pdev, mdev->irq_info.msix_entries, + rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries, MIC_MIN_MSIX); if (rc) { dev_dbg(&pdev->dev, "Error enabling MSIx. 
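The MIC interrupt hunk here moves from pci_enable_msix() to pci_enable_msix_exact(), which either enables exactly the requested number of vectors or fails with a negative errno and never returns a positive "available vectors" count, so callers do not need a retry loop. A minimal usage sketch with an illustrative vector count:

#include <linux/pci.h>

#define EX_NUM_VECS	2	/* illustrative vector count */

static int ex_setup_msix(struct pci_dev *pdev, struct msix_entry *entries)
{
	int i, rc;

	for (i = 0; i < EX_NUM_VECS; i++)
		entries[i].entry = i;

	/* all-or-nothing: 0 on success, negative errno otherwise */
	rc = pci_enable_msix_exact(pdev, entries, EX_NUM_VECS);
	if (rc)
		dev_dbg(&pdev->dev, "MSI-X not available, rc = %d\n", rc);

	return rc;
}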
rc = %d\n", rc); diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index a5925f7f17f6..956597321d2a 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c @@ -636,6 +636,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, u8 mac[ETH_ALEN]; ssize_t rom_size; struct pch_phub_reg *chip = dev_get_drvdata(dev); + int ret; if (!mac_pton(buf, mac)) return -EINVAL; @@ -644,8 +645,10 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, if (!chip->pch_phub_extrom_base_address) return -ENOMEM; - pch_phub_write_gbe_mac_addr(chip, mac); + ret = pch_phub_write_gbe_mac_addr(chip, mac); pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); + if (ret) + return ret; return count; } diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c index 2bef3f76032a..a3700a56b8ff 100644 --- a/drivers/misc/sgi-gru/grukdump.c +++ b/drivers/misc/sgi-gru/grukdump.c @@ -178,10 +178,10 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum, hdr.cbrcnt = cbrcnt; hdr.dsrcnt = dsrcnt; hdr.cch_locked = cch_locked; - if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr))) - ret = -EFAULT; + if (copy_to_user(uhdr, &hdr, sizeof(hdr))) + return -EFAULT; - return ret ? ret : bytes; + return bytes; } int gru_dump_chiplet_request(unsigned long arg) diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index afe66571ce0b..21181fa243df 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -24,6 +24,9 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/list.h> +#include <linux/list_sort.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -36,14 +39,35 @@ struct sram_dev { struct clk *clk; }; +struct sram_reserve { + struct list_head list; + u32 start; + u32 size; +}; + +static int sram_reserve_cmp(void *priv, struct list_head *a, + struct list_head *b) +{ + struct sram_reserve *ra = list_entry(a, struct sram_reserve, list); + struct sram_reserve *rb = list_entry(b, struct sram_reserve, list); + + return ra->start - rb->start; +} + static int sram_probe(struct platform_device *pdev) { void __iomem *virt_base; struct sram_dev *sram; struct resource *res; - unsigned long size; + struct device_node *np = pdev->dev.of_node, *child; + unsigned long size, cur_start, cur_size; + struct sram_reserve *rblocks, *block; + struct list_head reserve_list; + unsigned int nblocks; int ret; + INIT_LIST_HEAD(&reserve_list); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); virt_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(virt_base)) @@ -65,19 +89,106 @@ static int sram_probe(struct platform_device *pdev) if (!sram->pool) return -ENOMEM; - ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base, - res->start, size, -1); - if (ret < 0) { - if (sram->clk) - clk_disable_unprepare(sram->clk); - return ret; + /* + * We need an additional block to mark the end of the memory region + * after the reserved blocks from the dt are processed. + */ + nblocks = (np) ? 
of_get_available_child_count(np) + 1 : 1; + rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL); + if (!rblocks) { + ret = -ENOMEM; + goto err_alloc; + } + + block = &rblocks[0]; + for_each_available_child_of_node(np, child) { + struct resource child_res; + + ret = of_address_to_resource(child, 0, &child_res); + if (ret < 0) { + dev_err(&pdev->dev, + "could not get address for node %s\n", + child->full_name); + goto err_chunks; + } + + if (child_res.start < res->start || child_res.end > res->end) { + dev_err(&pdev->dev, + "reserved block %s outside the sram area\n", + child->full_name); + ret = -EINVAL; + goto err_chunks; + } + + block->start = child_res.start - res->start; + block->size = resource_size(&child_res); + list_add_tail(&block->list, &reserve_list); + + dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n", + block->start, + block->start + block->size); + + block++; + } + + /* the last chunk marks the end of the region */ + rblocks[nblocks - 1].start = size; + rblocks[nblocks - 1].size = 0; + list_add_tail(&rblocks[nblocks - 1].list, &reserve_list); + + list_sort(NULL, &reserve_list, sram_reserve_cmp); + + cur_start = 0; + + list_for_each_entry(block, &reserve_list, list) { + /* can only happen if sections overlap */ + if (block->start < cur_start) { + dev_err(&pdev->dev, + "block at 0x%x starts after current offset 0x%lx\n", + block->start, cur_start); + ret = -EINVAL; + goto err_chunks; + } + + /* current start is in a reserved block, so continue after it */ + if (block->start == cur_start) { + cur_start = block->start + block->size; + continue; + } + + /* + * allocate the space between the current starting + * address and the following reserved block, or the + * end of the region. + */ + cur_size = block->start - cur_start; + + dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n", + cur_start, cur_start + cur_size); + ret = gen_pool_add_virt(sram->pool, + (unsigned long)virt_base + cur_start, + res->start + cur_start, cur_size, -1); + if (ret < 0) + goto err_chunks; + + /* next allocation after this reserved block */ + cur_start = block->start + block->size; } + kfree(rblocks); + platform_set_drvdata(pdev, sram); dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base); return 0; + +err_chunks: + kfree(rblocks); +err_alloc: + if (sram->clk) + clk_disable_unprepare(sram->clk); + return ret; } static int sram_remove(struct platform_device *pdev) @@ -87,8 +198,6 @@ static int sram_remove(struct platform_device *pdev) if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) dev_dbg(&pdev->dev, "removed while SRAM allocated\n"); - gen_pool_destroy(sram->pool); - if (sram->clk) clk_disable_unprepare(sram->clk); diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 3aed525e55b4..1972d57aadb3 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -22,7 +22,6 @@ #define pr_fmt(fmt) "(stc): " fmt #include <linux/module.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/tty.h> #include <linux/seq_file.h> diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c index 83da711ce9f1..cb0289b44a17 100644 --- a/drivers/misc/ti_dac7512.c +++ b/drivers/misc/ti_dac7512.c @@ -20,7 +20,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <linux/spi/spi.h> #include <linux/of.h> diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c index 5bc10fa193de..b00335652e52 100644 --- a/drivers/misc/tsl2550.c +++ b/drivers/misc/tsl2550.c @@ -20,7 +20,6 @@ */ #include <linux/module.h> 
-#include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c new file mode 100644 index 000000000000..73068e50e56d --- /dev/null +++ b/drivers/misc/vexpress-syscfg.c @@ -0,0 +1,324 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2014 ARM Limited + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/syscore_ops.h> +#include <linux/vexpress.h> + + +#define SYS_CFGDATA 0x0 + +#define SYS_CFGCTRL 0x4 +#define SYS_CFGCTRL_START (1 << 31) +#define SYS_CFGCTRL_WRITE (1 << 30) +#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26) +#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20) +#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16) +#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12) +#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0) + +#define SYS_CFGSTAT 0x8 +#define SYS_CFGSTAT_ERR (1 << 1) +#define SYS_CFGSTAT_COMPLETE (1 << 0) + + +struct vexpress_syscfg { + struct device *dev; + void __iomem *base; + struct list_head funcs; +}; + +struct vexpress_syscfg_func { + struct list_head list; + struct vexpress_syscfg *syscfg; + struct regmap *regmap; + int num_templates; + u32 template[0]; /* Keep it last! */ +}; + + +static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func, + int index, bool write, u32 *data) +{ + struct vexpress_syscfg *syscfg = func->syscfg; + u32 command, status; + int tries; + long timeout; + + if (WARN_ON(index > func->num_templates)) + return -EINVAL; + + command = readl(syscfg->base + SYS_CFGCTRL); + if (WARN_ON(command & SYS_CFGCTRL_START)) + return -EBUSY; + + command = func->template[index]; + command |= SYS_CFGCTRL_START; + command |= write ? SYS_CFGCTRL_WRITE : 0; + + /* Use a canary for reads */ + if (!write) + *data = 0xdeadbeef; + + dev_dbg(syscfg->dev, "func %p, command %x, data %x\n", + func, command, *data); + writel(*data, syscfg->base + SYS_CFGDATA); + writel(0, syscfg->base + SYS_CFGSTAT); + writel(command, syscfg->base + SYS_CFGCTRL); + mb(); + + /* The operation can take ages... 
Go to sleep, 100us initially */ + tries = 100; + timeout = 100; + do { + if (!irqs_disabled()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(timeout)); + if (signal_pending(current)) + return -EINTR; + } else { + udelay(timeout); + } + + status = readl(syscfg->base + SYS_CFGSTAT); + if (status & SYS_CFGSTAT_ERR) + return -EFAULT; + + if (timeout > 20) + timeout -= 20; + } while (--tries && !(status & SYS_CFGSTAT_COMPLETE)); + if (WARN_ON_ONCE(!tries)) + return -ETIMEDOUT; + + if (!write) { + *data = readl(syscfg->base + SYS_CFGDATA); + dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data); + } + + return 0; +} + +static int vexpress_syscfg_read(void *context, unsigned int index, + unsigned int *val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, false, val); +} + +static int vexpress_syscfg_write(void *context, unsigned int index, + unsigned int val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, true, &val); +} + +struct regmap_config vexpress_syscfg_regmap_config = { + .lock = vexpress_config_lock, + .unlock = vexpress_config_unlock, + .reg_bits = 32, + .val_bits = 32, + .reg_read = vexpress_syscfg_read, + .reg_write = vexpress_syscfg_write, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + + +static struct regmap *vexpress_syscfg_regmap_init(struct device *dev, + void *context) +{ + struct platform_device *pdev = to_platform_device(dev); + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func; + struct property *prop; + const __be32 *val = NULL; + __be32 energy_quirk[4]; + int num; + u32 site, position, dcc; + int i; + + if (dev->of_node) { + int err = vexpress_config_get_topo(dev->of_node, &site, + &position, &dcc); + + if (err) + return ERR_PTR(err); + + prop = of_find_property(dev->of_node, + "arm,vexpress-sysreg,func", NULL); + if (!prop) + return ERR_PTR(-EINVAL); + + num = prop->length / sizeof(u32) / 2; + val = prop->value; + } else { + if (pdev->num_resources != 1 || + pdev->resource[0].flags != IORESOURCE_BUS) + return ERR_PTR(-EFAULT); + + site = pdev->resource[0].start; + if (site == VEXPRESS_SITE_MASTER) + site = vexpress_config_get_master(); + position = 0; + dcc = 0; + num = 1; + } + + /* + * "arm,vexpress-energy" function used to be described + * by its first device only, now it requires both + */ + if (num == 1 && of_device_is_compatible(dev->of_node, + "arm,vexpress-energy")) { + num = 2; + energy_quirk[0] = *val; + energy_quirk[2] = *val++; + energy_quirk[1] = *val; + energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1); + val = energy_quirk; + } + + func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, + GFP_KERNEL); + if (!func) + return NULL; + + func->syscfg = syscfg; + func->num_templates = num; + + for (i = 0; i < num; i++) { + u32 function, device; + + if (dev->of_node) { + function = be32_to_cpup(val++); + device = be32_to_cpup(val++); + } else { + function = pdev->resource[0].end; + device = pdev->id; + } + + dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", + func, site, position, dcc, + function, device); + + func->template[i] = SYS_CFGCTRL_DCC(dcc); + func->template[i] |= SYS_CFGCTRL_SITE(site); + func->template[i] |= SYS_CFGCTRL_POSITION(position); + func->template[i] |= SYS_CFGCTRL_FUNC(function); + func->template[i] |= SYS_CFGCTRL_DEVICE(device); + } + + vexpress_syscfg_regmap_config.max_register = num - 1; + + func->regmap = regmap_init(dev, NULL, 
func, + &vexpress_syscfg_regmap_config); + + if (IS_ERR(func->regmap)) + kfree(func); + else + list_add(&func->list, &syscfg->funcs); + + return func->regmap; +} + +static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context) +{ + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func, *tmp; + + regmap_exit(regmap); + + list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) { + if (func->regmap == regmap) { + list_del(&syscfg->funcs); + kfree(func); + break; + } + } +} + +static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = { + .regmap_init = vexpress_syscfg_regmap_init, + .regmap_exit = vexpress_syscfg_regmap_exit, +}; + + +/* Non-DT hack, to be gone... */ +static struct device *vexpress_syscfg_bridge; + +int vexpress_syscfg_device_register(struct platform_device *pdev) +{ + pdev->dev.parent = vexpress_syscfg_bridge; + + return platform_device_register(pdev); +} + + +int vexpress_syscfg_probe(struct platform_device *pdev) +{ + struct vexpress_syscfg *syscfg; + struct resource *res; + struct device *bridge; + + syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL); + if (!syscfg) + return -ENOMEM; + syscfg->dev = &pdev->dev; + INIT_LIST_HEAD(&syscfg->funcs); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), pdev->name)) + return -EBUSY; + + syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!syscfg->base) + return -EFAULT; + + /* Must use dev.parent (MFD), as that's where DT phandle points at... */ + bridge = vexpress_config_bridge_register(pdev->dev.parent, + &vexpress_syscfg_bridge_ops, syscfg); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + + /* Non-DT case */ + if (!pdev->dev.of_node) + vexpress_syscfg_bridge = bridge; + + return 0; +} + +static const struct platform_device_id vexpress_syscfg_id_table[] = { + { "vexpress-syscfg", }, + {}, +}; + +static struct platform_driver vexpress_syscfg_driver = { + .driver.name = "vexpress-syscfg", + .id_table = vexpress_syscfg_id_table, + .probe = vexpress_syscfg_probe, +}; + +static int __init vexpress_syscfg_init(void) +{ + return platform_driver_register(&vexpress_syscfg_driver); +} +core_initcall(vexpress_syscfg_init); diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index d35cda06b5e8..e0d5017785e5 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -383,11 +383,12 @@ static int vmci_enable_msix(struct pci_dev *pdev, vmci_dev->msix_entries[i].vector = i; } - result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS); + result = pci_enable_msix_exact(pdev, + vmci_dev->msix_entries, VMCI_MAX_INTRS); if (result == 0) vmci_dev->exclusive_vectors = true; - else if (result > 0) - result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1); + else if (result == -ENOSPC) + result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1); return result; } |
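
The sram.c hunk above carves the gen_pool around reserved child nodes of the mmio-sram region: it records each reserved block as an offset/size pair, appends a zero-size sentinel at the end of the region, sorts the list by start offset, and then hands only the gaps between reserved blocks to gen_pool_add_virt(). A minimal user-space sketch of that gap computation, with an array plus qsort() standing in for the kernel's list_sort() and a printf() standing in for gen_pool_add_virt(); the block offsets are made-up example values, not taken from any real device tree:

```c
#include <stdio.h>
#include <stdlib.h>

/* A reserved region, expressed as an offset into the SRAM area. */
struct reserve {
	unsigned int start;
	unsigned int size;
};

static int reserve_cmp(const void *a, const void *b)
{
	const struct reserve *ra = a, *rb = b;

	return (int)ra->start - (int)rb->start;
}

int main(void)
{
	/* Example values: a 64 KiB SRAM with two reserved child nodes. */
	const unsigned int sram_size = 0x10000;
	struct reserve blocks[] = {
		{ 0x8000, 0x1000 },	/* hypothetical reserved block */
		{ 0x1000, 0x2000 },	/* hypothetical reserved block */
		{ sram_size, 0 },	/* sentinel marking the end of the region */
	};
	const unsigned int nblocks = sizeof(blocks) / sizeof(blocks[0]);
	unsigned int cur_start = 0;
	unsigned int i;

	qsort(blocks, nblocks, sizeof(blocks[0]), reserve_cmp);

	for (i = 0; i < nblocks; i++) {
		/* Overlapping reserved blocks are a DT error. */
		if (blocks[i].start < cur_start) {
			fprintf(stderr, "overlap at 0x%x\n", blocks[i].start);
			return 1;
		}

		/* Current offset sits at a reserved block: skip past it. */
		if (blocks[i].start == cur_start) {
			cur_start = blocks[i].start + blocks[i].size;
			continue;
		}

		/* The gap before this reserved block is usable pool space. */
		printf("pool chunk 0x%x-0x%x\n", cur_start, blocks[i].start);
		cur_start = blocks[i].start + blocks[i].size;
	}

	return 0;
}
```

The zero-size sentinel is what lets the loop emit the final chunk between the last reserved block and the end of the SRAM without a special case, which is exactly why the driver allocates one extra rblocks entry.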
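
Similarly, vexpress_syscfg_exec() in the new vexpress-syscfg.c builds a single SYS_CFGCTRL command word from a per-function template (the DCC/site/position/function/device fields are pre-encoded in vexpress_syscfg_regmap_init()), writes data, clears status, writes the command, and then polls SYS_CFGSTAT for COMPLETE or ERR, sleeping when it may and busy-waiting otherwise. A hedged sketch of just the command-word assembly, reusing the field macros shown above; the site/function/device values below are made-up examples rather than values from a real motherboard:

```c
#include <stdint.h>
#include <stdio.h>

#define SYS_CFGCTRL_START	(1u << 31)
#define SYS_CFGCTRL_WRITE	(1u << 30)
#define SYS_CFGCTRL_DCC(n)	(((n) & 0xf) << 26)
#define SYS_CFGCTRL_FUNC(n)	(((n) & 0x3f) << 20)
#define SYS_CFGCTRL_SITE(n)	(((n) & 0x3) << 16)
#define SYS_CFGCTRL_POSITION(n)	(((n) & 0xf) << 12)
#define SYS_CFGCTRL_DEVICE(n)	(((n) & 0xfff) << 0)

/* Build the template once, as the regmap init path does ... */
static uint32_t make_template(uint32_t dcc, uint32_t site, uint32_t position,
			      uint32_t function, uint32_t device)
{
	return SYS_CFGCTRL_DCC(dcc) | SYS_CFGCTRL_SITE(site) |
	       SYS_CFGCTRL_POSITION(position) | SYS_CFGCTRL_FUNC(function) |
	       SYS_CFGCTRL_DEVICE(device);
}

/* ... then, per transaction, OR in START and (for writes) WRITE. */
static uint32_t make_command(uint32_t tmpl, int write)
{
	return tmpl | SYS_CFGCTRL_START | (write ? SYS_CFGCTRL_WRITE : 0);
}

int main(void)
{
	/* Hypothetical example: function 1, device 0 on daughterboard site 1. */
	uint32_t tmpl = make_template(0, 1, 0, 1, 0);

	printf("read command:  0x%08x\n", make_command(tmpl, 0));
	printf("write command: 0x%08x\n", make_command(tmpl, 1));
	return 0;
}
```

Pre-encoding the template is why vexpress_syscfg_read()/vexpress_syscfg_write() only need a register index: everything identifying the target device is fixed when the regmap is created, and only the START/WRITE bits and the data payload change per transaction.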