From bf22433575ef30a4807f0620498017df0f27df67 Mon Sep 17 00:00:00 2001 From: Philip Avinash Date: Fri, 4 Jan 2013 13:26:50 +0530 Subject: mtd: devices: elm: Add support for ELM error correction The ELM hardware module can be used to speed up BCH 4/8/16-bit ECC scheme error correction. For now, only 4- and 8-bit support is added. Signed-off-by: Philip Avinash Signed-off-by: Artem Bityutskiy --- include/linux/platform_data/elm.h | 53 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 include/linux/platform_data/elm.h (limited to 'include') diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h new file mode 100644 index 000000000000..11ab6aaf2431 --- /dev/null +++ b/include/linux/platform_data/elm.h @@ -0,0 +1,53 @@ +/* + * BCH Error Location Module + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ELM_H +#define __ELM_H +enum bch_ecc { + BCH4_ECC = 0, + BCH8_ECC, +}; + +/* ELM supports 8 error syndrome processes */ +#define ERROR_VECTOR_MAX 8 +#define BCH8_ECC_OOB_BYTES 13 +#define BCH4_ECC_OOB_BYTES 7 +/* RBL requires 14 bytes even though BCH8 uses only 13 bytes */ +#define BCH8_SIZE (BCH8_ECC_OOB_BYTES + 1) +#define BCH4_SIZE (BCH4_ECC_OOB_BYTES) +/** + * struct elm_errorvec - error vector for elm + * @error_reported: set true for vectors on which an error is reported + * @error_uncorrectable: set true if the error is uncorrectable + * @error_count: number of correctable errors in the sector + * @error_loc: buffer for error locations + * + */ +struct elm_errorvec { + bool error_reported; + bool error_uncorrectable; + int error_count; + int error_loc[ERROR_VECTOR_MAX]; +}; + +void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, + struct elm_errorvec *err_vec); +void elm_config(struct device *dev, enum bch_ecc bch_type); +#endif /* __ELM_H */ -- cgit v1.2.3 From 62116e5171e00f85a8d53f76e45b84423c89ff34 Mon Sep 17 00:00:00 2001 From: Philip Avinash Date: Fri, 4 Jan 2013 13:26:51 +0530 Subject: mtd: nand: omap2: Support for hardware BCH error correction. The ELM module can be used for hardware error correction of BCH 4- and 8-bit codes. ELM availability is detected by checking for an ELM handle in the device tree. Hence: 1. If the ELM module is available, BCH error correction is done by the ELM module. Custom read_page and write_page methods are also added to read and write a page in one shot, which helps optimize the code for NAND flashes with a page size smaller than 4 KB. 2. If the ELM module is not available, fall back to software BCH error correction. New members are added to struct omap_nand_info: 1. "is_elm_used" records whether the ELM module is used for error correction. 2. "elm_dev" is a device pointer to the ELM device detected. While at it, the gpmc-nand device tree documentation is updated to add the optional property elm_id. Note: the ECC layout uses 1 extra byte per 512 bytes of data to handle erased pages.
Extra byte programmed to zero for programmed pages. Also BCH8 requires 14 byte ecc to maintain compatibility with RBL ECC layout. This results a common ecc layout across RBL, U-boot & Linux with BCH8. Signed-off-by: Philip Avinash Signed-off-by: Artem Bityutskiy --- drivers/mtd/nand/omap2.c | 573 +++++++++++++++++++++++++++++++++++--- include/linux/platform_data/elm.h | 3 +- 2 files changed, 536 insertions(+), 40 deletions(-) (limited to 'include') diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index ec20f67e8949..1a88bd062ac1 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -22,9 +22,12 @@ #include #include #include +#include +#include #ifdef CONFIG_MTD_NAND_OMAP_BCH #include +#include #endif #include @@ -120,6 +123,30 @@ #define BCH8_MAX_ERROR 8 /* upto 8 bit correctable */ #define BCH4_MAX_ERROR 4 /* upto 4 bit correctable */ +#define SECTOR_BYTES 512 +/* 4 bit padding to make byte aligned, 56 = 52 + 4 */ +#define BCH4_BIT_PAD 4 +#define BCH8_ECC_MAX ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8) +#define BCH4_ECC_MAX ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8) + +/* GPMC ecc engine settings for read */ +#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */ +#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */ +#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */ +#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */ +#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */ + +/* GPMC ecc engine settings for write */ +#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */ +#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */ +#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */ + +#ifdef CONFIG_MTD_NAND_OMAP_BCH +static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc, + 0xac, 0x6b, 0xff, 0x99, 0x7b}; +static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10}; +#endif + /* oob info generated runtime depending on ecc algorithm and layout selected */ static struct nand_ecclayout omap_oobinfo; /* Define some generic bad / good block scan pattern which are used @@ -159,6 +186,9 @@ struct omap_nand_info { #ifdef CONFIG_MTD_NAND_OMAP_BCH struct bch_control *bch; struct nand_ecclayout ecclayout; + bool is_elm_used; + struct device *elm_dev; + struct device_node *of_node; #endif }; @@ -1034,6 +1064,13 @@ static int omap_dev_ready(struct mtd_info *mtd) * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction * @mtd: MTD device structure * @mode: Read/Write mode + * + * When using BCH, sector size is hardcoded to 512 bytes. + * Using wrapping mode 6 both for reading and writing if ELM module not uses + * for error correction. + * On writing, + * eccsize0 = 0 (no additional protected byte in spare area) + * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area) */ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode) { @@ -1042,32 +1079,57 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode) struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); struct nand_chip *chip = mtd->priv; - u32 val; + u32 val, wr_mode; + unsigned int ecc_size1, ecc_size0; + + /* Using wrapping mode 6 for writing */ + wr_mode = BCH_WRAPMODE_6; - nerrors = info->nand.ecc.strength; - dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; - nsectors = 1; /* - * Program GPMC to perform correction on one 512-byte sector at a time. - * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and - * gives a slight (5%) performance gain (but requires additional code). 
+ * ECC engine enabled for valid ecc_size0 nibbles + * and disabled for ecc_size1 nibbles. */ + ecc_size0 = BCH_ECC_SIZE0; + ecc_size1 = BCH_ECC_SIZE1; + + /* Perform ecc calculation on 512-byte sector */ + nsectors = 1; + + /* Update number of error correction */ + nerrors = info->nand.ecc.strength; + + /* Multi sector reading/writing for NAND flash with page size < 4096 */ + if (info->is_elm_used && (mtd->writesize <= 4096)) { + if (mode == NAND_ECC_READ) { + /* Using wrapping mode 1 for reading */ + wr_mode = BCH_WRAPMODE_1; + + /* + * ECC engine enabled for ecc_size0 nibbles + * and disabled for ecc_size1 nibbles. + */ + ecc_size0 = (nerrors == 8) ? + BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0; + ecc_size1 = (nerrors == 8) ? + BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1; + } + + /* Perform ecc calculation for one page (< 4096) */ + nsectors = info->nand.ecc.steps; + } writel(ECC1, info->reg.gpmc_ecc_control); - /* - * When using BCH, sector size is hardcoded to 512 bytes. - * Here we are using wrapping mode 6 both for reading and writing, with: - * size0 = 0 (no additional protected byte in spare area) - * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area) - */ - val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT); + /* Configure ecc size for BCH */ + val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT); writel(val, info->reg.gpmc_ecc_size_config); + dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; + /* BCH configuration */ val = ((1 << 16) | /* enable BCH */ (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */ - (0x06 << 8) | /* wrap mode = 6 */ + (wr_mode << 8) | /* wrap mode */ (dev_width << 7) | /* bus width */ (((nsectors-1) & 0x7) << 4) | /* number of sectors */ (info->gpmc_cs << 1) | /* ECC CS */ @@ -1075,7 +1137,7 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode) writel(val, info->reg.gpmc_ecc_config); - /* clear ecc and enable bits */ + /* Clear ecc and enable bits */ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control); } @@ -1164,6 +1226,298 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat, return 0; } +/** + * omap3_calculate_ecc_bch - Generate bytes of ECC bytes + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8 ecc vectors for the page + */ +static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code) +{ + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); + unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; + int i, eccbchtsel; + + nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; + /* + * find BCH scheme used + * 0 -> BCH4 + * 1 -> BCH8 + */ + eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3); + + for (i = 0; i < nsectors; i++) { + + /* Read hw-computed remainder */ + bch_val1 = readl(info->reg.gpmc_bch_result0[i]); + bch_val2 = readl(info->reg.gpmc_bch_result1[i]); + if (eccbchtsel) { + bch_val3 = readl(info->reg.gpmc_bch_result2[i]); + bch_val4 = readl(info->reg.gpmc_bch_result3[i]); + } + + if (eccbchtsel) { + /* BCH8 ecc scheme */ + *ecc_code++ = (bch_val4 & 0xFF); + *ecc_code++ = ((bch_val3 >> 24) & 0xFF); + *ecc_code++ = ((bch_val3 >> 16) & 0xFF); + *ecc_code++ = ((bch_val3 >> 8) & 0xFF); + *ecc_code++ = (bch_val3 & 0xFF); + *ecc_code++ = ((bch_val2 >> 24) & 0xFF); + *ecc_code++ = ((bch_val2 >> 16) & 0xFF); + *ecc_code++ = ((bch_val2 >> 8) & 0xFF); + *ecc_code++ = (bch_val2 & 0xFF); + 
*ecc_code++ = ((bch_val1 >> 24) & 0xFF); + *ecc_code++ = ((bch_val1 >> 16) & 0xFF); + *ecc_code++ = ((bch_val1 >> 8) & 0xFF); + *ecc_code++ = (bch_val1 & 0xFF); + /* + * Setting 14th byte to zero to handle + * erased page & maintain compatibility + * with RBL + */ + *ecc_code++ = 0x0; + } else { + /* BCH4 ecc scheme */ + *ecc_code++ = ((bch_val2 >> 12) & 0xFF); + *ecc_code++ = ((bch_val2 >> 4) & 0xFF); + *ecc_code++ = ((bch_val2 & 0xF) << 4) | + ((bch_val1 >> 28) & 0xF); + *ecc_code++ = ((bch_val1 >> 20) & 0xFF); + *ecc_code++ = ((bch_val1 >> 12) & 0xFF); + *ecc_code++ = ((bch_val1 >> 4) & 0xFF); + *ecc_code++ = ((bch_val1 & 0xF) << 4); + /* + * Setting 8th byte to zero to handle + * erased page + */ + *ecc_code++ = 0x0; + } + } + + return 0; +} + +/** + * erased_sector_bitflips - count bit flips + * @data: data sector buffer + * @oob: oob buffer + * @info: omap_nand_info + * + * Check the bit flips in erased page falls below correctable level. + * If falls below, report the page as erased with correctable bit + * flip, else report as uncorrectable page. + */ +static int erased_sector_bitflips(u_char *data, u_char *oob, + struct omap_nand_info *info) +{ + int flip_bits = 0, i; + + for (i = 0; i < info->nand.ecc.size; i++) { + flip_bits += hweight8(~data[i]); + if (flip_bits > info->nand.ecc.strength) + return 0; + } + + for (i = 0; i < info->nand.ecc.bytes - 1; i++) { + flip_bits += hweight8(~oob[i]); + if (flip_bits > info->nand.ecc.strength) + return 0; + } + + /* + * Bit flips falls in correctable level. + * Fill data area with 0xFF + */ + if (flip_bits) { + memset(data, 0xFF, info->nand.ecc.size); + memset(oob, 0xFF, info->nand.ecc.bytes); + } + + return flip_bits; +} + +/** + * omap_elm_correct_data - corrects page data area in case error reported + * @mtd: MTD device structure + * @data: page data + * @read_ecc: ecc read from nand flash + * @calc_ecc: ecc read from HW ECC registers + * + * Calculated ecc vector reported as zero in case of non-error pages. + * In case of error/erased pages non-zero error vector is reported. + * In case of non-zero ecc vector, check read_ecc at fixed offset + * (x = 13/7 in case of BCH8/4 == 0) to find page programmed or not. + * To handle bit flips in this data, count the number of 0's in + * read_ecc[x] and check if it greater than 4. If it is less, it is + * programmed page, else erased page. + * + * 1. If page is erased, check with standard ecc vector (ecc vector + * for erased page to find any bit flip). If check fails, bit flip + * is present in erased page. Count the bit flips in erased page and + * if it falls under correctable level, report page with 0xFF and + * update the correctable bit information. + * 2. If error is reported on programmed page, update elm error + * vector and correct the page with ELM error correction routine. 
+ * + */ +static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data, + u_char *read_ecc, u_char *calc_ecc) +{ + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); + int eccsteps = info->nand.ecc.steps; + int i , j, stat = 0; + int eccsize, eccflag, ecc_vector_size; + struct elm_errorvec err_vec[ERROR_VECTOR_MAX]; + u_char *ecc_vec = calc_ecc; + u_char *spare_ecc = read_ecc; + u_char *erased_ecc_vec; + enum bch_ecc type; + bool is_error_reported = false; + + /* Initialize elm error vector to zero */ + memset(err_vec, 0, sizeof(err_vec)); + + if (info->nand.ecc.strength == BCH8_MAX_ERROR) { + type = BCH8_ECC; + erased_ecc_vec = bch8_vector; + } else { + type = BCH4_ECC; + erased_ecc_vec = bch4_vector; + } + + ecc_vector_size = info->nand.ecc.bytes; + + /* + * Remove extra byte padding for BCH8 RBL + * compatibility and erased page handling + */ + eccsize = ecc_vector_size - 1; + + for (i = 0; i < eccsteps ; i++) { + eccflag = 0; /* initialize eccflag */ + + /* + * Check any error reported, + * In case of error, non zero ecc reported. + */ + + for (j = 0; (j < eccsize); j++) { + if (calc_ecc[j] != 0) { + eccflag = 1; /* non zero ecc, error present */ + break; + } + } + + if (eccflag == 1) { + /* + * Set threshold to minimum of 4, half of ecc.strength/2 + * to allow max bit flip in byte to 4 + */ + unsigned int threshold = min_t(unsigned int, 4, + info->nand.ecc.strength / 2); + + /* + * Check data area is programmed by counting + * number of 0's at fixed offset in spare area. + * Checking count of 0's against threshold. + * In case programmed page expects at least threshold + * zeros in byte. + * If zeros are less than threshold for programmed page/ + * zeros are more than threshold erased page, either + * case page reported as uncorrectable. 
+ */ + if (hweight8(~read_ecc[eccsize]) >= threshold) { + /* + * Update elm error vector as + * data area is programmed + */ + err_vec[i].error_reported = true; + is_error_reported = true; + } else { + /* Error reported in erased page */ + int bitflip_count; + u_char *buf = &data[info->nand.ecc.size * i]; + + if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) { + bitflip_count = erased_sector_bitflips( + buf, read_ecc, info); + + if (bitflip_count) + stat += bitflip_count; + else + return -EINVAL; + } + } + } + + /* Update the ecc vector */ + calc_ecc += ecc_vector_size; + read_ecc += ecc_vector_size; + } + + /* Check if any error reported */ + if (!is_error_reported) + return 0; + + /* Decode BCH error using ELM module */ + elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec); + + for (i = 0; i < eccsteps; i++) { + if (err_vec[i].error_reported) { + for (j = 0; j < err_vec[i].error_count; j++) { + u32 bit_pos, byte_pos, error_max, pos; + + if (type == BCH8_ECC) + error_max = BCH8_ECC_MAX; + else + error_max = BCH4_ECC_MAX; + + if (info->nand.ecc.strength == BCH8_MAX_ERROR) + pos = err_vec[i].error_loc[j]; + else + /* Add 4 to take care 4 bit padding */ + pos = err_vec[i].error_loc[j] + + BCH4_BIT_PAD; + + /* Calculate bit position of error */ + bit_pos = pos % 8; + + /* Calculate byte position of error */ + byte_pos = (error_max - pos - 1) / 8; + + if (pos < error_max) { + if (byte_pos < 512) + data[byte_pos] ^= 1 << bit_pos; + else + spare_ecc[byte_pos - 512] ^= + 1 << bit_pos; + } + /* else, not interested to correct ecc */ + } + } + + /* Update number of correctable errors */ + stat += err_vec[i].error_count; + + /* Update page data with sector size */ + data += info->nand.ecc.size; + spare_ecc += ecc_vector_size; + } + + for (i = 0; i < eccsteps; i++) + /* Return error if uncorrectable error present */ + if (err_vec[i].error_uncorrectable) + return -EINVAL; + + return stat; +} + /** * omap3_correct_data_bch - Decode received data and correct errors * @mtd: MTD device structure @@ -1196,6 +1550,92 @@ static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data, return count; } +/** + * omap_write_page_bch - BCH ecc based write page function for entire page + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB + * + * Custom write page method evolved to support multi sector writing in one shot + */ +static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required) +{ + int i; + uint8_t *ecc_calc = chip->buffers->ecccalc; + uint32_t *eccpos = chip->ecc.layout->eccpos; + + /* Enable GPMC ecc engine */ + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + + /* Write data */ + chip->write_buf(mtd, buf, mtd->writesize); + + /* Update ecc vector from GPMC result registers */ + chip->ecc.calculate(mtd, buf, &ecc_calc[0]); + + for (i = 0; i < chip->ecc.total; i++) + chip->oob_poi[eccpos[i]] = ecc_calc[i]; + + /* Write ecc vector to OOB area */ + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + return 0; +} + +/** + * omap_read_page_bch - BCH ecc based page read function for entire page + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * For BCH ecc scheme, GPMC used for syndrome calculation and ELM module + * used for error correction. 
+ * Custom method evolved to support ELM error correction & multi sector + * reading. On reading page data area is read along with OOB data with + * ecc engine enabled. ecc vector updated after read of OOB data. + * For non error pages ecc vector reported as zero. + */ +static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) +{ + uint8_t *ecc_calc = chip->buffers->ecccalc; + uint8_t *ecc_code = chip->buffers->ecccode; + uint32_t *eccpos = chip->ecc.layout->eccpos; + uint8_t *oob = &chip->oob_poi[eccpos[0]]; + uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0]; + int stat; + unsigned int max_bitflips = 0; + + /* Enable GPMC ecc engine */ + chip->ecc.hwctl(mtd, NAND_ECC_READ); + + /* Read data */ + chip->read_buf(mtd, buf, mtd->writesize); + + /* Read oob bytes */ + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1); + chip->read_buf(mtd, oob, chip->ecc.total); + + /* Calculate ecc bytes */ + chip->ecc.calculate(mtd, buf, ecc_calc); + + memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total); + + stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc); + + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + + return max_bitflips; +} + /** * omap3_free_bch - Release BCH ecc resources * @mtd: MTD device structure @@ -1225,6 +1665,11 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt) #else const int hw_errors = BCH4_MAX_ERROR; #endif + enum bch_ecc bch_type; + const __be32 *parp; + int lenp; + struct device_node *elm_node; + info->bch = NULL; max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? @@ -1235,30 +1680,67 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt) goto fail; } - /* software bch library is only used to detect and locate errors */ - info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */); - if (!info->bch) - goto fail; + info->nand.ecc.size = 512; + info->nand.ecc.hwctl = omap3_enable_hwecc_bch; + info->nand.ecc.mode = NAND_ECC_HW; + info->nand.ecc.strength = max_errors; - info->nand.ecc.size = 512; - info->nand.ecc.hwctl = omap3_enable_hwecc_bch; - info->nand.ecc.correct = omap3_correct_data_bch; - info->nand.ecc.mode = NAND_ECC_HW; + if (hw_errors == BCH8_MAX_ERROR) + bch_type = BCH8_ECC; + else + bch_type = BCH4_ECC; - /* - * The number of corrected errors in an ecc block that will trigger - * block scrubbing defaults to the ecc strength (4 or 8). - * Set mtd->bitflip_threshold here to define a custom threshold. 
- */ + /* Detect availability of ELM module */ + parp = of_get_property(info->of_node, "elm_id", &lenp); + if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { + pr_err("Missing elm_id property, fall back to Software BCH\n"); + info->is_elm_used = false; + } else { + struct platform_device *pdev; - if (max_errors == 8) { - info->nand.ecc.strength = 8; - info->nand.ecc.bytes = 13; - info->nand.ecc.calculate = omap3_calculate_ecc_bch8; + elm_node = of_find_node_by_phandle(be32_to_cpup(parp)); + pdev = of_find_device_by_node(elm_node); + info->elm_dev = &pdev->dev; + elm_config(info->elm_dev, bch_type); + info->is_elm_used = true; + } + + if (info->is_elm_used && (mtd->writesize <= 4096)) { + + if (hw_errors == BCH8_MAX_ERROR) + info->nand.ecc.bytes = BCH8_SIZE; + else + info->nand.ecc.bytes = BCH4_SIZE; + + info->nand.ecc.correct = omap_elm_correct_data; + info->nand.ecc.calculate = omap3_calculate_ecc_bch; + info->nand.ecc.read_page = omap_read_page_bch; + info->nand.ecc.write_page = omap_write_page_bch; } else { - info->nand.ecc.strength = 4; - info->nand.ecc.bytes = 7; - info->nand.ecc.calculate = omap3_calculate_ecc_bch4; + /* + * software bch library is only used to detect and + * locate errors + */ + info->bch = init_bch(13, max_errors, + 0x201b /* hw polynomial */); + if (!info->bch) + goto fail; + + info->nand.ecc.correct = omap3_correct_data_bch; + + /* + * The number of corrected errors in an ecc block that will + * trigger block scrubbing defaults to the ecc strength (4 or 8) + * Set mtd->bitflip_threshold here to define a custom threshold. + */ + + if (max_errors == 8) { + info->nand.ecc.bytes = 13; + info->nand.ecc.calculate = omap3_calculate_ecc_bch8; + } else { + info->nand.ecc.bytes = 7; + info->nand.ecc.calculate = omap3_calculate_ecc_bch4; + } } pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors); @@ -1274,7 +1756,7 @@ fail: */ static int omap3_init_bch_tail(struct mtd_info *mtd) { - int i, steps; + int i, steps, offset; struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); struct nand_ecclayout *layout = &info->ecclayout; @@ -1296,11 +1778,21 @@ static int omap3_init_bch_tail(struct mtd_info *mtd) goto fail; } + /* ECC layout compatible with RBL for BCH8 */ + if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE)) + offset = 2; + else + offset = mtd->oobsize - layout->eccbytes; + /* put ecc bytes at oob tail */ for (i = 0; i < layout->eccbytes; i++) - layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; + layout->eccpos[i] = offset + i; + + if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE)) + layout->oobfree[0].offset = 2 + layout->eccbytes * steps; + else + layout->oobfree[0].offset = 2; - layout->oobfree[0].offset = 2; layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; info->nand.ecc.layout = layout; @@ -1363,6 +1855,9 @@ static int omap_nand_probe(struct platform_device *pdev) info->nand.options = pdata->devsize; info->nand.options |= NAND_SKIP_BBTSCAN; +#ifdef CONFIG_MTD_NAND_OMAP_BCH + info->of_node = pdata->of_node; +#endif res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h index 11ab6aaf2431..1bd5244d1dcd 100644 --- a/include/linux/platform_data/elm.h +++ b/include/linux/platform_data/elm.h @@ -30,7 +30,8 @@ enum bch_ecc { #define BCH4_ECC_OOB_BYTES 7 /* RBL requires 14 byte even though BCH8 uses only 13 byte */ #define BCH8_SIZE (BCH8_ECC_OOB_BYTES + 1) -#define BCH4_SIZE (BCH4_ECC_OOB_BYTES) 
+/* Uses 1 extra byte to handle erased pages */ +#define BCH4_SIZE (BCH4_ECC_OOB_BYTES + 1) /** * struct elm_errorvec - error vector for elm -- cgit v1.2.3 From a2f74a7dacc1c17a0b146eb3112217874c5db436 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sun, 6 Jan 2013 21:28:50 +0100 Subject: mtd: bcm47xxsflash: add own struct for abstrating bus type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rafał Miłecki --- drivers/mtd/devices/bcm47xxsflash.c | 37 +++++++++++++++++++---------- drivers/mtd/devices/bcm47xxsflash.h | 15 ++++++++++++ include/linux/bcma/bcma_driver_chipcommon.h | 1 + 3 files changed, 40 insertions(+), 13 deletions(-) create mode 100644 drivers/mtd/devices/bcm47xxsflash.h (limited to 'include') diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c index 4714584aa993..8fe1a097161b 100644 --- a/drivers/mtd/devices/bcm47xxsflash.c +++ b/drivers/mtd/devices/bcm47xxsflash.c @@ -5,6 +5,8 @@ #include #include +#include "bcm47xxsflash.h" + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Serial flash driver for BCMA bus"); @@ -13,26 +15,27 @@ static const char *probes[] = { "bcm47xxpart", NULL }; static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { - struct bcma_sflash *sflash = mtd->priv; + struct bcm47xxsflash *b47s = mtd->priv; /* Check address range */ if ((from + len) > mtd->size) return -EINVAL; - memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from), + memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from), len); return len; } -static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash, - struct mtd_info *mtd) +static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s) { - mtd->priv = sflash; + struct mtd_info *mtd = &b47s->mtd; + + mtd->priv = b47s; mtd->name = "bcm47xxsflash"; mtd->owner = THIS_MODULE; mtd->type = MTD_ROM; - mtd->size = sflash->size; + mtd->size = b47s->size; mtd->_read = bcm47xxsflash_read; /* TODO: implement writing support and verify/change following code */ @@ -43,16 +46,23 @@ static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash, static int bcm47xxsflash_probe(struct platform_device *pdev) { struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); + struct bcm47xxsflash *b47s; int err; - sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); - if (!sflash->mtd) { + b47s = kzalloc(sizeof(*b47s), GFP_KERNEL); + if (!b47s) { err = -ENOMEM; goto out; } - bcm47xxsflash_fill_mtd(sflash, sflash->mtd); + sflash->priv = b47s; + + b47s->window = sflash->window; + b47s->blocksize = sflash->blocksize; + b47s->numblocks = sflash->numblocks; + b47s->size = sflash->size; + bcm47xxsflash_fill_mtd(b47s); - err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0); + err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0); if (err) { pr_err("Failed to register MTD device: %d\n", err); goto err_dev_reg; @@ -61,7 +71,7 @@ static int bcm47xxsflash_probe(struct platform_device *pdev) return 0; err_dev_reg: - kfree(sflash->mtd); + kfree(&b47s->mtd); out: return err; } @@ -69,9 +79,10 @@ out: static int bcm47xxsflash_remove(struct platform_device *pdev) { struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev); + struct bcm47xxsflash *b47s = sflash->priv; - mtd_device_unregister(sflash->mtd); - kfree(sflash->mtd); + mtd_device_unregister(&b47s->mtd); + kfree(b47s); return 0; } diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h new 
file mode 100644 index 000000000000..ebf6f710e23c --- /dev/null +++ b/drivers/mtd/devices/bcm47xxsflash.h @@ -0,0 +1,15 @@ +#ifndef __BCM47XXSFLASH_H +#define __BCM47XXSFLASH_H + +#include + +struct bcm47xxsflash { + u32 window; + u32 blocksize; + u16 numblocks; + u32 size; + + struct mtd_info mtd; +}; + +#endif /* BCM47XXSFLASH */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 9a0e3fa3ca95..a5bfda6b0a76 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -528,6 +528,7 @@ struct bcma_sflash { u32 size; struct mtd_info *mtd; + void *priv; }; #endif -- cgit v1.2.3 From 1648eaaa1575ea686acb82fb8cb3d8839764ef2c Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Fri, 18 Jan 2013 13:10:05 +0100 Subject: mtd: cfi_cmdset_0002: Support Persistent Protection Bits (PPB) locking Currently cfi_cmdset_0002.c does not support PPB locking of sectors. This patch adds support for this locking/unlocking mechanism. It is needed on some platforms, since newer U-Boot versions do support this PPB locking and protect for example their environment sector(s) this way. This PPB locking/unlocking will be enabled for all devices supported by cfi_cmdset_0002 reporting 8 in the CFI word 0x49 (Sector Protect/Unprotect scheme). Please note that PPB locking does support sector-by-sector locking. But the whole chip can only be unlocked together. So unlocking one sector will automatically unlock all sectors of this device. Because of this chip limitation, the PPB unlocking function saves the current locking status of all sectors before unlocking the whole device. After unlocking the saved locking status is re-configured. This way only the addressed sectors will be unlocked. To selectively enable this advanced sector protection mechanism, the device-tree property "use-advanced-sector-protection" has been created. To enable support for this locking this property needs to be present in the flash DT node. E.g.: nor_flash@0,0 { compatible = "amd,s29gl256n", "cfi-flash"; bank-width = <2>; use-advanced-sector-protection; ... Tested with Spansion S29GL512S10THI and Micron JS28F512M29EWx flash devices. Signed-off-by: Stefan Roese Tested-by: Holger Brunck Signed-off-by: Artem Bityutskiy --- .../devicetree/bindings/mtd/mtd-physmap.txt | 3 + drivers/mtd/chips/cfi_cmdset_0002.c | 217 +++++++++++++++++++++ drivers/mtd/maps/physmap_of.c | 1 + include/linux/mtd/map.h | 1 + 4 files changed, 222 insertions(+) (limited to 'include') diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt index dab7847fc800..61c5ec850f2f 100644 --- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt +++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt @@ -26,6 +26,9 @@ file systems on embedded devices. - linux,mtd-name: allow to specify the mtd name for retro capability with physmap-flash drivers as boot loader pass the mtd partition via the old device name physmap-flash. + - use-advanced-sector-protection: boolean to enable support for the + advanced sector protection (Spansion: PPB - Persistent Protection + Bits) locking. 
For JEDEC compatible devices, the following additional properties are defined: diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index b86197286f24..fff665d59a0d 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include #include #include @@ -74,6 +76,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); + static struct mtd_chip_driver cfi_amdstd_chipdrv = { .probe = NULL, /* Not usable directly */ .destroy = cfi_amdstd_destroy, @@ -496,6 +502,7 @@ static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi) struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) { struct cfi_private *cfi = map->fldrv_priv; + struct device_node __maybe_unused *np = map->device_node; struct mtd_info *mtd; int i; @@ -570,6 +577,17 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) cfi_tell_features(extp); #endif +#ifdef CONFIG_OF + if (np && of_property_read_bool( + np, "use-advanced-sector-protection") + && extp->BlkProtUnprot == 8) { + printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n"); + mtd->_lock = cfi_ppb_lock; + mtd->_unlock = cfi_ppb_unlock; + mtd->_is_locked = cfi_ppb_is_locked; + } +#endif + bootloc = extp->TopBottom; if ((bootloc < 2) || (bootloc > 5)) { printk(KERN_WARNING "%s: CFI contains unrecognised boot " @@ -2172,6 +2190,205 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); } +/* + * Advanced Sector Protection - PPB (Persistent Protection Bit) locking + */ + +struct ppb_lock { + struct flchip *chip; + loff_t offset; + int locked; +}; + +#define MAX_SECTORS 512 + +#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) +#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) +#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) + +static int __maybe_unused do_ppb_xxlock(struct map_info *map, + struct flchip *chip, + unsigned long adr, int len, void *thunk) +{ + struct cfi_private *cfi = map->fldrv_priv; + unsigned long timeo; + int ret; + + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); + if (ret) { + mutex_unlock(&chip->mutex); + return ret; + } + + pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); + + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, + cfi->device_type, NULL); + /* PPB entry command */ + cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + + if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { + chip->state = FL_LOCKING; + map_write(map, CMD(0xA0), chip->start + adr); + map_write(map, CMD(0x00), chip->start + adr); + } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { + /* + * Unlocking of one specific sector is not supported, so we + * have to unlock all sectors of this device instead + */ + chip->state = FL_UNLOCKING; + map_write(map, CMD(0x80), chip->start); + map_write(map, CMD(0x30), chip->start); + } else 
if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { + chip->state = FL_JEDEC_QUERY; + /* Return locked status: 0->locked, 1->unlocked */ + ret = !cfi_read_query(map, adr); + } else + BUG(); + + /* + * Wait for some time as unlocking of all sectors takes quite long + */ + timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ + for (;;) { + if (chip_ready(map, adr)) + break; + + if (time_after(jiffies, timeo)) { + printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); + ret = -EIO; + break; + } + + UDELAY(map, chip, adr, 1); + } + + /* Exit BC commands */ + map_write(map, CMD(0x90), chip->start); + map_write(map, CMD(0x00), chip->start); + + chip->state = FL_READY; + put_chip(map, chip, adr + chip->start); + mutex_unlock(&chip->mutex); + + return ret; +} + +static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, + uint64_t len) +{ + return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, + DO_XXLOCK_ONEBLOCK_LOCK); +} + +static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, + uint64_t len) +{ + struct mtd_erase_region_info *regions = mtd->eraseregions; + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct ppb_lock *sect; + unsigned long adr; + loff_t offset; + uint64_t length; + int chipnum; + int i; + int sectors; + int ret; + + /* + * PPB unlocking always unlocks all sectors of the flash chip. + * We need to re-lock all previously locked sectors. So lets + * first check the locking status of all sectors and save + * it for future use. + */ + sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL); + if (!sect) + return -ENOMEM; + + /* + * This code to walk all sectors is a slightly modified version + * of the cfi_varsize_frob() code. + */ + i = 0; + chipnum = 0; + adr = 0; + sectors = 0; + offset = 0; + length = mtd->size; + + while (length) { + int size = regions[i].erasesize; + + /* + * Only test sectors that shall not be unlocked. The other + * sectors shall be unlocked, so lets keep their locking + * status at "unlocked" (locked=0) for the final re-locking. + */ + if ((adr < ofs) || (adr >= (ofs + len))) { + sect[sectors].chip = &cfi->chips[chipnum]; + sect[sectors].offset = offset; + sect[sectors].locked = do_ppb_xxlock( + map, &cfi->chips[chipnum], adr, 0, + DO_XXLOCK_ONEBLOCK_GETLOCK); + } + + adr += size; + offset += size; + length -= size; + + if (offset == regions[i].offset + size * regions[i].numblocks) + i++; + + if (adr >> cfi->chipshift) { + adr = 0; + chipnum++; + + if (chipnum >= cfi->numchips) + break; + } + + sectors++; + if (sectors >= MAX_SECTORS) { + printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", + MAX_SECTORS); + kfree(sect); + return -EINVAL; + } + } + + /* Now unlock the whole chip */ + ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, + DO_XXLOCK_ONEBLOCK_UNLOCK); + if (ret) { + kfree(sect); + return ret; + } + + /* + * PPB unlocking always unlocks all sectors of the flash chip. + * We need to re-lock all previously locked sectors. + */ + for (i = 0; i < sectors; i++) { + if (sect[i].locked) + do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, + DO_XXLOCK_ONEBLOCK_LOCK); + } + + kfree(sect); + return ret; +} + +static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, + uint64_t len) +{ + return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, + DO_XXLOCK_ONEBLOCK_GETLOCK) ? 
1 : 0; +} static void cfi_amdstd_sync (struct mtd_info *mtd) { diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 67cc73c18ddd..263d9e1b8001 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -241,6 +241,7 @@ static int of_flash_probe(struct platform_device *dev) info->list[i].map.phys = res.start; info->list[i].map.size = res_size; info->list[i].map.bankwidth = be32_to_cpup(width); + info->list[i].map.device_node = dp; err = -ENOMEM; info->list[i].map.virt = ioremap(info->list[i].map.phys, diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index f6eb4332ac92..8b9bfd7dcaa3 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -245,6 +245,7 @@ struct map_info { unsigned long pfow_base; unsigned long map_priv_1; unsigned long map_priv_2; + struct device_node *device_node; void *fldrv_priv; struct mtd_chip_driver *fldrv; }; -- cgit v1.2.3 From 24dea0c9feccf699749f860fa2f4ccd84d30390d Mon Sep 17 00:00:00 2001 From: Dmitry Eremin-Solenikov Date: Thu, 31 Jan 2013 21:06:34 +0400 Subject: mtd: map: BUG() in non handled cases Several map-related functions look like a serie of ifs, checking widths of map. Those functions do not have any handling for default case. Instead of fiddling with uninitialized_var in those functions, let's just add a (correct) BUG() to the default case on those maps. This will also allow us to catch potential errors in maps setup in future. Signed-off-by: Dmitry Eremin-Solenikov Signed-off-by: Artem Bityutskiy --- include/linux/mtd/map.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 8b9bfd7dcaa3..4b02512e421c 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -329,7 +329,7 @@ static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word static inline map_word map_word_load(struct map_info *map, const void *ptr) { - map_word r = {{0} }; + map_word r; if (map_bankwidth_is_1(map)) r.x[0] = *(unsigned char *)ptr; @@ -343,6 +343,8 @@ static inline map_word map_word_load(struct map_info *map, const void *ptr) #endif else if (map_bankwidth_is_large(map)) memcpy(r.x, ptr, map->bankwidth); + else + BUG(); return r; } @@ -392,7 +394,7 @@ static inline map_word map_word_ff(struct map_info *map) static inline map_word inline_map_read(struct map_info *map, unsigned long ofs) { - map_word uninitialized_var(r); + map_word r; if (map_bankwidth_is_1(map)) r.x[0] = __raw_readb(map->virt + ofs); @@ -426,6 +428,8 @@ static inline void inline_map_write(struct map_info *map, const map_word datum, #endif else if (map_bankwidth_is_large(map)) memcpy_toio(map->virt+ofs, datum.x, map->bankwidth); + else + BUG(); mb(); } -- cgit v1.2.3
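The PPB lock/unlock/is_locked handlers added by the cfi_cmdset_0002 patch above are installed into mtd->_lock, mtd->_unlock and mtd->_is_locked, so they are reachable from user space through the generic MTD locking ioctls. A minimal sketch of driving them from a test program follows; the device node /dev/mtd1 and the 128 KiB range are assumptions made for illustration, not values taken from the patches.

/* Sketch: exercise PPB sector locking through the generic MTD ioctls.
 * Assumptions: /dev/mtd1 is a partition on a PPB-capable NOR chip and
 * the first 128 KiB covers the sectors of interest. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct erase_info_user ei = { .start = 0, .length = 128 * 1024 };
	int fd = open("/dev/mtd1", O_RDWR);	/* assumed flash partition */

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* MEMISLOCKED ends up in cfi_ppb_is_locked(): 1 locked, 0 unlocked */
	printf("locked before: %d\n", ioctl(fd, MEMISLOCKED, &ei));

	/* MEMUNLOCK ends up in cfi_ppb_unlock(); the whole chip is unlocked
	 * and all sectors outside [start, start + length) are re-locked */
	if (ioctl(fd, MEMUNLOCK, &ei) < 0)
		perror("MEMUNLOCK");

	/* MEMLOCK ends up in cfi_ppb_lock() and sets the PPB for the range */
	if (ioctl(fd, MEMLOCK, &ei) < 0)
		perror("MEMLOCK");

	printf("locked after: %d\n", ioctl(fd, MEMISLOCKED, &ei));
	close(fd);
	return 0;
}

Because one-sector PPB unlock is not supported by the hardware, a MEMUNLOCK on such a chip briefly clears the protection of unrelated sectors before cfi_ppb_unlock() re-applies their saved lock state.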