Diffstat (limited to 'drivers'): 336 files changed, 8476 insertions, 3453 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 32b20efff5f8..4556c86c3465 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -570,8 +570,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); - if (cx->type == ACPI_STATE_C3) - ACPI_FLUSH_CPU_CACHE(); + ACPI_FLUSH_CPU_CACHE(); while (1) { diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 9efbfe087de7..762b61f67e6c 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -588,19 +588,6 @@ static struct acpi_device *handle_to_device(acpi_handle handle, return adev; } -int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device) -{ - if (!device) - return -EINVAL; - - *device = handle_to_device(handle, NULL); - if (!*device) - return -ENODEV; - - return 0; -} -EXPORT_SYMBOL(acpi_bus_get_device); - /** * acpi_fetch_acpi_dev - Retrieve ACPI device object. * @handle: ACPI handle associated with the requested ACPI device object. diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index e5641e6c52ee..bb45a9c00514 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -115,14 +115,16 @@ config SATA_AHCI If unsure, say N. -config SATA_LPM_POLICY +config SATA_MOBILE_LPM_POLICY int "Default SATA Link Power Management policy for low power chipsets" range 0 4 default 0 depends on SATA_AHCI help Select the Default SATA Link Power Management (LPM) policy to use - for chipsets / "South Bridges" designated as supporting low power. + for chipsets / "South Bridges" supporting low-power modes. Such + chipsets are typically found on most laptops but desktops and + servers now also widely use chipsets supporting low power modes. The value set has the following meanings: 0 => Keep firmware settings diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 84456c05e845..397dfd27c90d 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1595,7 +1595,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports, static void ahci_update_initial_lpm_policy(struct ata_port *ap, struct ahci_host_priv *hpriv) { - int policy = CONFIG_SATA_LPM_POLICY; + int policy = CONFIG_SATA_MOBILE_LPM_POLICY; /* Ignore processing for chipsets that don't use policy */ diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 6ead58c1b6e5..ad11a4c52fbe 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -236,7 +236,7 @@ enum { AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read only registers */ AHCI_HFLAG_USE_LPM_POLICY = (1 << 25), /* chipset that should use - SATA_LPM_POLICY + SATA_MOBILE_LPM_POLICY as default lpm_policy */ AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during suspend/resume */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index cceedde51126..ca64837641be 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4014,6 +4014,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_NO_DMA_LOG | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index b3be7a8f5bea..b1666adc1c3a 100644 --- 
a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1634,7 +1634,7 @@ EXPORT_SYMBOL_GPL(ata_sff_interrupt); void ata_sff_lost_interrupt(struct ata_port *ap) { - u8 status; + u8 status = 0; struct ata_queued_cmd *qc; /* Only one outstanding command per SFF channel */ diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index bec33d781ae0..e3263e961045 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -137,7 +137,11 @@ struct sata_dwc_device { #endif }; -#define SATA_DWC_QCMD_MAX 32 +/* + * Allow one extra special slot for commands and DMA management + * to account for libata internal commands. + */ +#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1) struct sata_dwc_device_port { struct sata_dwc_device *hsdev; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index af6bea56f4e2..3fc3b5940bb3 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev) return -EPROBE_DEFER; } +EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state); static void deferred_probe_timeout_work_func(struct work_struct *work) { diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 4b55e864a0a3..4d3efaa20b7b 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1638,22 +1638,22 @@ struct sib_info { }; void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib); -extern void notify_resource_state(struct sk_buff *, +extern int notify_resource_state(struct sk_buff *, unsigned int, struct drbd_resource *, struct resource_info *, enum drbd_notification_type); -extern void notify_device_state(struct sk_buff *, +extern int notify_device_state(struct sk_buff *, unsigned int, struct drbd_device *, struct device_info *, enum drbd_notification_type); -extern void notify_connection_state(struct sk_buff *, +extern int notify_connection_state(struct sk_buff *, unsigned int, struct drbd_connection *, struct connection_info *, enum drbd_notification_type); -extern void notify_peer_device_state(struct sk_buff *, +extern int notify_peer_device_state(struct sk_buff *, unsigned int, struct drbd_peer_device *, struct peer_device_info *, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 9676a1d214bc..4b0b25cc916e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2719,6 +2719,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig sprintf(disk->disk_name, "drbd%d", minor); disk->private_data = device; + blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); blk_queue_write_cache(disk->queue, true, true); /* Setting the max_hw_sectors to an odd value of 8kibyte here This triggers a max_bio_size message upon first attach or connect */ @@ -2773,12 +2774,12 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig if (init_submitter(device)) { err = ERR_NOMEM; - goto out_idr_remove_vol; + goto out_idr_remove_from_resource; } err = add_disk(disk); if (err) - goto out_idr_remove_vol; + goto out_idr_remove_from_resource; /* inherit the connection state */ device->state.conn = first_connection(resource)->cstate; @@ -2792,8 +2793,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_debugfs_device_add(device); return NO_ERROR; -out_idr_remove_vol: - idr_remove(&connection->peer_devices, vnr); out_idr_remove_from_resource: for_each_connection(connection, resource) { peer_device = 
idr_remove(&connection->peer_devices, vnr); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 02030c9c4d3b..b7216c186ba4 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -4549,7 +4549,7 @@ static int nla_put_notification_header(struct sk_buff *msg, return drbd_notification_header_to_skb(msg, &nh, true); } -void notify_resource_state(struct sk_buff *skb, +int notify_resource_state(struct sk_buff *skb, unsigned int seq, struct drbd_resource *resource, struct resource_info *resource_info, @@ -4591,16 +4591,17 @@ void notify_resource_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_device_state(struct sk_buff *skb, +int notify_device_state(struct sk_buff *skb, unsigned int seq, struct drbd_device *device, struct device_info *device_info, @@ -4640,16 +4641,17 @@ void notify_device_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_connection_state(struct sk_buff *skb, +int notify_connection_state(struct sk_buff *skb, unsigned int seq, struct drbd_connection *connection, struct connection_info *connection_info, @@ -4689,16 +4691,17 @@ void notify_connection_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_peer_device_state(struct sk_buff *skb, +int notify_peer_device_state(struct sk_buff *skb, unsigned int seq, struct drbd_peer_device *peer_device, struct peer_device_info *peer_device_info, @@ -4739,13 +4742,14 @@ void notify_peer_device_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } void notify_helper(enum drbd_notification_type type, @@ -4796,7 +4800,7 @@ fail: err, seq); } -static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) +static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq) { struct drbd_genlmsghdr *dh; int err; @@ -4810,11 +4814,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) if (nla_put_notification_header(skb, NOTIFY_EXISTS)) goto nla_put_failure; genlmsg_end(skb, dh); - return; + return 0; nla_put_failure: nlmsg_free(skb); pr_err("Error %d sending event. 
Event seq:%u\n", err, seq); + return err; } static void free_state_changes(struct list_head *list) @@ -4841,6 +4846,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) unsigned int seq = cb->args[2]; unsigned int n; enum drbd_notification_type flags = 0; + int err = 0; /* There is no need for taking notification_mutex here: it doesn't matter if the initial state events mix with later state chage @@ -4849,32 +4855,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) cb->args[5]--; if (cb->args[5] == 1) { - notify_initial_state_done(skb, seq); + err = notify_initial_state_done(skb, seq); goto out; } n = cb->args[4]++; if (cb->args[4] < cb->args[3]) flags |= NOTIFY_CONTINUES; if (n < 1) { - notify_resource_state_change(skb, seq, state_change->resource, + err = notify_resource_state_change(skb, seq, state_change->resource, NOTIFY_EXISTS | flags); goto next; } n--; if (n < state_change->n_connections) { - notify_connection_state_change(skb, seq, &state_change->connections[n], + err = notify_connection_state_change(skb, seq, &state_change->connections[n], NOTIFY_EXISTS | flags); goto next; } n -= state_change->n_connections; if (n < state_change->n_devices) { - notify_device_state_change(skb, seq, &state_change->devices[n], + err = notify_device_state_change(skb, seq, &state_change->devices[n], NOTIFY_EXISTS | flags); goto next; } n -= state_change->n_devices; if (n < state_change->n_devices * state_change->n_connections) { - notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], + err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], NOTIFY_EXISTS | flags); goto next; } @@ -4889,7 +4895,10 @@ next: cb->args[4] = 0; } out: - return skb->len; + if (err) + return err; + else + return skb->len; } int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index b8a27818ab3f..4ee11aef6672 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device, return rv; } -void notify_resource_state_change(struct sk_buff *skb, +int notify_resource_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_resource_state_change *resource_state_change, enum drbd_notification_type type) @@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb, .res_susp_fen = resource_state_change->susp_fen[NEW], }; - notify_resource_state(skb, seq, resource, &resource_info, type); + return notify_resource_state(skb, seq, resource, &resource_info, type); } -void notify_connection_state_change(struct sk_buff *skb, +int notify_connection_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_connection_state_change *connection_state_change, enum drbd_notification_type type) @@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb, .conn_role = connection_state_change->peer_role[NEW], }; - notify_connection_state(skb, seq, connection, &connection_info, type); + return notify_connection_state(skb, seq, connection, &connection_info, type); } -void notify_device_state_change(struct sk_buff *skb, +int notify_device_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_device_state_change *device_state_change, enum drbd_notification_type type) @@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb, .dev_disk_state = 
device_state_change->disk_state[NEW], }; - notify_device_state(skb, seq, device, &device_info, type); + return notify_device_state(skb, seq, device, &device_info, type); } -void notify_peer_device_state_change(struct sk_buff *skb, +int notify_peer_device_state_change(struct sk_buff *skb, unsigned int seq, struct drbd_peer_device_state_change *p, enum drbd_notification_type type) @@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb, .peer_resync_susp_dependency = p->resync_susp_dependency[NEW], }; - notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type); + return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type); } static void broadcast_state_change(struct drbd_state_change *state_change) @@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change) struct drbd_resource_state_change *resource_state_change = &state_change->resource[0]; bool resource_state_has_changed; unsigned int n_device, n_connection, n_peer_device, n_peer_devices; - void (*last_func)(struct sk_buff *, unsigned int, void *, + int (*last_func)(struct sk_buff *, unsigned int, void *, enum drbd_notification_type) = NULL; void *last_arg = NULL; diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h index ba80f612d6ab..d5b0479bc9a6 100644 --- a/drivers/block/drbd/drbd_state_change.h +++ b/drivers/block/drbd/drbd_state_change.h @@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_ extern void copy_old_to_new_state_change(struct drbd_state_change *); extern void forget_state_change(struct drbd_state_change *); -extern void notify_resource_state_change(struct sk_buff *, +extern int notify_resource_state_change(struct sk_buff *, unsigned int, struct drbd_resource_state_change *, enum drbd_notification_type type); -extern void notify_connection_state_change(struct sk_buff *, +extern int notify_connection_state_change(struct sk_buff *, unsigned int, struct drbd_connection_state_change *, enum drbd_notification_type type); -extern void notify_device_state_change(struct sk_buff *, +extern int notify_device_state_change(struct sk_buff *, unsigned int, struct drbd_device_state_change *, enum drbd_notification_type type); -extern void notify_peer_device_state_change(struct sk_buff *, +extern int notify_peer_device_state_change(struct sk_buff *, unsigned int, struct drbd_peer_device_state_change *, enum drbd_notification_type type); diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 05b1120e6623..c441a4972064 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1600,7 +1600,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) * Only fake timeouts need to execute blk_mq_complete_request() here. 
*/ cmd->error = BLK_STS_TIMEOUT; - if (cmd->fake_timeout) + if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL) blk_mq_complete_request(rq); return BLK_EH_DONE; } diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index 183d5cc37d42..b0c3704777e9 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -536,7 +536,6 @@ static struct platform_driver brcmstb_gisb_arb_driver = { .name = "brcm-gisb-arb", .of_match_table = brcmstb_gisb_arb_of_match, .pm = &brcmstb_gisb_arb_pm_ops, - .suppress_bind_attrs = true, }, }; diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 54c0ee6dda30..41d5e69063f5 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3049,7 +3049,7 @@ static const struct soc_device_attribute sysc_soc_match[] = { SOC_FLAG("AM43*", SOC_AM4), SOC_FLAG("DRA7*", SOC_DRA7), - { /* sentinel */ }, + { /* sentinel */ } }; /* @@ -3070,7 +3070,7 @@ static const struct soc_device_attribute sysc_soc_feat_match[] = { SOC_FLAG("OMAP3615/AM3715", DIS_IVA), SOC_FLAG("OMAP3621", DIS_ISP), - { /* sentinel */ }, + { /* sentinel */ } }; static int sysc_add_disabled(unsigned long base) diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 7bd10d63ddbe..2dc9da683a13 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -1365,7 +1365,6 @@ out_free: */ int cdrom_number_of_slots(struct cdrom_device_info *cdi) { - int status; int nslots = 1; struct cdrom_changer_info *info; @@ -1377,7 +1376,7 @@ int cdrom_number_of_slots(struct cdrom_device_info *cdi) if (!info) return -ENOMEM; - if ((status = cdrom_read_mech_status(cdi, info)) == 0) + if (cdrom_read_mech_status(cdi, info) == 0) nslots = info->hdr.nslots; kfree(info); diff --git a/drivers/char/random.c b/drivers/char/random.c index 1d8242969751..3a293f919af9 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -333,7 +333,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], chacha20_block(chacha_state, first_block); memcpy(key, first_block, CHACHA_KEY_SIZE); - memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); + memmove(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); memzero_explicit(first_block, sizeof(first_block)); } @@ -437,11 +437,8 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], * This shouldn't be set by functions like add_device_randomness(), * where we can't trust the buffer passed to it is guaranteed to be * unpredictable (so it might not have any entropy at all). - * - * Returns the number of bytes processed from input, which is bounded - * by CRNG_INIT_CNT_THRESH if account is true. 
*/ -static size_t crng_pre_init_inject(const void *input, size_t len, bool account) +static void crng_pre_init_inject(const void *input, size_t len, bool account) { static int crng_init_cnt = 0; struct blake2s_state hash; @@ -452,18 +449,15 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account) spin_lock_irqsave(&base_crng.lock, flags); if (crng_init != 0) { spin_unlock_irqrestore(&base_crng.lock, flags); - return 0; + return; } - if (account) - len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt); - blake2s_update(&hash, base_crng.key, sizeof(base_crng.key)); blake2s_update(&hash, input, len); blake2s_final(&hash, base_crng.key); if (account) { - crng_init_cnt += len; + crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt); if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { ++base_crng.generation; crng_init = 1; @@ -474,8 +468,6 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account) if (crng_init == 1) pr_notice("fast init done\n"); - - return len; } static void _get_random_bytes(void *buf, size_t nbytes) @@ -531,49 +523,59 @@ EXPORT_SYMBOL(get_random_bytes); static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) { - bool large_request = nbytes > 256; - ssize_t ret = 0; - size_t len; + size_t len, left, ret = 0; u32 chacha_state[CHACHA_STATE_WORDS]; u8 output[CHACHA_BLOCK_SIZE]; if (!nbytes) return 0; - len = min_t(size_t, 32, nbytes); - crng_make_state(chacha_state, output, len); - - if (copy_to_user(buf, output, len)) - return -EFAULT; - nbytes -= len; - buf += len; - ret += len; - - while (nbytes) { - if (large_request && need_resched()) { - if (signal_pending(current)) - break; - schedule(); - } + /* + * Immediately overwrite the ChaCha key at index 4 with random + * bytes, in case userspace causes copy_to_user() below to sleep + * forever, so that we still retain forward secrecy in that case. + */ + crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); + /* + * However, if we're doing a read of len <= 32, we don't need to + * use chacha_state after, so we can simply return those bytes to + * the user directly. + */ + if (nbytes <= CHACHA_KEY_SIZE) { + ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes); + goto out_zero_chacha; + } + for (;;) { chacha20_block(chacha_state, output); if (unlikely(chacha_state[12] == 0)) ++chacha_state[13]; len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE); - if (copy_to_user(buf, output, len)) { - ret = -EFAULT; + left = copy_to_user(buf, output, len); + if (left) { + ret += len - left; break; } - nbytes -= len; buf += len; ret += len; + nbytes -= len; + if (!nbytes) + break; + + BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0); + if (ret % PAGE_SIZE == 0) { + if (signal_pending(current)) + break; + cond_resched(); + } } - memzero_explicit(chacha_state, sizeof(chacha_state)); memzero_explicit(output, sizeof(output)); - return ret; +out_zero_chacha: + memzero_explicit(chacha_state, sizeof(chacha_state)); + return ret ? 
ret : -EFAULT; } /* @@ -1016,7 +1018,7 @@ int __init rand_initialize(void) */ void add_device_randomness(const void *buf, size_t size) { - cycles_t cycles = random_get_entropy(); + unsigned long cycles = random_get_entropy(); unsigned long flags, now = jiffies; if (crng_init == 0 && size) @@ -1047,8 +1049,7 @@ struct timer_rand_state { */ static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) { - cycles_t cycles = random_get_entropy(); - unsigned long flags, now = jiffies; + unsigned long cycles = random_get_entropy(), now = jiffies, flags; long delta, delta2, delta3; spin_lock_irqsave(&input_pool.lock, flags); @@ -1141,12 +1142,9 @@ void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy) { if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) { - size_t ret = crng_pre_init_inject(buffer, count, true); - mix_pool_bytes(buffer, ret); - count -= ret; - buffer += ret; - if (!count || crng_init == 0) - return; + crng_pre_init_inject(buffer, count, true); + mix_pool_bytes(buffer, count); + return; } /* @@ -1340,8 +1338,7 @@ static void mix_interrupt_randomness(struct work_struct *work) void add_interrupt_randomness(int irq) { enum { MIX_INFLIGHT = 1U << 31 }; - cycles_t cycles = random_get_entropy(); - unsigned long now = jiffies; + unsigned long cycles = random_get_entropy(), now = jiffies; struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); struct pt_regs *regs = get_irq_regs(); unsigned int new_count; @@ -1354,16 +1351,12 @@ void add_interrupt_randomness(int irq) if (cycles == 0) cycles = get_reg(fast_pool, regs); - if (sizeof(cycles) == 8) + if (sizeof(unsigned long) == 8) { irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq; - else { + irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_; + } else { irq_data.u32[0] = cycles ^ irq; irq_data.u32[1] = now; - } - - if (sizeof(unsigned long) == 8) - irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_; - else { irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_; irq_data.u32[3] = get_reg(fast_pool, regs); } @@ -1410,7 +1403,7 @@ static void entropy_timer(struct timer_list *t) static void try_to_generate_entropy(void) { struct { - cycles_t cycles; + unsigned long cycles; struct timer_list timer; } stack; @@ -1545,6 +1538,13 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, { static int maxwarn = 10; + /* + * Opportunistically attempt to initialize the RNG on platforms that + * have fast cycle counters, but don't (for now) require it to succeed. 
+ */ + if (!crng_ready()) + try_to_generate_entropy(); + if (!crng_ready() && maxwarn > 0) { maxwarn--; if (__ratelimit(&urandom_warning)) diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 8a7267d116b7..3f2182d66829 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -436,7 +436,6 @@ static int wait_for_media_ready(struct cxl_dev_state *cxlds) for (i = mbox_ready_timeout; i; i--) { u32 temp; - int rc; rc = pci_read_config_dword( pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp); diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 511805dbeb75..4c9eb53ba3f8 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -12,6 +12,7 @@ dmabuf_selftests-y := \ selftest.o \ st-dma-fence.o \ st-dma-fence-chain.o \ + st-dma-fence-unwrap.o \ st-dma-resv.o obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index cb1bacb5a42b..5c8a7084577b 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -159,6 +159,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, struct dma_fence_array *array; size_t size = sizeof(*array); + WARN_ON(!num_fences || !fences); + /* Allocate the callback structures behind the array. */ size += num_fences * sizeof(struct dma_fence_array_cb); array = kzalloc(size, GFP_KERNEL); @@ -219,3 +221,33 @@ bool dma_fence_match_context(struct dma_fence *fence, u64 context) return true; } EXPORT_SYMBOL(dma_fence_match_context); + +struct dma_fence *dma_fence_array_first(struct dma_fence *head) +{ + struct dma_fence_array *array; + + if (!head) + return NULL; + + array = to_dma_fence_array(head); + if (!array) + return head; + + if (!array->num_fences) + return NULL; + + return array->fences[0]; +} +EXPORT_SYMBOL(dma_fence_array_first); + +struct dma_fence *dma_fence_array_next(struct dma_fence *head, + unsigned int index) +{ + struct dma_fence_array *array = to_dma_fence_array(head); + + if (!array || index >= array->num_fences) + return NULL; + + return array->fences[index]; +} +EXPORT_SYMBOL(dma_fence_array_next); diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h index 97d73aaa31da..851965867d9c 100644 --- a/drivers/dma-buf/selftests.h +++ b/drivers/dma-buf/selftests.h @@ -12,4 +12,5 @@ selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */ selftest(dma_fence, dma_fence) selftest(dma_fence_chain, dma_fence_chain) +selftest(dma_fence_unwrap, dma_fence_unwrap) selftest(dma_resv, dma_resv) diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c new file mode 100644 index 000000000000..039f016b57be --- /dev/null +++ b/drivers/dma-buf/st-dma-fence-unwrap.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: MIT + +/* + * Copyright (C) 2022 Advanced Micro Devices, Inc. 
+ */ + +#include <linux/dma-fence-unwrap.h> +#if 0 +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/sched/signal.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/random.h> +#endif + +#include "selftest.h" + +#define CHAIN_SZ (4 << 10) + +static inline struct mock_fence { + struct dma_fence base; + spinlock_t lock; +} *to_mock_fence(struct dma_fence *f) { + return container_of(f, struct mock_fence, base); +} + +static const char *mock_name(struct dma_fence *f) +{ + return "mock"; +} + +static const struct dma_fence_ops mock_ops = { + .get_driver_name = mock_name, + .get_timeline_name = mock_name, +}; + +static struct dma_fence *mock_fence(void) +{ + struct mock_fence *f; + + f = kmalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + spin_lock_init(&f->lock); + dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0); + + return &f->base; +} + +static struct dma_fence *mock_array(unsigned int num_fences, ...) +{ + struct dma_fence_array *array; + struct dma_fence **fences; + va_list valist; + int i; + + fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); + if (!fences) + return NULL; + + va_start(valist, num_fences); + for (i = 0; i < num_fences; ++i) + fences[i] = va_arg(valist, typeof(*fences)); + va_end(valist); + + array = dma_fence_array_create(num_fences, fences, + dma_fence_context_alloc(1), + 1, false); + if (!array) + goto cleanup; + return &array->base; + +cleanup: + for (i = 0; i < num_fences; ++i) + dma_fence_put(fences[i]); + kfree(fences); + return NULL; +} + +static struct dma_fence *mock_chain(struct dma_fence *prev, + struct dma_fence *fence) +{ + struct dma_fence_chain *f; + + f = dma_fence_chain_alloc(); + if (!f) { + dma_fence_put(prev); + dma_fence_put(fence); + return NULL; + } + + dma_fence_chain_init(f, prev, fence, 1); + return &f->base; +} + +static int sanitycheck(void *arg) +{ + struct dma_fence *f, *chain, *array; + int err = 0; + + f = mock_fence(); + if (!f) + return -ENOMEM; + + array = mock_array(1, f); + if (!array) + return -ENOMEM; + + chain = mock_chain(NULL, array); + if (!chain) + return -ENOMEM; + + dma_fence_signal(f); + dma_fence_put(chain); + return err; +} + +static int unwrap_array(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *array; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + array = mock_array(2, f1, f2); + if (!array) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, array) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_signal(f1); + dma_fence_signal(f2); + dma_fence_put(array); + return 0; +} + +static int unwrap_chain(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *chain; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + chain = mock_chain(f1, f2); + if (!chain) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, chain) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_signal(f1); + 
dma_fence_signal(f2); + dma_fence_put(chain); + return 0; +} + +static int unwrap_chain_array(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *array, *chain; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + array = mock_array(2, f1, f2); + if (!array) + return -ENOMEM; + + chain = mock_chain(NULL, array); + if (!chain) + return -ENOMEM; + + dma_fence_unwrap_for_each(fence, &iter, chain) { + if (fence == f1) { + f1 = NULL; + } else if (fence == f2) { + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_signal(f1); + dma_fence_signal(f2); + dma_fence_put(chain); + return 0; +} + +int dma_fence_unwrap(void) +{ + static const struct subtest tests[] = { + SUBTEST(sanitycheck), + SUBTEST(unwrap_array), + SUBTEST(unwrap_chain), + SUBTEST(unwrap_chain_array), + }; + + return subtests(tests, NULL); +} diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 394e6e1e9686..514d213261df 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -5,6 +5,7 @@ * Copyright (C) 2012 Google, Inc. */ +#include <linux/dma-fence-unwrap.h> #include <linux/export.h> #include <linux/file.h> #include <linux/fs.h> @@ -172,20 +173,6 @@ static int sync_file_set_fence(struct sync_file *sync_file, return 0; } -static struct dma_fence **get_fences(struct sync_file *sync_file, - int *num_fences) -{ - if (dma_fence_is_array(sync_file->fence)) { - struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); - - *num_fences = array->num_fences; - return array->fences; - } - - *num_fences = 1; - return &sync_file->fence; -} - static void add_fence(struct dma_fence **fences, int *i, struct dma_fence *fence) { @@ -210,86 +197,97 @@ static void add_fence(struct dma_fence **fences, static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { + struct dma_fence *a_fence, *b_fence, **fences; + struct dma_fence_unwrap a_iter, b_iter; + unsigned int index, num_fences; struct sync_file *sync_file; - struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences; - int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences; sync_file = sync_file_alloc(); if (!sync_file) return NULL; - a_fences = get_fences(a, &a_num_fences); - b_fences = get_fences(b, &b_num_fences); - if (a_num_fences > INT_MAX - b_num_fences) - goto err; + num_fences = 0; + dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence) + ++num_fences; + dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence) + ++num_fences; - num_fences = a_num_fences + b_num_fences; + if (num_fences > INT_MAX) + goto err_free_sync_file; fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); if (!fences) - goto err; + goto err_free_sync_file; /* - * Assume sync_file a and b are both ordered and have no - * duplicates with the same context. + * We can't guarantee that fences in both a and b are ordered, but it is + * still quite likely. * - * If a sync_file can only be created with sync_file_merge - * and sync_file_create, this is a reasonable assumption. + * So attempt to order the fences as we pass over them and merge fences + * with the same context. 
*/ - for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { - struct dma_fence *pt_a = a_fences[i_a]; - struct dma_fence *pt_b = b_fences[i_b]; - if (pt_a->context < pt_b->context) { - add_fence(fences, &i, pt_a); + index = 0; + for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter), + b_fence = dma_fence_unwrap_first(b->fence, &b_iter); + a_fence || b_fence; ) { + + if (!b_fence) { + add_fence(fences, &index, a_fence); + a_fence = dma_fence_unwrap_next(&a_iter); + + } else if (!a_fence) { + add_fence(fences, &index, b_fence); + b_fence = dma_fence_unwrap_next(&b_iter); + + } else if (a_fence->context < b_fence->context) { + add_fence(fences, &index, a_fence); + a_fence = dma_fence_unwrap_next(&a_iter); - i_a++; - } else if (pt_a->context > pt_b->context) { - add_fence(fences, &i, pt_b); + } else if (b_fence->context < a_fence->context) { + add_fence(fences, &index, b_fence); + b_fence = dma_fence_unwrap_next(&b_iter); + + } else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno, + a_fence->ops)) { + add_fence(fences, &index, a_fence); + a_fence = dma_fence_unwrap_next(&a_iter); + b_fence = dma_fence_unwrap_next(&b_iter); - i_b++; } else { - if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno, - pt_a->ops)) - add_fence(fences, &i, pt_a); - else - add_fence(fences, &i, pt_b); - - i_a++; - i_b++; + add_fence(fences, &index, b_fence); + a_fence = dma_fence_unwrap_next(&a_iter); + b_fence = dma_fence_unwrap_next(&b_iter); } } - for (; i_a < a_num_fences; i_a++) - add_fence(fences, &i, a_fences[i_a]); - - for (; i_b < b_num_fences; i_b++) - add_fence(fences, &i, b_fences[i_b]); - - if (i == 0) - fences[i++] = dma_fence_get(a_fences[0]); + if (index == 0) + fences[index++] = dma_fence_get_stub(); - if (num_fences > i) { - nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL); - if (!nfences) - goto err; + if (num_fences > index) { + struct dma_fence **tmp; - fences = nfences; + /* Keep going even when reducing the size failed */ + tmp = krealloc_array(fences, index, sizeof(*fences), + GFP_KERNEL); + if (tmp) + fences = tmp; } - if (sync_file_set_fence(sync_file, fences, i) < 0) - goto err; + if (sync_file_set_fence(sync_file, fences, index) < 0) + goto err_put_fences; strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; -err: - while (i) - dma_fence_put(fences[--i]); +err_put_fences: + while (index) + dma_fence_put(fences[--index]); kfree(fences); + +err_free_sync_file: fput(sync_file->file); return NULL; - } static int sync_file_release(struct inode *inode, struct file *file) @@ -398,11 +396,13 @@ static int sync_fill_fence_info(struct dma_fence *fence, static long sync_file_ioctl_fence_info(struct sync_file *sync_file, unsigned long arg) { - struct sync_file_info info; struct sync_fence_info *fence_info = NULL; - struct dma_fence **fences; + struct dma_fence_unwrap iter; + struct sync_file_info info; + unsigned int num_fences; + struct dma_fence *fence; + int ret; __u32 size; - int num_fences, ret, i; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) return -EFAULT; @@ -410,7 +410,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, if (info.flags || info.pad) return -EINVAL; - fences = get_fences(sync_file, &num_fences); + num_fences = 0; + dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) + ++num_fences; /* * Passing num_fences = 0 means that userspace doesn't want to @@ -433,8 +435,11 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, if (!fence_info) return -ENOMEM; - for 
(i = 0; i < num_fences; i++) { - int status = sync_fill_fence_info(fences[i], &fence_info[i]); + num_fences = 0; + dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) { + int status; + + status = sync_fill_fence_info(fence, &fence_info[num_fences++]); info.status = info.status <= 0 ? info.status : status; } diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 14f900047ac0..ec731e9e942b 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -398,11 +398,15 @@ static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, if (ret.a0 == FFA_ERROR) return ffa_to_linux_errno((int)ret.a2); - if (ret.a0 != FFA_SUCCESS) + if (ret.a0 == FFA_SUCCESS) { + if (handle) + *handle = PACK_HANDLE(ret.a2, ret.a3); + } else if (ret.a0 == FFA_MEM_FRAG_RX) { + if (handle) + *handle = PACK_HANDLE(ret.a1, ret.a2); + } else { return -EOPNOTSUPP; - - if (handle) - *handle = PACK_HANDLE(ret.a2, ret.a3); + } return frag_len; } @@ -426,10 +430,12 @@ static int ffa_mem_next_frag(u64 handle, u32 frag_len) if (ret.a0 == FFA_ERROR) return ffa_to_linux_errno((int)ret.a2); - if (ret.a0 != FFA_MEM_FRAG_RX) - return -EOPNOTSUPP; + if (ret.a0 == FFA_MEM_FRAG_RX) + return ret.a3; + else if (ret.a0 == FFA_SUCCESS) + return 0; - return ret.a3; + return -EOPNOTSUPP; } static int @@ -582,7 +588,7 @@ static int ffa_partition_info_get(const char *uuid_str, return -ENODEV; } - count = ffa_partition_probe(&uuid_null, &pbuf); + count = ffa_partition_probe(&uuid, &pbuf); if (count <= 0) return -ENOENT; @@ -688,8 +694,6 @@ static void ffa_setup_partitions(void) __func__, tpbuf->id); continue; } - - ffa_dev_set_drvdata(ffa_dev, drv_info); } kfree(pbuf); } diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 7794bd41eaa0..1e7b7fec97d9 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -59,6 +59,7 @@ config ARM_SCMI_TRANSPORT_OPTEE depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL select ARM_SCMI_HAVE_TRANSPORT select ARM_SCMI_HAVE_SHMEM + select ARM_SCMI_HAVE_MSG default y help This enables the OP-TEE service based transport for SCMI. diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index f5219334fd3a..20fba7370f4e 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -178,6 +178,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, __le32 *num_skip, *num_ret; u32 tot_num_ret = 0, loop_num_ret; struct device *dev = ph->dev; + struct scmi_revision_info *rev = ph->get_priv(ph); ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS, sizeof(*num_skip), 0, &t); @@ -189,6 +190,9 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, list = t->rx.buf + sizeof(*num_ret); do { + size_t real_list_sz; + u32 calc_list_sz; + /* Set the number of protocols to be skipped/already read */ *num_skip = cpu_to_le32(tot_num_ret); @@ -197,8 +201,30 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, break; loop_num_ret = le32_to_cpu(*num_ret); - if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) { - dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP"); + if (!loop_num_ret) + break; + + if (loop_num_ret > rev->num_protocols - tot_num_ret) { + dev_err(dev, + "No. 
Returned protocols > Total protocols.\n"); + break; + } + + if (t->rx.len < (sizeof(u32) * 2)) { + dev_err(dev, "Truncated reply - rx.len:%zd\n", + t->rx.len); + ret = -EPROTO; + break; + } + + real_list_sz = t->rx.len - sizeof(u32); + calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) * + sizeof(u32); + if (calc_list_sz != real_list_sz) { + dev_err(dev, + "Malformed reply - real_sz:%zd calc_sz:%u\n", + real_list_sz, calc_list_sz); + ret = -EPROTO; break; } @@ -208,7 +234,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, tot_num_ret += loop_num_ret; ph->xops->reset_rx_to_maxsz(ph, t); - } while (loop_num_ret); + } while (tot_num_ret < rev->num_protocols); ph->xops->xfer_put(ph, t); @@ -351,15 +377,19 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph) if (ret) return ret; - prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL); - if (!prot_imp) - return -ENOMEM; - rev->major_ver = PROTOCOL_REV_MAJOR(version), rev->minor_ver = PROTOCOL_REV_MINOR(version); ph->set_priv(ph, rev); - scmi_base_attributes_get(ph); + ret = scmi_base_attributes_get(ph); + if (ret) + return ret; + + prot_imp = devm_kcalloc(dev, rev->num_protocols, sizeof(u8), + GFP_KERNEL); + if (!prot_imp) + return -ENOMEM; + scmi_base_vendor_id_get(ph, false); scmi_base_vendor_id_get(ph, true); scmi_base_implementation_version_get(ph); diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index cf6fed6dec77..4d36a9a133d1 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -2,13 +2,15 @@ /* * System Control and Management Interface (SCMI) Clock Protocol * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. */ #include <linux/module.h> +#include <linux/limits.h> #include <linux/sort.h> -#include "common.h" +#include "protocols.h" +#include "notify.h" enum scmi_clock_protocol_cmd { CLOCK_ATTRIBUTES = 0x3, @@ -16,6 +18,9 @@ enum scmi_clock_protocol_cmd { CLOCK_RATE_SET = 0x5, CLOCK_RATE_GET = 0x6, CLOCK_CONFIG_SET = 0x7, + CLOCK_NAME_GET = 0x8, + CLOCK_RATE_NOTIFY = 0x9, + CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA, }; struct scmi_msg_resp_clock_protocol_attributes { @@ -27,7 +32,10 @@ struct scmi_msg_resp_clock_protocol_attributes { struct scmi_msg_resp_clock_attributes { __le32 attributes; #define CLOCK_ENABLE BIT(0) - u8 name[SCMI_MAX_STR_SIZE]; +#define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31)) +#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; __le32 clock_enable_latency; }; @@ -49,7 +57,7 @@ struct scmi_msg_resp_clock_describe_rates { struct { __le32 value_low; __le32 value_high; - } rate[0]; + } rate[]; #define RATE_TO_U64(X) \ ({ \ typeof(X) x = (X); \ @@ -68,6 +76,24 @@ struct scmi_clock_set_rate { __le32 value_high; }; +struct scmi_msg_resp_set_rate_complete { + __le32 id; + __le32 rate_low; + __le32 rate_high; +}; + +struct scmi_msg_clock_rate_notify { + __le32 clk_id; + __le32 notify_enable; +}; + +struct scmi_clock_rate_notify_payld { + __le32 agent_id; + __le32 clock_id; + __le32 rate_low; + __le32 rate_high; +}; + struct clock_info { u32 version; int num_clocks; @@ -76,6 +102,11 @@ struct clock_info { struct scmi_clock_info *clk; }; +static enum scmi_clock_protocol_cmd evt_2_cmd[] = { + CLOCK_RATE_NOTIFY, + CLOCK_RATE_CHANGE_REQUESTED_NOTIFY, +}; + static int scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph, struct clock_info *ci) 
@@ -102,9 +133,11 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph, } static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, - u32 clk_id, struct scmi_clock_info *clk) + u32 clk_id, struct scmi_clock_info *clk, + u32 version) { int ret; + u32 attributes; struct scmi_xfer *t; struct scmi_msg_resp_clock_attributes *attr; @@ -118,16 +151,33 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { + u32 latency = 0; + attributes = le32_to_cpu(attr->attributes); strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); - /* Is optional field clock_enable_latency provided ? */ - if (t->rx.len == sizeof(*attr)) - clk->enable_latency = - le32_to_cpu(attr->clock_enable_latency); - } else { - clk->name[0] = '\0'; + /* clock_enable_latency field is present only since SCMI v3.1 */ + if (PROTOCOL_REV_MAJOR(version) >= 0x2) + latency = le32_to_cpu(attr->clock_enable_latency); + clk->enable_latency = latency ? : U32_MAX; } ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. + */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) { + if (SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id, + clk->name, + SCMI_MAX_STR_SIZE); + + if (SUPPORTS_RATE_CHANGED_NOTIF(attributes)) + clk->rate_changed_notifications = true; + if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes)) + clk->rate_change_requested_notifications = true; + } + return ret; } @@ -143,80 +193,111 @@ static int rate_cmp_func(const void *_r1, const void *_r2) return 1; } -static int -scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, - struct scmi_clock_info *clk) -{ - u64 *rate = NULL; - int ret, cnt; - bool rate_discrete = false; - u32 tot_rate_cnt = 0, rates_flag; - u16 num_returned, num_remaining; - struct scmi_xfer *t; - struct scmi_msg_clock_describe_rates *clk_desc; - struct scmi_msg_resp_clock_describe_rates *rlist; +struct scmi_clk_ipriv { + u32 clk_id; + struct scmi_clock_info *clk; +}; - ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES, - sizeof(*clk_desc), 0, &t); - if (ret) - return ret; +static void iter_clk_describe_prepare_message(void *message, + const unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_clock_describe_rates *msg = message; + const struct scmi_clk_ipriv *p = priv; - clk_desc = t->tx.buf; - rlist = t->rx.buf; + msg->id = cpu_to_le32(p->clk_id); + /* Set the number of rates to be skipped/already read */ + msg->rate_index = cpu_to_le32(desc_index); +} - do { - clk_desc->id = cpu_to_le32(clk_id); - /* Set the number of rates to be skipped/already read */ - clk_desc->rate_index = cpu_to_le32(tot_rate_cnt); +static int +iter_clk_describe_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + struct scmi_clk_ipriv *p = priv; + const struct scmi_msg_resp_clock_describe_rates *r = response; - ret = ph->xops->do_xfer(ph, t); - if (ret) - goto err; + flags = le32_to_cpu(r->num_rates_flags); + st->num_remaining = NUM_REMAINING(flags); + st->num_returned = NUM_RETURNED(flags); + p->clk->rate_discrete = RATE_DISCRETE(flags); - rates_flag = le32_to_cpu(rlist->num_rates_flags); - num_remaining = NUM_REMAINING(rates_flag); - rate_discrete = RATE_DISCRETE(rates_flag); - num_returned = NUM_RETURNED(rates_flag); + return 0; +} - if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) { - 
dev_err(ph->dev, "No. of rates > MAX_NUM_RATES"); +static int +iter_clk_describe_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + int ret = 0; + struct scmi_clk_ipriv *p = priv; + const struct scmi_msg_resp_clock_describe_rates *r = response; + + if (!p->clk->rate_discrete) { + switch (st->desc_index + st->loop_idx) { + case 0: + p->clk->range.min_rate = RATE_TO_U64(r->rate[0]); break; - } - - if (!rate_discrete) { - clk->range.min_rate = RATE_TO_U64(rlist->rate[0]); - clk->range.max_rate = RATE_TO_U64(rlist->rate[1]); - clk->range.step_size = RATE_TO_U64(rlist->rate[2]); - dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n", - clk->range.min_rate, clk->range.max_rate, - clk->range.step_size); + case 1: + p->clk->range.max_rate = RATE_TO_U64(r->rate[1]); + break; + case 2: + p->clk->range.step_size = RATE_TO_U64(r->rate[2]); + break; + default: + ret = -EINVAL; break; } + } else { + u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx]; - rate = &clk->list.rates[tot_rate_cnt]; - for (cnt = 0; cnt < num_returned; cnt++, rate++) { - *rate = RATE_TO_U64(rlist->rate[cnt]); - dev_dbg(ph->dev, "Rate %llu Hz\n", *rate); - } + *rate = RATE_TO_U64(r->rate[st->loop_idx]); + p->clk->list.num_rates++; + //XXX dev_dbg(ph->dev, "Rate %llu Hz\n", *rate); + } - tot_rate_cnt += num_returned; + return ret; +} + +static int +scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, + struct scmi_clock_info *clk) +{ + int ret; - ph->xops->reset_rx_to_maxsz(ph, t); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); + void *iter; + struct scmi_msg_clock_describe_rates *msg; + struct scmi_iterator_ops ops = { + .prepare_message = iter_clk_describe_prepare_message, + .update_state = iter_clk_describe_update_state, + .process_response = iter_clk_describe_process_response, + }; + struct scmi_clk_ipriv cpriv = { + .clk_id = clk_id, + .clk = clk, + }; + + iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES, + CLOCK_DESCRIBE_RATES, + sizeof(*msg), &cpriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) + return ret; - if (rate_discrete && rate) { - clk->list.num_rates = tot_rate_cnt; - sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL); + if (!clk->rate_discrete) { + dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n", + clk->range.min_rate, clk->range.max_rate, + clk->range.step_size); + } else if (clk->list.num_rates) { + sort(clk->list.rates, clk->list.num_rates, + sizeof(clk->list.rates[0]), rate_cmp_func, NULL); } - clk->rate_discrete = rate_discrete; - -err: - ph->xops->xfer_put(ph, t); return ret; } @@ -265,10 +346,22 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph, cfg->value_low = cpu_to_le32(rate & 0xffffffff); cfg->value_high = cpu_to_le32(rate >> 32); - if (flags & CLOCK_SET_ASYNC) + if (flags & CLOCK_SET_ASYNC) { ret = ph->xops->do_xfer_with_response(ph, t); - else + if (!ret) { + struct scmi_msg_resp_set_rate_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->id) == clk_id) + dev_dbg(ph->dev, + "Clk ID %d set async to %llu\n", clk_id, + get_unaligned_le64(&resp->rate_low)); + else + ret = -EPROTO; + } + } else { ret = ph->xops->do_xfer(ph, t); + } if (ci->max_async_req) atomic_dec(&ci->cur_async_req); @@ -354,13 +447,111 @@ static const struct scmi_clk_proto_ops clk_proto_ops = { 
.disable_atomic = scmi_clock_disable_atomic, }; +static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph, + u32 clk_id, int message_id, bool enable) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_clock_rate_notify *notify; + + ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t); + if (ret) + return ret; + + notify = t->tx.buf; + notify->clk_id = cpu_to_le32(clk_id); + notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0; + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph, + u8 evt_id, u32 src_id, bool enable) +{ + int ret, cmd_id; + + if (evt_id >= ARRAY_SIZE(evt_2_cmd)) + return -EINVAL; + + cmd_id = evt_2_cmd[evt_id]; + ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable); + if (ret) + pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n", + evt_id, src_id, ret); + + return ret; +} + +static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph, + u8 evt_id, ktime_t timestamp, + const void *payld, size_t payld_sz, + void *report, u32 *src_id) +{ + const struct scmi_clock_rate_notify_payld *p = payld; + struct scmi_clock_rate_notif_report *r = report; + + if (sizeof(*p) != payld_sz || + (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED && + evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED)) + return NULL; + + r->timestamp = timestamp; + r->agent_id = le32_to_cpu(p->agent_id); + r->clock_id = le32_to_cpu(p->clock_id); + r->rate = get_unaligned_le64(&p->rate_low); + *src_id = r->clock_id; + + return r; +} + +static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph) +{ + struct clock_info *ci = ph->get_priv(ph); + + if (!ci) + return -EINVAL; + + return ci->num_clocks; +} + +static const struct scmi_event clk_events[] = { + { + .id = SCMI_EVENT_CLOCK_RATE_CHANGED, + .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld), + .max_report_sz = sizeof(struct scmi_clock_rate_notif_report), + }, + { + .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED, + .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld), + .max_report_sz = sizeof(struct scmi_clock_rate_notif_report), + }, +}; + +static const struct scmi_event_ops clk_event_ops = { + .get_num_sources = scmi_clk_get_num_sources, + .set_notify_enabled = scmi_clk_set_notify_enabled, + .fill_custom_report = scmi_clk_fill_custom_report, +}; + +static const struct scmi_protocol_events clk_protocol_events = { + .queue_sz = SCMI_PROTO_QUEUE_SZ, + .ops = &clk_event_ops, + .evts = clk_events, + .num_events = ARRAY_SIZE(clk_events), +}; + static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) { u32 version; int clkid, ret; struct clock_info *cinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Clock Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -369,7 +560,9 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) if (!cinfo) return -ENOMEM; - scmi_clock_protocol_attributes_get(ph, cinfo); + ret = scmi_clock_protocol_attributes_get(ph, cinfo); + if (ret) + return ret; cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks, sizeof(*cinfo->clk), GFP_KERNEL); @@ -379,7 +572,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) for (clkid = 0; clkid < cinfo->num_clocks; clkid++) { struct scmi_clock_info *clk = cinfo->clk + clkid; - ret = scmi_clock_attributes_get(ph, clkid, clk); + ret = 
scmi_clock_attributes_get(ph, clkid, clk, version); if (!ret) scmi_clock_describe_rates_get(ph, clkid, clk); } @@ -393,6 +586,7 @@ static const struct scmi_protocol scmi_clock = { .owner = THIS_MODULE, .instance_init = &scmi_clock_protocol_init, .ops = &clk_proto_ops, + .events = &clk_protocol_events, }; DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 4fda84bfab42..61aba7447c32 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -4,7 +4,7 @@ * driver common header file containing some definitions, structures * and function prototypes used in all the different SCMI protocols. * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. */ #ifndef _SCMI_COMMON_H #define _SCMI_COMMON_H @@ -24,38 +24,9 @@ #include <asm/unaligned.h> +#include "protocols.h" #include "notify.h" -#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) -#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) -#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))) -#define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))) -#define MAX_PROTOCOLS_IMP 16 -#define MAX_OPPS 16 - -enum scmi_common_cmd { - PROTOCOL_VERSION = 0x0, - PROTOCOL_ATTRIBUTES = 0x1, - PROTOCOL_MESSAGE_ATTRIBUTES = 0x2, -}; - -/** - * struct scmi_msg_resp_prot_version - Response for a message - * - * @minor_version: Minor version of the ABI that firmware supports - * @major_version: Major version of the ABI that firmware supports - * - * In general, ABI version changes follow the rule that minor version increments - * are backward compatible. Major revision changes in ABI may not be - * backward compatible. - * - * Response to a generic message with message type SCMI_MSG_VERSION - */ -struct scmi_msg_resp_prot_version { - __le16 minor_version; - __le16 major_version; -}; - #define MSG_ID_MASK GENMASK(7, 0) #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) #define MSG_TYPE_MASK GENMASK(9, 8) @@ -80,28 +51,6 @@ struct scmi_msg_resp_prot_version { #define SCMI_PENDING_XFERS_HT_ORDER_SZ 9 /** - * struct scmi_msg_hdr - Message(Tx/Rx) header - * - * @id: The identifier of the message being sent - * @protocol_id: The identifier of the protocol used to send @id message - * @type: The SCMI type for this message - * @seq: The token to identify the message. When a message returns, the - * platform returns the whole message header unmodified including the - * token - * @status: Status of the transfer once it's complete - * @poll_completion: Indicate if the transfer needs to be polled for - * completion or interrupt mode is used - */ -struct scmi_msg_hdr { - u8 id; - u8 protocol_id; - u8 type; - u16 seq; - u32 status; - bool poll_completion; -}; - -/** * pack_scmi_header() - packs and returns 32-bit header * * @hdr: pointer to header containing all the information on message id, @@ -130,72 +79,6 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) hdr->type = MSG_XTRACT_TYPE(msg_hdr); } -/** - * struct scmi_msg - Message(Tx/Rx) structure - * - * @buf: Buffer pointer - * @len: Length of data in the Buffer - */ -struct scmi_msg { - void *buf; - size_t len; -}; - -/** - * struct scmi_xfer - Structure representing a message flow - * - * @transfer_id: Unique ID for debug & profiling purpose - * @hdr: Transmit message header - * @tx: Transmit message - * @rx: Receive message, the buffer should be pre-allocated to store - * message. 
If request-ACK protocol is used, we can reuse the same - * buffer for the rx path as we use for the tx path. - * @done: command message transmit completion event - * @async_done: pointer to delayed response message received event completion - * @pending: True for xfers added to @pending_xfers hashtable - * @node: An hlist_node reference used to store this xfer, alternatively, on - * the free list @free_xfers or in the @pending_xfers hashtable - * @users: A refcount to track the active users for this xfer. - * This is meant to protect against the possibility that, when a command - * transaction times out concurrently with the reception of a valid - * response message, the xfer could be finally put on the TX path, and - * so vanish, while on the RX path scmi_rx_callback() is still - * processing it: in such a case this refcounting will ensure that, even - * though the timed-out transaction will anyway cause the command - * request to be reported as failed by time-out, the underlying xfer - * cannot be discarded and possibly reused until the last one user on - * the RX path has released it. - * @busy: An atomic flag to ensure exclusive write access to this xfer - * @state: The current state of this transfer, with states transitions deemed - * valid being: - * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ] - * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK - * (Missing synchronous response is assumed OK and ignored) - * @lock: A spinlock to protect state and busy fields. - * @priv: A pointer for transport private usage. - */ -struct scmi_xfer { - int transfer_id; - struct scmi_msg_hdr hdr; - struct scmi_msg tx; - struct scmi_msg rx; - struct completion done; - struct completion *async_done; - bool pending; - struct hlist_node node; - refcount_t users; -#define SCMI_XFER_FREE 0 -#define SCMI_XFER_BUSY 1 - atomic_t busy; -#define SCMI_XFER_SENT_OK 0 -#define SCMI_XFER_RESP_OK 1 -#define SCMI_XFER_DRESP_OK 2 - int state; - /* A lock to protect state and busy fields */ - spinlock_t lock; - void *priv; -}; - /* * An helper macro to lookup an xfer from the @pending_xfers hashtable * using the message sequence number token as a key. @@ -211,64 +94,6 @@ struct scmi_xfer { xfer_; \ }) -struct scmi_xfer_ops; - -/** - * struct scmi_protocol_handle - Reference to an initialized protocol instance - * - * @dev: A reference to the associated SCMI instance device (handle->dev). - * @xops: A reference to a struct holding refs to the core xfer operations that - * can be used by the protocol implementation to generate SCMI messages. - * @set_priv: A method to set protocol private data for this instance. - * @get_priv: A method to get protocol private data previously set. - * - * This structure represents a protocol initialized against specific SCMI - * instance and it will be used as follows: - * - as a parameter fed from the core to the protocol initialization code so - * that it can access the core xfer operations to build and generate SCMI - * messages exclusively for the specific underlying protocol instance. - * - as an opaque handle fed by an SCMI driver user when it tries to access - * this protocol through its own protocol operations. - * In this case this handle will be returned as an opaque object together - * with the related protocol operations when the SCMI driver tries to access - * the protocol. 
- */ -struct scmi_protocol_handle { - struct device *dev; - const struct scmi_xfer_ops *xops; - int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv); - void *(*get_priv)(const struct scmi_protocol_handle *ph); -}; - -/** - * struct scmi_xfer_ops - References to the core SCMI xfer operations. - * @version_get: Get this version protocol. - * @xfer_get_init: Initialize one struct xfer if any xfer slot is free. - * @reset_rx_to_maxsz: Reset rx size to max transport size. - * @do_xfer: Do the SCMI transfer. - * @do_xfer_with_response: Do the SCMI transfer waiting for a response. - * @xfer_put: Free the xfer slot. - * - * Note that all this operations expect a protocol handle as first parameter; - * they then internally use it to infer the underlying protocol number: this - * way is not possible for a protocol implementation to forge messages for - * another protocol. - */ -struct scmi_xfer_ops { - int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version); - int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id, - size_t tx_size, size_t rx_size, - struct scmi_xfer **p); - void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph, - struct scmi_xfer *xfer); - int (*do_xfer)(const struct scmi_protocol_handle *ph, - struct scmi_xfer *xfer); - int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph, - struct scmi_xfer *xfer); - void (*xfer_put)(const struct scmi_protocol_handle *ph, - struct scmi_xfer *xfer); -}; - struct scmi_revision_info * scmi_revision_area_get(const struct scmi_protocol_handle *ph); int scmi_handle_put(const struct scmi_handle *handle); @@ -277,55 +102,9 @@ void scmi_set_handle(struct scmi_device *scmi_dev); void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, u8 *prot_imp); -typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *); - -/** - * struct scmi_protocol - Protocol descriptor - * @id: Protocol ID. - * @owner: Module reference if any. - * @instance_init: Mandatory protocol initialization function. - * @instance_deinit: Optional protocol de-initialization function. - * @ops: Optional reference to the operations provided by the protocol and - * exposed in scmi_protocol.h. - * @events: An optional reference to the events supported by this protocol. 
- */ -struct scmi_protocol { - const u8 id; - struct module *owner; - const scmi_prot_init_ph_fn_t instance_init; - const scmi_prot_init_ph_fn_t instance_deinit; - const void *ops; - const struct scmi_protocol_events *events; -}; - int __init scmi_bus_init(void); void __exit scmi_bus_exit(void); -#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \ - int __init scmi_##func##_register(void); \ - void __exit scmi_##func##_unregister(void) -DECLARE_SCMI_REGISTER_UNREGISTER(base); -DECLARE_SCMI_REGISTER_UNREGISTER(clock); -DECLARE_SCMI_REGISTER_UNREGISTER(perf); -DECLARE_SCMI_REGISTER_UNREGISTER(power); -DECLARE_SCMI_REGISTER_UNREGISTER(reset); -DECLARE_SCMI_REGISTER_UNREGISTER(sensors); -DECLARE_SCMI_REGISTER_UNREGISTER(voltage); -DECLARE_SCMI_REGISTER_UNREGISTER(system); - -#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \ -static const struct scmi_protocol *__this_proto = &(proto); \ - \ -int __init scmi_##name##_register(void) \ -{ \ - return scmi_protocol_register(__this_proto); \ -} \ - \ -void __exit scmi_##name##_unregister(void) \ -{ \ - scmi_protocol_unregister(__this_proto); \ -} - const struct scmi_protocol *scmi_protocol_get(int protocol_id); void scmi_protocol_put(int protocol_id); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 46118300a4d1..c1922bd650ae 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -128,7 +128,8 @@ struct scmi_protocol_instance { * usage. * @protocols_mtx: A mutex to protect protocols instances initialization. * @protocols_imp: List of protocols implemented, currently maximum of - * MAX_PROTOCOLS_IMP elements allocated by the base protocol + * scmi_revision_info.num_protocols elements allocated by the + * base protocol * @active_protocols: IDR storing device_nodes for protocols actually defined * in the DT and confirmed as implemented by fw. * @atomic_threshold: Optional system wide DT-configured threshold, expressed @@ -679,7 +680,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, xfer = scmi_xfer_command_acquire(cinfo, msg_hdr); if (IS_ERR(xfer)) { - scmi_clear_channel(info, cinfo); + if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP) + scmi_clear_channel(info, cinfo); return; } @@ -1101,6 +1103,167 @@ static const struct scmi_xfer_ops xfer_ops = { .xfer_put = xfer_put, }; +struct scmi_msg_resp_domain_name_get { + __le32 flags; + u8 name[SCMI_MAX_STR_SIZE]; +}; + +/** + * scmi_common_extended_name_get - Common helper to get extended resources name + * @ph: A protocol handle reference. + * @cmd_id: The specific command ID to use. + * @res_id: The specific resource ID to use. + * @name: A pointer to the preallocated area where the retrieved name will be + * stored as a NULL terminated string. + * @len: The len in bytes of the @name char array. + * + * Return: 0 on Succcess + */ +static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph, + u8 cmd_id, u32 res_id, char *name, + size_t len) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_domain_name_get *resp; + + ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id), + sizeof(*resp), &t); + if (ret) + goto out; + + put_unaligned_le32(res_id, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) + strscpy(name, resp->name, len); + + ph->xops->xfer_put(ph, t); +out: + if (ret) + dev_warn(ph->dev, + "Failed to get extended name - id:%u (ret:%d). 
Using %s\n", + res_id, ret, name); + return ret; +} + +/** + * struct scmi_iterator - Iterator descriptor + * @msg: A reference to the message TX buffer; filled by @prepare_message with + * a proper custom command payload for each multi-part command request. + * @resp: A reference to the response RX buffer; used by @update_state and + * @process_response to parse the multi-part replies. + * @t: A reference to the underlying xfer initialized and used transparently by + * the iterator internal routines. + * @ph: A reference to the associated protocol handle to be used. + * @ops: A reference to the custom provided iterator operations. + * @state: The current iterator state; used and updated in turn by the iterators + * internal routines and by the caller-provided @scmi_iterator_ops. + * @priv: A reference to optional private data as provided by the caller and + * passed back to the @@scmi_iterator_ops. + */ +struct scmi_iterator { + void *msg; + void *resp; + struct scmi_xfer *t; + const struct scmi_protocol_handle *ph; + struct scmi_iterator_ops *ops; + struct scmi_iterator_state state; + void *priv; +}; + +static void *scmi_iterator_init(const struct scmi_protocol_handle *ph, + struct scmi_iterator_ops *ops, + unsigned int max_resources, u8 msg_id, + size_t tx_size, void *priv) +{ + int ret; + struct scmi_iterator *i; + + i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL); + if (!i) + return ERR_PTR(-ENOMEM); + + i->ph = ph; + i->ops = ops; + i->priv = priv; + + ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t); + if (ret) { + devm_kfree(ph->dev, i); + return ERR_PTR(ret); + } + + i->state.max_resources = max_resources; + i->msg = i->t->tx.buf; + i->resp = i->t->rx.buf; + + return i; +} + +static int scmi_iterator_run(void *iter) +{ + int ret = -EINVAL; + struct scmi_iterator_ops *iops; + const struct scmi_protocol_handle *ph; + struct scmi_iterator_state *st; + struct scmi_iterator *i = iter; + + if (!i || !i->ops || !i->ph) + return ret; + + iops = i->ops; + ph = i->ph; + st = &i->state; + + do { + iops->prepare_message(i->msg, st->desc_index, i->priv); + ret = ph->xops->do_xfer(ph, i->t); + if (ret) + break; + + ret = iops->update_state(st, i->resp, i->priv); + if (ret) + break; + + if (st->num_returned > st->max_resources - st->desc_index) { + dev_err(ph->dev, + "No. of resources can't exceed %d\n", + st->max_resources); + ret = -EINVAL; + break; + } + + for (st->loop_idx = 0; st->loop_idx < st->num_returned; + st->loop_idx++) { + ret = iops->process_response(ph, i->resp, st, i->priv); + if (ret) + goto out; + } + + st->desc_index += st->num_returned; + ph->xops->reset_rx_to_maxsz(ph, i->t); + /* + * check for both returned and remaining to avoid infinite + * loop due to buggy firmware + */ + } while (st->num_returned && st->num_remaining); + +out: + /* Finalize and destroy iterator */ + ph->xops->xfer_put(ph, i->t); + devm_kfree(ph->dev, i); + + return ret; +} + +static const struct scmi_proto_helpers_ops helpers_ops = { + .extended_name_get = scmi_common_extended_name_get, + .iter_response_init = scmi_iterator_init, + .iter_response_run = scmi_iterator_run, +}; + /** * scmi_revision_area_get - Retrieve version memory area. 
* @@ -1161,6 +1324,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info, pi->handle = handle; pi->ph.dev = handle->dev; pi->ph.xops = &xfer_ops; + pi->ph.hops = &helpers_ops; pi->ph.set_priv = scmi_set_protocol_priv; pi->ph.get_priv = scmi_get_protocol_priv; refcount_set(&pi->users, 1); @@ -1309,11 +1473,12 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id) { int i; struct scmi_info *info = handle_to_scmi_info(handle); + struct scmi_revision_info *rev = handle->version; if (!info->protocols_imp) return false; - for (i = 0; i < MAX_PROTOCOLS_IMP; i++) + for (i = 0; i < rev->num_protocols; i++) if (info->protocols_imp[i] == prot_id) return true; return false; diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index 734f1eeee161..b503c22cfd32 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -64,6 +64,22 @@ enum scmi_optee_pta_cmd { * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps) */ PTA_SCMI_CMD_GET_CHANNEL = 3, + + /* + * PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG + * buffer pointed by memref parameters + * + * [in] value[0].a: Channel handle + * [in] memref[1]: Message buffer (MSG and SCMI payload) + * [out] memref[2]: Response buffer (MSG and SCMI payload) + * + * Shared memories used for SCMI message/response are MSG buffers + * referenced by param[1] and param[2]. MSG transport protocol + * uses a 32bit header to carry SCMI meta-data (protocol ID and + * protocol message ID) followed by the effective SCMI message + * payload. + */ + PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4, }; /* @@ -72,9 +88,17 @@ enum scmi_optee_pta_cmd { * PTA_SCMI_CAPS_SMT_HEADER * When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in * shared memory buffers to carry SCMI protocol synchronisation information. + * + * PTA_SCMI_CAPS_MSG_HEADER + * When set, OP-TEE supports command using MSG header protocol in an OP-TEE + * shared memory to carry SCMI protocol synchronisation information and SCMI + * message payload. 
*/ #define PTA_SCMI_CAPS_NONE 0 #define PTA_SCMI_CAPS_SMT_HEADER BIT(0) +#define PTA_SCMI_CAPS_MSG_HEADER BIT(1) +#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \ + PTA_SCMI_CAPS_MSG_HEADER) /** * struct scmi_optee_channel - Description of an OP-TEE SCMI channel @@ -85,7 +109,8 @@ enum scmi_optee_pta_cmd { * @mu: Mutex protection on channel access * @cinfo: SCMI channel information * @shmem: Virtual base address of the shared memory - * @tee_shm: Reference to TEE shared memory or NULL if using static shmem + * @req: Shared memory protocol handle for SCMI request and synchronous response + * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem * @link: Reference in agent's channel list */ struct scmi_optee_channel { @@ -94,7 +119,10 @@ struct scmi_optee_channel { u32 caps; struct mutex mu; struct scmi_chan_info *cinfo; - struct scmi_shared_mem __iomem *shmem; + union { + struct scmi_shared_mem __iomem *shmem; + struct scmi_msg_payld *msg; + } req; struct tee_shm *tee_shm; struct list_head link; }; @@ -178,8 +206,8 @@ static int get_capabilities(struct scmi_optee_agent *agent) caps = param[0].u.value.a; - if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) { - dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n"); + if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) { + dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n"); return -EOPNOTSUPP; } @@ -193,9 +221,14 @@ static int get_channel(struct scmi_optee_channel *channel) struct device *dev = scmi_optee_private->dev; struct tee_ioctl_invoke_arg arg = { }; struct tee_param param[1] = { }; - unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER; + unsigned int caps = 0; int ret; + if (channel->tee_shm) + caps = PTA_SCMI_CAPS_MSG_HEADER; + else + caps = PTA_SCMI_CAPS_SMT_HEADER; + arg.func = PTA_SCMI_CMD_GET_CHANNEL; arg.session = channel->tee_session; arg.num_params = 1; @@ -220,25 +253,48 @@ static int get_channel(struct scmi_optee_channel *channel) static int invoke_process_smt_channel(struct scmi_optee_channel *channel) { - struct tee_ioctl_invoke_arg arg = { }; - struct tee_param param[2] = { }; + struct tee_ioctl_invoke_arg arg = { + .func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL, + .session = channel->tee_session, + .num_params = 1, + }; + struct tee_param param[1] = { }; int ret; - arg.session = channel->tee_session; param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; param[0].u.value.a = channel->channel_id; - if (channel->tee_shm) { - param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT; - param[1].u.memref.shm = channel->tee_shm; - param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; - arg.num_params = 2; - arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE; - } else { - arg.num_params = 1; - arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL; + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); + if (ret < 0 || arg.ret) { + dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", + channel->channel_id, ret, arg.ret); + return -EIO; } + return 0; +} + +static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size) +{ + struct tee_ioctl_invoke_arg arg = { + .func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL, + .session = channel->tee_session, + .num_params = 3, + }; + struct tee_param param[3] = { }; + int ret; + + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + param[0].u.value.a = channel->channel_id; + + param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT; + param[1].u.memref.shm = channel->tee_shm; + param[1].u.memref.size = 
msg_size; + + param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT; + param[2].u.memref.shm = channel->tee_shm; + param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE; + ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param); if (ret < 0 || arg.ret) { dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n", @@ -279,7 +335,26 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo) { struct scmi_optee_channel *channel = cinfo->transport_info; - shmem_clear_channel(channel->shmem); + if (!channel->tee_shm) + shmem_clear_channel(channel->req.shmem); +} + +static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel) +{ + const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE; + void *shbuf; + + channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size); + if (IS_ERR(channel->tee_shm)) { + dev_err(channel->cinfo->dev, "shmem allocation failed\n"); + return -ENOMEM; + } + + shbuf = tee_shm_get_va(channel->tee_shm, 0); + memset(shbuf, 0, msg_size); + channel->req.msg = shbuf; + + return 0; } static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, @@ -304,8 +379,8 @@ static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo, size = resource_size(&res); - channel->shmem = devm_ioremap(dev, res.start, size); - if (!channel->shmem) { + channel->req.shmem = devm_ioremap(dev, res.start, size); + if (!channel->req.shmem) { dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n"); ret = -EADDRNOTAVAIL; goto out; @@ -325,7 +400,7 @@ static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo, if (of_find_property(cinfo->dev->of_node, "shmem", NULL)) return setup_static_shmem(dev, cinfo, channel); else - return -ENOMEM; + return setup_dynamic_shmem(dev, channel); } static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) @@ -405,27 +480,22 @@ static int scmi_optee_chan_free(int id, void *p, void *data) return 0; } -static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan, - struct scmi_xfer *xfer) -{ - if (!chan) - return NULL; - - return chan->shmem; -} - - static int scmi_optee_send_message(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) { struct scmi_optee_channel *channel = cinfo->transport_info; - struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); int ret; mutex_lock(&channel->mu); - shmem_tx_prepare(shmem, xfer); - ret = invoke_process_smt_channel(channel); + if (channel->tee_shm) { + msg_tx_prepare(channel->req.msg, xfer); + ret = invoke_process_msg_channel(channel, msg_command_size(xfer)); + } else { + shmem_tx_prepare(channel->req.shmem, xfer); + ret = invoke_process_smt_channel(channel); + } + if (ret) mutex_unlock(&channel->mu); @@ -436,9 +506,11 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) { struct scmi_optee_channel *channel = cinfo->transport_info; - struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer); - shmem_fetch_response(shmem, xfer); + if (channel->tee_shm) + msg_fetch_response(channel->req.msg, SCMI_OPTEE_MAX_MSG_SIZE, xfer); + else + shmem_fetch_response(channel->req.shmem, xfer); } static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret, diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index f4cd5193b961..8f4051aca220 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface 
(SCMI) Performance Protocol * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. */ #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt @@ -17,9 +17,11 @@ #include <linux/scmi_protocol.h> #include <linux/sort.h> -#include "common.h" +#include "protocols.h" #include "notify.h" +#define MAX_OPPS 16 + enum scmi_performance_protocol_cmd { PERF_DOMAIN_ATTRIBUTES = 0x3, PERF_DESCRIBE_LEVELS = 0x4, @@ -30,6 +32,7 @@ enum scmi_performance_protocol_cmd { PERF_NOTIFY_LIMITS = 0x9, PERF_NOTIFY_LEVEL = 0xa, PERF_DESCRIBE_FASTCHANNEL = 0xb, + PERF_DOMAIN_NAME_GET = 0xc, }; struct scmi_opp { @@ -42,6 +45,7 @@ struct scmi_msg_resp_perf_attributes { __le16 num_domains; __le16 flags; #define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0)) +#define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1)) __le32 stats_addr_low; __le32 stats_addr_high; __le32 stats_size; @@ -54,10 +58,11 @@ struct scmi_msg_resp_perf_domain_attributes { #define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29)) #define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28)) #define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26)) __le32 rate_limit_us; __le32 sustained_freq_khz; __le32 sustained_perf_level; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_msg_perf_describe_levels { @@ -166,6 +171,7 @@ struct scmi_perf_info { u32 version; int num_domains; bool power_scale_mw; + bool power_scale_uw; u64 stats_addr; u32 stats_size; struct perf_dom_info *dom_info; @@ -196,6 +202,8 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph, pi->num_domains = le16_to_cpu(attr->num_domains); pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags); + if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3) + pi->power_scale_uw = POWER_SCALE_IN_MICROWATT(flags); pi->stats_addr = le32_to_cpu(attr->stats_addr_low) | (u64)le32_to_cpu(attr->stats_addr_high) << 32; pi->stats_size = le32_to_cpu(attr->stats_size); @@ -207,9 +215,11 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph, static int scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, - u32 domain, struct perf_dom_info *dom_info) + u32 domain, struct perf_dom_info *dom_info, + u32 version) { int ret; + u32 flags; struct scmi_xfer *t; struct scmi_msg_resp_perf_domain_attributes *attr; @@ -223,7 +233,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { - u32 flags = le32_to_cpu(attr->flags); + flags = le32_to_cpu(attr->flags); dom_info->set_limits = SUPPORTS_SET_LIMITS(flags); dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags); @@ -246,6 +256,16 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, } ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(flags)) + ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain, + dom_info->name, SCMI_MAX_STR_SIZE); + return ret; } @@ -256,66 +276,87 @@ static int opp_cmp_func(const void *opp1, const void *opp2) return t1->perf - t2->perf; } -static int -scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, - struct perf_dom_info *perf_dom) +struct scmi_perf_ipriv { + u32 domain; + struct perf_dom_info *perf_dom; +}; + +static void iter_perf_levels_prepare_message(void *message, + unsigned int desc_index, + const void *priv) { - int ret, cnt; - u32 tot_opp_cnt = 0; - u16 num_returned, num_remaining; - struct scmi_xfer *t; - struct scmi_opp *opp; - struct scmi_msg_perf_describe_levels *dom_info; - struct scmi_msg_resp_perf_describe_levels *level_info; + struct scmi_msg_perf_describe_levels *msg = message; + const struct scmi_perf_ipriv *p = priv; - ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS, - sizeof(*dom_info), 0, &t); - if (ret) - return ret; + msg->domain = cpu_to_le32(p->domain); + /* Set the number of OPPs to be skipped/already read */ + msg->level_index = cpu_to_le32(desc_index); +} - dom_info = t->tx.buf; - level_info = t->rx.buf; +static int iter_perf_levels_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_msg_resp_perf_describe_levels *r = response; - do { - dom_info->domain = cpu_to_le32(domain); - /* Set the number of OPPs to be skipped/already read */ - dom_info->level_index = cpu_to_le32(tot_opp_cnt); + st->num_returned = le16_to_cpu(r->num_returned); + st->num_remaining = le16_to_cpu(r->num_remaining); - ret = ph->xops->do_xfer(ph, t); - if (ret) - break; + return 0; +} - num_returned = le16_to_cpu(level_info->num_returned); - num_remaining = le16_to_cpu(level_info->num_remaining); - if (tot_opp_cnt + num_returned > MAX_OPPS) { - dev_err(ph->dev, "No. 
of OPPs exceeded MAX_OPPS"); - break; - } +static int +iter_perf_levels_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + struct scmi_opp *opp; + const struct scmi_msg_resp_perf_describe_levels *r = response; + struct scmi_perf_ipriv *p = priv; - opp = &perf_dom->opp[tot_opp_cnt]; - for (cnt = 0; cnt < num_returned; cnt++, opp++) { - opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val); - opp->power = le32_to_cpu(level_info->opp[cnt].power); - opp->trans_latency_us = le16_to_cpu - (level_info->opp[cnt].transition_latency_us); + opp = &p->perf_dom->opp[st->desc_index + st->loop_idx]; + opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val); + opp->power = le32_to_cpu(r->opp[st->loop_idx].power); + opp->trans_latency_us = + le16_to_cpu(r->opp[st->loop_idx].transition_latency_us); + p->perf_dom->opp_count++; - dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n", - opp->perf, opp->power, opp->trans_latency_us); - } + dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n", + opp->perf, opp->power, opp->trans_latency_us); - tot_opp_cnt += num_returned; + return 0; +} - ph->xops->reset_rx_to_maxsz(ph, t); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); +static int +scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, + struct perf_dom_info *perf_dom) +{ + int ret; + void *iter; + struct scmi_msg_perf_describe_levels *msg; + struct scmi_iterator_ops ops = { + .prepare_message = iter_perf_levels_prepare_message, + .update_state = iter_perf_levels_update_state, + .process_response = iter_perf_levels_process_response, + }; + struct scmi_perf_ipriv ppriv = { + .domain = domain, + .perf_dom = perf_dom, + }; + + iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS, + PERF_DESCRIBE_LEVELS, + sizeof(*msg), &ppriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) + return ret; - perf_dom->opp_count = tot_opp_cnt; - ph->xops->xfer_put(ph, t); + if (perf_dom->opp_count) + sort(perf_dom->opp, perf_dom->opp_count, + sizeof(struct scmi_opp), opp_cmp_func, NULL); - sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL); return ret; } @@ -382,6 +423,9 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; + if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) + return -EINVAL; + if (dom->fc_info && dom->fc_info->limit_set_addr) { iowrite32(max_perf, dom->fc_info->limit_set_addr); iowrite32(min_perf, dom->fc_info->limit_set_addr + 4); @@ -873,11 +917,13 @@ static const struct scmi_protocol_events perf_protocol_events = { static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) { - int domain; + int domain, ret; u32 version; struct scmi_perf_info *pinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Performance Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -886,7 +932,9 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) if (!pinfo) return -ENOMEM; - scmi_perf_attributes_get(ph, pinfo); + ret = scmi_perf_attributes_get(ph, pinfo); + if (ret) + return ret; pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, sizeof(*pinfo->dom_info), GFP_KERNEL); @@ -896,7 
+944,7 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) for (domain = 0; domain < pinfo->num_domains; domain++) { struct perf_dom_info *dom = pinfo->dom_info + domain; - scmi_perf_domain_attributes_get(ph, domain, dom); + scmi_perf_domain_attributes_get(ph, domain, dom, version); scmi_perf_describe_levels_get(ph, domain, dom); if (dom->perf_fastchannels) diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index ad2ab080f344..964882cc8747 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) Power Protocol * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. */ #define pr_fmt(fmt) "SCMI Notifications POWER - " fmt @@ -10,7 +10,7 @@ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #include "notify.h" enum scmi_power_protocol_cmd { @@ -18,6 +18,7 @@ enum scmi_power_protocol_cmd { POWER_STATE_SET = 0x4, POWER_STATE_GET = 0x5, POWER_STATE_NOTIFY = 0x6, + POWER_DOMAIN_NAME_GET = 0x8, }; struct scmi_msg_resp_power_attributes { @@ -33,7 +34,8 @@ struct scmi_msg_resp_power_domain_attributes { #define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31)) #define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30)) #define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29)) - u8 name[SCMI_MAX_STR_SIZE]; +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(27)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_power_set_state { @@ -97,9 +99,11 @@ static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph, static int scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, - u32 domain, struct power_dom_info *dom_info) + u32 domain, struct power_dom_info *dom_info, + u32 version) { int ret; + u32 flags; struct scmi_xfer *t; struct scmi_msg_resp_power_domain_attributes *attr; @@ -113,15 +117,26 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { - u32 flags = le32_to_cpu(attr->flags); + flags = le32_to_cpu(attr->flags); dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags); dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags); dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags); strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); } - ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(flags)) { + ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET, + domain, dom_info->name, + SCMI_MAX_STR_SIZE); + } + return ret; } @@ -174,8 +189,9 @@ static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph) return pi->num_domains; } -static char *scmi_power_name_get(const struct scmi_protocol_handle *ph, - u32 domain) +static const char * +scmi_power_name_get(const struct scmi_protocol_handle *ph, + u32 domain) { struct scmi_power_info *pi = ph->get_priv(ph); struct power_dom_info *dom = pi->dom_info + domain; @@ -280,11 +296,13 @@ static const struct scmi_protocol_events power_protocol_events = { static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph) { - int domain; + int domain, ret; u32 version; struct scmi_power_info *pinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Power Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -293,7 +311,9 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph) if (!pinfo) return -ENOMEM; - scmi_power_attributes_get(ph, pinfo); + ret = scmi_power_attributes_get(ph, pinfo); + if (ret) + return ret; pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, sizeof(*pinfo->dom_info), GFP_KERNEL); @@ -303,7 +323,7 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph) for (domain = 0; domain < pinfo->num_domains; domain++) { struct power_dom_info *dom = pinfo->dom_info + domain; - scmi_power_domain_attributes_get(ph, domain, dom); + scmi_power_domain_attributes_get(ph, domain, dom, version); } pinfo->version = version; diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h new file mode 100644 index 000000000000..73304af5ec4a --- /dev/null +++ b/drivers/firmware/arm_scmi/protocols.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol + * protocols common header file containing some definitions, structures + * and function prototypes used in all the different SCMI protocols. + * + * Copyright (C) 2022 ARM Ltd. + */ +#ifndef _SCMI_PROTOCOLS_H +#define _SCMI_PROTOCOLS_H + +#include <linux/bitfield.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/hashtable.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/refcount.h> +#include <linux/scmi_protocol.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <asm/unaligned.h> + +#define SCMI_SHORT_NAME_MAX_SIZE 16 + +#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) +#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) +#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))) +#define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))) + +enum scmi_common_cmd { + PROTOCOL_VERSION = 0x0, + PROTOCOL_ATTRIBUTES = 0x1, + PROTOCOL_MESSAGE_ATTRIBUTES = 0x2, +}; + +/** + * struct scmi_msg_resp_prot_version - Response for a message + * + * @minor_version: Minor version of the ABI that firmware supports + * @major_version: Major version of the ABI that firmware supports + * + * In general, ABI version changes follow the rule that minor version increments + * are backward compatible. Major revision changes in ABI may not be + * backward compatible. 
+ * + * Response to a generic message with message type SCMI_MSG_VERSION + */ +struct scmi_msg_resp_prot_version { + __le16 minor_version; + __le16 major_version; +}; + +/** + * struct scmi_msg - Message(Tx/Rx) structure + * + * @buf: Buffer pointer + * @len: Length of data in the Buffer + */ +struct scmi_msg { + void *buf; + size_t len; +}; + +/** + * struct scmi_msg_hdr - Message(Tx/Rx) header + * + * @id: The identifier of the message being sent + * @protocol_id: The identifier of the protocol used to send @id message + * @type: The SCMI type for this message + * @seq: The token to identify the message. When a message returns, the + * platform returns the whole message header unmodified including the + * token + * @status: Status of the transfer once it's complete + * @poll_completion: Indicate if the transfer needs to be polled for + * completion or interrupt mode is used + */ +struct scmi_msg_hdr { + u8 id; + u8 protocol_id; + u8 type; + u16 seq; + u32 status; + bool poll_completion; +}; + +/** + * struct scmi_xfer - Structure representing a message flow + * + * @transfer_id: Unique ID for debug & profiling purpose + * @hdr: Transmit message header + * @tx: Transmit message + * @rx: Receive message, the buffer should be pre-allocated to store + * message. If request-ACK protocol is used, we can reuse the same + * buffer for the rx path as we use for the tx path. + * @done: command message transmit completion event + * @async_done: pointer to delayed response message received event completion + * @pending: True for xfers added to @pending_xfers hashtable + * @node: An hlist_node reference used to store this xfer, alternatively, on + * the free list @free_xfers or in the @pending_xfers hashtable + * @users: A refcount to track the active users for this xfer. + * This is meant to protect against the possibility that, when a command + * transaction times out concurrently with the reception of a valid + * response message, the xfer could be finally put on the TX path, and + * so vanish, while on the RX path scmi_rx_callback() is still + * processing it: in such a case this refcounting will ensure that, even + * though the timed-out transaction will anyway cause the command + * request to be reported as failed by time-out, the underlying xfer + * cannot be discarded and possibly reused until the last one user on + * the RX path has released it. + * @busy: An atomic flag to ensure exclusive write access to this xfer + * @state: The current state of this transfer, with states transitions deemed + * valid being: + * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ] + * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK + * (Missing synchronous response is assumed OK and ignored) + * @lock: A spinlock to protect state and busy fields. + * @priv: A pointer for transport private usage. + */ +struct scmi_xfer { + int transfer_id; + struct scmi_msg_hdr hdr; + struct scmi_msg tx; + struct scmi_msg rx; + struct completion done; + struct completion *async_done; + bool pending; + struct hlist_node node; + refcount_t users; +#define SCMI_XFER_FREE 0 +#define SCMI_XFER_BUSY 1 + atomic_t busy; +#define SCMI_XFER_SENT_OK 0 +#define SCMI_XFER_RESP_OK 1 +#define SCMI_XFER_DRESP_OK 2 + int state; + /* A lock to protect state and busy fields */ + spinlock_t lock; + void *priv; +}; + +struct scmi_xfer_ops; +struct scmi_proto_helpers_ops; + +/** + * struct scmi_protocol_handle - Reference to an initialized protocol instance + * + * @dev: A reference to the associated SCMI instance device (handle->dev). 
+ * @xops: A reference to a struct holding refs to the core xfer operations that + * can be used by the protocol implementation to generate SCMI messages. + * @set_priv: A method to set protocol private data for this instance. + * @get_priv: A method to get protocol private data previously set. + * + * This structure represents a protocol initialized against specific SCMI + * instance and it will be used as follows: + * - as a parameter fed from the core to the protocol initialization code so + * that it can access the core xfer operations to build and generate SCMI + * messages exclusively for the specific underlying protocol instance. + * - as an opaque handle fed by an SCMI driver user when it tries to access + * this protocol through its own protocol operations. + * In this case this handle will be returned as an opaque object together + * with the related protocol operations when the SCMI driver tries to access + * the protocol. + */ +struct scmi_protocol_handle { + struct device *dev; + const struct scmi_xfer_ops *xops; + const struct scmi_proto_helpers_ops *hops; + int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv); + void *(*get_priv)(const struct scmi_protocol_handle *ph); +}; + +/** + * struct scmi_iterator_state - Iterator current state descriptor + * @desc_index: Starting index for the current mulit-part request. + * @num_returned: Number of returned items in the last multi-part reply. + * @num_remaining: Number of remaining items in the multi-part message. + * @max_resources: Maximum acceptable number of items, configured by the caller + * depending on the underlying resources that it is querying. + * @loop_idx: The iterator loop index in the current multi-part reply. + * @priv: Optional pointer to some additional state-related private data setup + * by the caller during the iterations. + */ +struct scmi_iterator_state { + unsigned int desc_index; + unsigned int num_returned; + unsigned int num_remaining; + unsigned int max_resources; + unsigned int loop_idx; + void *priv; +}; + +/** + * struct scmi_iterator_ops - Custom iterator operations + * @prepare_message: An operation to provide the custom logic to fill in the + * SCMI command request pointed by @message. @desc_index is + * a reference to the next index to use in the multi-part + * request. + * @update_state: An operation to provide the custom logic to update the + * iterator state from the actual message response. + * @process_response: An operation to provide the custom logic needed to process + * each chunk of the multi-part message. + */ +struct scmi_iterator_ops { + void (*prepare_message)(void *message, unsigned int desc_index, + const void *priv); + int (*update_state)(struct scmi_iterator_state *st, + const void *response, void *priv); + int (*process_response)(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv); +}; + +/** + * struct scmi_proto_helpers_ops - References to common protocol helpers + * @extended_name_get: A common helper function to retrieve extended naming + * for the specified resource using the specified command. + * Result is returned as a NULL terminated string in the + * pre-allocated area pointed to by @name with maximum + * capacity of @len bytes. 
+ * @iter_response_init: A common helper to initialize a generic iterator to + * parse multi-message responses: when run the iterator + * will take care to send the initial command request as + * specified by @msg_id and @tx_size and then to parse the + * multi-part responses using the custom operations + * provided in @ops. + * @iter_response_run: A common helper to trigger the run of a previously + * initialized iterator. + */ +struct scmi_proto_helpers_ops { + int (*extended_name_get)(const struct scmi_protocol_handle *ph, + u8 cmd_id, u32 res_id, char *name, size_t len); + void *(*iter_response_init)(const struct scmi_protocol_handle *ph, + struct scmi_iterator_ops *ops, + unsigned int max_resources, u8 msg_id, + size_t tx_size, void *priv); + int (*iter_response_run)(void *iter); +}; + +/** + * struct scmi_xfer_ops - References to the core SCMI xfer operations. + * @version_get: Get this version protocol. + * @xfer_get_init: Initialize one struct xfer if any xfer slot is free. + * @reset_rx_to_maxsz: Reset rx size to max transport size. + * @do_xfer: Do the SCMI transfer. + * @do_xfer_with_response: Do the SCMI transfer waiting for a response. + * @xfer_put: Free the xfer slot. + * + * Note that all this operations expect a protocol handle as first parameter; + * they then internally use it to infer the underlying protocol number: this + * way is not possible for a protocol implementation to forge messages for + * another protocol. + */ +struct scmi_xfer_ops { + int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version); + int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id, + size_t tx_size, size_t rx_size, + struct scmi_xfer **p); + void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer); + int (*do_xfer)(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer); + int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer); + void (*xfer_put)(const struct scmi_protocol_handle *ph, + struct scmi_xfer *xfer); +}; + +typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *); + +/** + * struct scmi_protocol - Protocol descriptor + * @id: Protocol ID. + * @owner: Module reference if any. + * @instance_init: Mandatory protocol initialization function. + * @instance_deinit: Optional protocol de-initialization function. + * @ops: Optional reference to the operations provided by the protocol and + * exposed in scmi_protocol.h. + * @events: An optional reference to the events supported by this protocol. 
+ */ +struct scmi_protocol { + const u8 id; + struct module *owner; + const scmi_prot_init_ph_fn_t instance_init; + const scmi_prot_init_ph_fn_t instance_deinit; + const void *ops; + const struct scmi_protocol_events *events; +}; + +#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \ +static const struct scmi_protocol *__this_proto = &(proto); \ + \ +int __init scmi_##name##_register(void) \ +{ \ + return scmi_protocol_register(__this_proto); \ +} \ + \ +void __exit scmi_##name##_unregister(void) \ +{ \ + scmi_protocol_unregister(__this_proto); \ +} + +#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \ + int __init scmi_##func##_register(void); \ + void __exit scmi_##func##_unregister(void) +DECLARE_SCMI_REGISTER_UNREGISTER(base); +DECLARE_SCMI_REGISTER_UNREGISTER(clock); +DECLARE_SCMI_REGISTER_UNREGISTER(perf); +DECLARE_SCMI_REGISTER_UNREGISTER(power); +DECLARE_SCMI_REGISTER_UNREGISTER(reset); +DECLARE_SCMI_REGISTER_UNREGISTER(sensors); +DECLARE_SCMI_REGISTER_UNREGISTER(voltage); +DECLARE_SCMI_REGISTER_UNREGISTER(system); + +#endif /* _SCMI_PROTOCOLS_H */ diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index 9bf2478ec6d1..a420a9102094 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) Reset Protocol * - * Copyright (C) 2019-2021 ARM Ltd. + * Copyright (C) 2019-2022 ARM Ltd. */ #define pr_fmt(fmt) "SCMI Notifications RESET - " fmt @@ -10,13 +10,14 @@ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #include "notify.h" enum scmi_reset_protocol_cmd { RESET_DOMAIN_ATTRIBUTES = 0x3, RESET = 0x4, RESET_NOTIFY = 0x5, + RESET_DOMAIN_NAME_GET = 0x6, }; #define NUM_RESET_DOMAIN_MASK 0xffff @@ -26,8 +27,9 @@ struct scmi_msg_resp_reset_domain_attributes { __le32 attributes; #define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31)) #define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29)) __le32 latency; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_msg_reset_domain_reset { @@ -89,9 +91,11 @@ static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph, static int scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, - u32 domain, struct reset_dom_info *dom_info) + u32 domain, struct reset_dom_info *dom_info, + u32 version) { int ret; + u32 attributes; struct scmi_xfer *t; struct scmi_msg_resp_reset_domain_attributes *attr; @@ -105,7 +109,7 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { - u32 attributes = le32_to_cpu(attr->attributes); + attributes = le32_to_cpu(attr->attributes); dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes); dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes); @@ -116,6 +120,16 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph, } ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain, + dom_info->name, SCMI_MAX_STR_SIZE); + return ret; } @@ -126,8 +140,8 @@ static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph) return pi->num_domains; } -static char *scmi_reset_name_get(const struct scmi_protocol_handle *ph, - u32 domain) +static const char * +scmi_reset_name_get(const struct scmi_protocol_handle *ph, u32 domain) { struct scmi_reset_info *pi = ph->get_priv(ph); @@ -293,11 +307,13 @@ static const struct scmi_protocol_events reset_protocol_events = { static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph) { - int domain; + int domain, ret; u32 version; struct scmi_reset_info *pinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Reset Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); @@ -306,7 +322,9 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph) if (!pinfo) return -ENOMEM; - scmi_reset_attributes_get(ph, pinfo); + ret = scmi_reset_attributes_get(ph, pinfo); + if (ret) + return ret; pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains, sizeof(*pinfo->dom_info), GFP_KERNEL); @@ -316,7 +334,7 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph) for (domain = 0; domain < pinfo->num_domains; domain++) { struct reset_dom_info *dom = pinfo->dom_info + domain; - scmi_reset_domain_attributes_get(ph, domain, dom); + scmi_reset_domain_attributes_get(ph, domain, dom, version); } pinfo->version = version; diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index cdbb287bd8bc..21e0ce89b153 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) Sensor Protocol * - * Copyright (C) 2018-2021 ARM Ltd. + * Copyright (C) 2018-2022 ARM Ltd. 
*/ #define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt @@ -11,7 +11,7 @@ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #include "notify.h" #define SCMI_MAX_NUM_SENSOR_AXIS 63 @@ -27,6 +27,8 @@ enum scmi_sensor_protocol_cmd { SENSOR_CONFIG_GET = 0x9, SENSOR_CONFIG_SET = 0xA, SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB, + SENSOR_NAME_GET = 0xC, + SENSOR_AXIS_NAME_GET = 0xD, }; struct scmi_msg_resp_sensor_attributes { @@ -63,6 +65,10 @@ struct scmi_msg_resp_attrs { __le32 max_range_high; }; +struct scmi_msg_sensor_description { + __le32 desc_index; +}; + struct scmi_msg_resp_sensor_description { __le16 num_returned; __le16 num_remaining; @@ -71,6 +77,7 @@ struct scmi_msg_resp_sensor_description { __le32 attributes_low; /* Common attributes_low macros */ #define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x)) +#define SUPPORTS_EXTENDED_NAMES(x) FIELD_GET(BIT(29), (x)) #define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x)) __le32 attributes_high; /* Common attributes_high macros */ @@ -78,7 +85,7 @@ struct scmi_msg_resp_sensor_description { #define SENSOR_SCALE_SIGN BIT(4) #define SENSOR_SCALE_EXTEND GENMASK(31, 5) #define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x)) - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; /* only for version > 2.0 */ __le32 power; __le32 resolution; @@ -111,13 +118,22 @@ struct scmi_msg_resp_sensor_axis_description { struct scmi_axis_descriptor { __le32 id; __le32 attributes_low; +#define SUPPORTS_EXTENDED_AXIS_NAMES(x) FIELD_GET(BIT(9), (x)) __le32 attributes_high; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; __le32 resolution; struct scmi_msg_resp_attrs attrs; } desc[]; }; +struct scmi_msg_resp_sensor_axis_names_description { + __le32 num_axis_flags; + struct scmi_sensor_axis_name_descriptor { + __le32 axis_id; + u8 name[SCMI_MAX_STR_SIZE]; + } desc[]; +}; + /* Base scmi_axis_descriptor size excluding extended attrs after name */ #define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28 @@ -231,335 +247,414 @@ static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph, } static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out, - struct scmi_msg_resp_attrs *in) + const struct scmi_msg_resp_attrs *in) { out->min_range = get_unaligned_le64((void *)&in->min_range_low); out->max_range = get_unaligned_le64((void *)&in->max_range_low); } -static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, - struct scmi_sensor_info *s) -{ - int ret, cnt; - u32 desc_index = 0; - u16 num_returned, num_remaining; - struct scmi_xfer *ti; - struct scmi_msg_resp_sensor_list_update_intervals *buf; - struct scmi_msg_sensor_list_update_intervals *msg; - - ret = ph->xops->xfer_get_init(ph, SENSOR_LIST_UPDATE_INTERVALS, - sizeof(*msg), 0, &ti); - if (ret) - return ret; - - buf = ti->rx.buf; - do { - u32 flags; - - msg = ti->tx.buf; - /* Set the number of sensors to be skipped/already read */ - msg->id = cpu_to_le32(s->id); - msg->index = cpu_to_le32(desc_index); +struct scmi_sens_ipriv { + void *priv; + struct device *dev; +}; - ret = ph->xops->do_xfer(ph, ti); - if (ret) - break; +static void iter_intervals_prepare_message(void *message, + unsigned int desc_index, + const void *p) +{ + struct scmi_msg_sensor_list_update_intervals *msg = message; + const struct scmi_sensor_info *s; - flags = le32_to_cpu(buf->num_intervals_flags); - num_returned = NUM_INTERVALS_RETURNED(flags); - num_remaining = NUM_INTERVALS_REMAINING(flags); + s = ((const struct scmi_sens_ipriv 
*)p)->priv; + /* Set the number of sensors to be skipped/already read */ + msg->id = cpu_to_le32(s->id); + msg->index = cpu_to_le32(desc_index); +} - /* - * Max intervals is not declared previously anywhere so we - * assume it's returned+remaining. - */ - if (!s->intervals.count) { - s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags); - s->intervals.count = num_returned + num_remaining; - /* segmented intervals are reported in one triplet */ - if (s->intervals.segmented && - (num_remaining || num_returned != 3)) { - dev_err(ph->dev, - "Sensor ID:%d advertises an invalid segmented interval (%d)\n", - s->id, s->intervals.count); +static int iter_intervals_update_state(struct scmi_iterator_state *st, + const void *response, void *p) +{ + u32 flags; + struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv; + struct device *dev = ((struct scmi_sens_ipriv *)p)->dev; + const struct scmi_msg_resp_sensor_list_update_intervals *r = response; + + flags = le32_to_cpu(r->num_intervals_flags); + st->num_returned = NUM_INTERVALS_RETURNED(flags); + st->num_remaining = NUM_INTERVALS_REMAINING(flags); + + /* + * Max intervals is not declared previously anywhere so we + * assume it's returned+remaining on first call. + */ + if (!st->max_resources) { + s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags); + s->intervals.count = st->num_returned + st->num_remaining; + /* segmented intervals are reported in one triplet */ + if (s->intervals.segmented && + (st->num_remaining || st->num_returned != 3)) { + dev_err(dev, + "Sensor ID:%d advertises an invalid segmented interval (%d)\n", + s->id, s->intervals.count); + s->intervals.segmented = false; + s->intervals.count = 0; + return -EINVAL; + } + /* Direct allocation when exceeding pre-allocated */ + if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) { + s->intervals.desc = + devm_kcalloc(dev, + s->intervals.count, + sizeof(*s->intervals.desc), + GFP_KERNEL); + if (!s->intervals.desc) { s->intervals.segmented = false; s->intervals.count = 0; - ret = -EINVAL; - break; - } - /* Direct allocation when exceeding pre-allocated */ - if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) { - s->intervals.desc = - devm_kcalloc(ph->dev, - s->intervals.count, - sizeof(*s->intervals.desc), - GFP_KERNEL); - if (!s->intervals.desc) { - s->intervals.segmented = false; - s->intervals.count = 0; - ret = -ENOMEM; - break; - } + return -ENOMEM; } - } else if (desc_index + num_returned > s->intervals.count) { - dev_err(ph->dev, - "No. 
of update intervals can't exceed %d\n", - s->intervals.count); - ret = -EINVAL; - break; } - for (cnt = 0; cnt < num_returned; cnt++) - s->intervals.desc[desc_index + cnt] = - le32_to_cpu(buf->intervals[cnt]); + st->max_resources = s->intervals.count; + } + + return 0; +} - desc_index += num_returned; +static int +iter_intervals_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *p) +{ + const struct scmi_msg_resp_sensor_list_update_intervals *r = response; + struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv; - ph->xops->reset_rx_to_maxsz(ph, ti); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); + s->intervals.desc[st->desc_index + st->loop_idx] = + le32_to_cpu(r->intervals[st->loop_idx]); - ph->xops->xfer_put(ph, ti); - return ret; + return 0; } -static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, +static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, struct scmi_sensor_info *s) { - int ret, cnt; - u32 desc_index = 0; - u16 num_returned, num_remaining; - struct scmi_xfer *te; - struct scmi_msg_resp_sensor_axis_description *buf; - struct scmi_msg_sensor_axis_description_get *msg; + void *iter; + struct scmi_msg_sensor_list_update_intervals *msg; + struct scmi_iterator_ops ops = { + .prepare_message = iter_intervals_prepare_message, + .update_state = iter_intervals_update_state, + .process_response = iter_intervals_process_response, + }; + struct scmi_sens_ipriv upriv = { + .priv = s, + .dev = ph->dev, + }; + + iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count, + SENSOR_LIST_UPDATE_INTERVALS, + sizeof(*msg), &upriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} - s->axis = devm_kcalloc(ph->dev, s->num_axis, - sizeof(*s->axis), GFP_KERNEL); - if (!s->axis) - return -ENOMEM; +static void iter_axes_desc_prepare_message(void *message, + const unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_sensor_axis_description_get *msg = message; + const struct scmi_sensor_info *s = priv; - ret = ph->xops->xfer_get_init(ph, SENSOR_AXIS_DESCRIPTION_GET, - sizeof(*msg), 0, &te); - if (ret) - return ret; + /* Set the number of sensors to be skipped/already read */ + msg->id = cpu_to_le32(s->id); + msg->axis_desc_index = cpu_to_le32(desc_index); +} - buf = te->rx.buf; - do { - u32 flags; - struct scmi_axis_descriptor *adesc; +static int +iter_axes_desc_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + const struct scmi_msg_resp_sensor_axis_description *r = response; - msg = te->tx.buf; - /* Set the number of sensors to be skipped/already read */ - msg->id = cpu_to_le32(s->id); - msg->axis_desc_index = cpu_to_le32(desc_index); + flags = le32_to_cpu(r->num_axis_flags); + st->num_returned = NUM_AXIS_RETURNED(flags); + st->num_remaining = NUM_AXIS_REMAINING(flags); + st->priv = (void *)&r->desc[0]; - ret = ph->xops->do_xfer(ph, te); - if (ret) - break; + return 0; +} - flags = le32_to_cpu(buf->num_axis_flags); - num_returned = NUM_AXIS_RETURNED(flags); - num_remaining = NUM_AXIS_REMAINING(flags); +static int +iter_axes_desc_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + u32 attrh, attrl; + struct scmi_sensor_axis_info *a; + size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ; + struct 
scmi_sensor_info *s = priv; + const struct scmi_axis_descriptor *adesc = st->priv; - if (desc_index + num_returned > s->num_axis) { - dev_err(ph->dev, "No. of axis can't exceed %d\n", - s->num_axis); - break; - } + attrl = le32_to_cpu(adesc->attributes_low); - adesc = &buf->desc[0]; - for (cnt = 0; cnt < num_returned; cnt++) { - u32 attrh, attrl; - struct scmi_sensor_axis_info *a; - size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ; + a = &s->axis[st->desc_index + st->loop_idx]; + a->id = le32_to_cpu(adesc->id); + a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl); - attrl = le32_to_cpu(adesc->attributes_low); + attrh = le32_to_cpu(adesc->attributes_high); + a->scale = S32_EXT(SENSOR_SCALE(attrh)); + a->type = SENSOR_TYPE(attrh); + strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); - a = &s->axis[desc_index + cnt]; + if (a->extended_attrs) { + unsigned int ares = le32_to_cpu(adesc->resolution); - a->id = le32_to_cpu(adesc->id); - a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl); + a->resolution = SENSOR_RES(ares); + a->exponent = S32_EXT(SENSOR_RES_EXP(ares)); + dsize += sizeof(adesc->resolution); - attrh = le32_to_cpu(adesc->attributes_high); - a->scale = S32_EXT(SENSOR_SCALE(attrh)); - a->type = SENSOR_TYPE(attrh); - strlcpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); + scmi_parse_range_attrs(&a->attrs, &adesc->attrs); + dsize += sizeof(adesc->attrs); + } + st->priv = ((u8 *)adesc + dsize); + + return 0; +} - if (a->extended_attrs) { - unsigned int ares = - le32_to_cpu(adesc->resolution); +static int +iter_axes_extended_name_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + u32 flags; + const struct scmi_msg_resp_sensor_axis_names_description *r = response; - a->resolution = SENSOR_RES(ares); - a->exponent = - S32_EXT(SENSOR_RES_EXP(ares)); - dsize += sizeof(adesc->resolution); + flags = le32_to_cpu(r->num_axis_flags); + st->num_returned = NUM_AXIS_RETURNED(flags); + st->num_remaining = NUM_AXIS_REMAINING(flags); + st->priv = (void *)&r->desc[0]; - scmi_parse_range_attrs(&a->attrs, - &adesc->attrs); - dsize += sizeof(adesc->attrs); - } + return 0; +} - adesc = (typeof(adesc))((u8 *)adesc + dsize); - } +static int +iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, + void *priv) +{ + struct scmi_sensor_axis_info *a; + const struct scmi_sensor_info *s = priv; + struct scmi_sensor_axis_name_descriptor *adesc = st->priv; - desc_index += num_returned; + a = &s->axis[st->desc_index + st->loop_idx]; + strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); + st->priv = ++adesc; - ph->xops->reset_rx_to_maxsz(ph, te); - /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware - */ - } while (num_returned && num_remaining); + return 0; +} - ph->xops->xfer_put(ph, te); - return ret; +static int +scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph, + struct scmi_sensor_info *s) +{ + void *iter; + struct scmi_msg_sensor_axis_description_get *msg; + struct scmi_iterator_ops ops = { + .prepare_message = iter_axes_desc_prepare_message, + .update_state = iter_axes_extended_name_update_state, + .process_response = iter_axes_extended_name_process_response, + }; + + iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, + SENSOR_AXIS_NAME_GET, + sizeof(*msg), s); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); } -static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph, - 
struct sensors_info *si) +static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, + struct scmi_sensor_info *s, + u32 version) { - int ret, cnt; - u32 desc_index = 0; - u16 num_returned, num_remaining; - struct scmi_xfer *t; - struct scmi_msg_resp_sensor_description *buf; + int ret; + void *iter; + struct scmi_msg_sensor_axis_description_get *msg; + struct scmi_iterator_ops ops = { + .prepare_message = iter_axes_desc_prepare_message, + .update_state = iter_axes_desc_update_state, + .process_response = iter_axes_desc_process_response, + }; - ret = ph->xops->xfer_get_init(ph, SENSOR_DESCRIPTION_GET, - sizeof(__le32), 0, &t); + s->axis = devm_kcalloc(ph->dev, s->num_axis, + sizeof(*s->axis), GFP_KERNEL); + if (!s->axis) + return -ENOMEM; + + iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, + SENSOR_AXIS_DESCRIPTION_GET, + sizeof(*msg), s); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); if (ret) return ret; - buf = t->rx.buf; - - do { - struct scmi_sensor_descriptor *sdesc; + if (PROTOCOL_REV_MAJOR(version) >= 0x3) + ret = scmi_sensor_axis_extended_names_get(ph, s); - /* Set the number of sensors to be skipped/already read */ - put_unaligned_le32(desc_index, t->tx.buf); + return ret; +} - ret = ph->xops->do_xfer(ph, t); - if (ret) - break; +static void iter_sens_descr_prepare_message(void *message, + unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_sensor_description *msg = message; - num_returned = le16_to_cpu(buf->num_returned); - num_remaining = le16_to_cpu(buf->num_remaining); + msg->desc_index = cpu_to_le32(desc_index); +} - if (desc_index + num_returned > si->num_sensors) { - dev_err(ph->dev, "No. of sensors can't exceed %d", - si->num_sensors); - break; - } +static int iter_sens_descr_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_msg_resp_sensor_description *r = response; - sdesc = &buf->desc[0]; - for (cnt = 0; cnt < num_returned; cnt++) { - u32 attrh, attrl; - struct scmi_sensor_info *s; - size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ; - - s = &si->sensors[desc_index + cnt]; - s->id = le32_to_cpu(sdesc->id); - - attrl = le32_to_cpu(sdesc->attributes_low); - /* common bitfields parsing */ - s->async = SUPPORTS_ASYNC_READ(attrl); - s->num_trip_points = NUM_TRIP_POINTS(attrl); - /** - * only SCMIv3.0 specific bitfield below. - * Such bitfields are assumed to be zeroed on non - * relevant fw versions...assuming fw not buggy ! - */ - s->update = SUPPORTS_UPDATE_NOTIFY(attrl); - s->timestamped = SUPPORTS_TIMESTAMP(attrl); - if (s->timestamped) - s->tstamp_scale = - S32_EXT(SENSOR_TSTAMP_EXP(attrl)); - s->extended_scalar_attrs = - SUPPORTS_EXTEND_ATTRS(attrl); - - attrh = le32_to_cpu(sdesc->attributes_high); - /* common bitfields parsing */ - s->scale = S32_EXT(SENSOR_SCALE(attrh)); - s->type = SENSOR_TYPE(attrh); - /* Use pre-allocated pool wherever possible */ - s->intervals.desc = s->intervals.prealloc_pool; - if (si->version == SCMIv2_SENSOR_PROTOCOL) { - s->intervals.segmented = false; - s->intervals.count = 1; - /* - * Convert SCMIv2.0 update interval format to - * SCMIv3.0 to be used as the common exposed - * descriptor, accessible via common macros. - */ - s->intervals.desc[0] = - (SENSOR_UPDATE_BASE(attrh) << 5) | - SENSOR_UPDATE_SCALE(attrh); - } else { - /* - * From SCMIv3.0 update intervals are retrieved - * via a dedicated (optional) command. - * Since the command is optional, on error carry - * on without any update interval. 
- */ - if (scmi_sensor_update_intervals(ph, s)) - dev_dbg(ph->dev, - "Update Intervals not available for sensor ID:%d\n", - s->id); - } - /** - * only > SCMIv2.0 specific bitfield below. - * Such bitfields are assumed to be zeroed on non - * relevant fw versions...assuming fw not buggy ! - */ - s->num_axis = min_t(unsigned int, - SUPPORTS_AXIS(attrh) ? - SENSOR_AXIS_NUMBER(attrh) : 0, - SCMI_MAX_NUM_SENSOR_AXIS); - strlcpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE); - - if (s->extended_scalar_attrs) { - s->sensor_power = le32_to_cpu(sdesc->power); - dsize += sizeof(sdesc->power); - /* Only for sensors reporting scalar values */ - if (s->num_axis == 0) { - unsigned int sres = - le32_to_cpu(sdesc->resolution); - - s->resolution = SENSOR_RES(sres); - s->exponent = - S32_EXT(SENSOR_RES_EXP(sres)); - dsize += sizeof(sdesc->resolution); - - scmi_parse_range_attrs(&s->scalar_attrs, - &sdesc->scalar_attrs); - dsize += sizeof(sdesc->scalar_attrs); - } - } - if (s->num_axis > 0) { - ret = scmi_sensor_axis_description(ph, s); - if (ret) - goto out; - } + st->num_returned = le16_to_cpu(r->num_returned); + st->num_remaining = le16_to_cpu(r->num_remaining); + st->priv = (void *)&r->desc[0]; - sdesc = (typeof(sdesc))((u8 *)sdesc + dsize); - } + return 0; +} - desc_index += num_returned; +static int +iter_sens_descr_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) - ph->xops->reset_rx_to_maxsz(ph, t); +{ + int ret = 0; + u32 attrh, attrl; + size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ; + struct scmi_sensor_info *s; + struct sensors_info *si = priv; + const struct scmi_sensor_descriptor *sdesc = st->priv; + + s = &si->sensors[st->desc_index + st->loop_idx]; + s->id = le32_to_cpu(sdesc->id); + + attrl = le32_to_cpu(sdesc->attributes_low); + /* common bitfields parsing */ + s->async = SUPPORTS_ASYNC_READ(attrl); + s->num_trip_points = NUM_TRIP_POINTS(attrl); + /** + * only SCMIv3.0 specific bitfield below. + * Such bitfields are assumed to be zeroed on non + * relevant fw versions...assuming fw not buggy ! + */ + s->update = SUPPORTS_UPDATE_NOTIFY(attrl); + s->timestamped = SUPPORTS_TIMESTAMP(attrl); + if (s->timestamped) + s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl)); + s->extended_scalar_attrs = SUPPORTS_EXTEND_ATTRS(attrl); + + attrh = le32_to_cpu(sdesc->attributes_high); + /* common bitfields parsing */ + s->scale = S32_EXT(SENSOR_SCALE(attrh)); + s->type = SENSOR_TYPE(attrh); + /* Use pre-allocated pool wherever possible */ + s->intervals.desc = s->intervals.prealloc_pool; + if (si->version == SCMIv2_SENSOR_PROTOCOL) { + s->intervals.segmented = false; + s->intervals.count = 1; + /* + * Convert SCMIv2.0 update interval format to + * SCMIv3.0 to be used as the common exposed + * descriptor, accessible via common macros. + */ + s->intervals.desc[0] = (SENSOR_UPDATE_BASE(attrh) << 5) | + SENSOR_UPDATE_SCALE(attrh); + } else { /* - * check for both returned and remaining to avoid infinite - * loop due to buggy firmware + * From SCMIv3.0 update intervals are retrieved + * via a dedicated (optional) command. + * Since the command is optional, on error carry + * on without any update interval. */ - } while (num_returned && num_remaining); + if (scmi_sensor_update_intervals(ph, s)) + dev_dbg(ph->dev, + "Update Intervals not available for sensor ID:%d\n", + s->id); + } + /** + * only > SCMIv2.0 specific bitfield below. + * Such bitfields are assumed to be zeroed on non + * relevant fw versions...assuming fw not buggy ! 
+ */ + s->num_axis = min_t(unsigned int, + SUPPORTS_AXIS(attrh) ? + SENSOR_AXIS_NUMBER(attrh) : 0, + SCMI_MAX_NUM_SENSOR_AXIS); + strscpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE); + + /* + * If supported overwrite short name with the extended + * one; on error just carry on and use already provided + * short name. + */ + if (PROTOCOL_REV_MAJOR(si->version) >= 0x3 && + SUPPORTS_EXTENDED_NAMES(attrl)) + ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id, + s->name, SCMI_MAX_STR_SIZE); + + if (s->extended_scalar_attrs) { + s->sensor_power = le32_to_cpu(sdesc->power); + dsize += sizeof(sdesc->power); + + /* Only for sensors reporting scalar values */ + if (s->num_axis == 0) { + unsigned int sres = le32_to_cpu(sdesc->resolution); + + s->resolution = SENSOR_RES(sres); + s->exponent = S32_EXT(SENSOR_RES_EXP(sres)); + dsize += sizeof(sdesc->resolution); + + scmi_parse_range_attrs(&s->scalar_attrs, + &sdesc->scalar_attrs); + dsize += sizeof(sdesc->scalar_attrs); + } + } + + if (s->num_axis > 0) + ret = scmi_sensor_axis_description(ph, s, si->version); + + st->priv = ((u8 *)sdesc + dsize); -out: - ph->xops->xfer_put(ph, t); return ret; } +static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph, + struct sensors_info *si) +{ + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_sens_descr_prepare_message, + .update_state = iter_sens_descr_update_state, + .process_response = iter_sens_descr_process_response, + }; + + iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors, + SENSOR_DESCRIPTION_GET, + sizeof(__le32), si); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + static inline int scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id, u8 message_id, bool enable) @@ -966,7 +1061,9 @@ static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph) int ret; struct sensors_info *sinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "Sensor Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c index e5175ef73b40..220e399118ad 100644 --- a/drivers/firmware/arm_scmi/system.c +++ b/drivers/firmware/arm_scmi/system.c @@ -2,7 +2,7 @@ /* * System Control and Management Interface (SCMI) System Power Protocol * - * Copyright (C) 2020-2021 ARM Ltd. + * Copyright (C) 2020-2022 ARM Ltd. */ #define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt @@ -10,7 +10,7 @@ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #include "notify.h" #define SCMI_SYSTEM_NUM_SOURCES 1 @@ -113,10 +113,13 @@ static const struct scmi_protocol_events system_protocol_events = { static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph) { + int ret; u32 version; struct scmi_system_info *pinfo; - ph->xops->version_get(ph, &version); + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; dev_dbg(ph->dev, "System Power Version %d.%d\n", PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c index ac08e819088b..9d195d8719ab 100644 --- a/drivers/firmware/arm_scmi/voltage.c +++ b/drivers/firmware/arm_scmi/voltage.c @@ -2,13 +2,13 @@ /* * System Control and Management Interface (SCMI) Voltage Protocol * - * Copyright (C) 2020-2021 ARM Ltd. 
+ * Copyright (C) 2020-2022 ARM Ltd. */ #include <linux/module.h> #include <linux/scmi_protocol.h> -#include "common.h" +#include "protocols.h" #define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0) #define REMAINING_LEVELS_MASK GENMASK(31, 16) @@ -21,13 +21,16 @@ enum scmi_voltage_protocol_cmd { VOLTAGE_CONFIG_GET = 0x6, VOLTAGE_LEVEL_SET = 0x7, VOLTAGE_LEVEL_GET = 0x8, + VOLTAGE_DOMAIN_NAME_GET = 0x09, }; #define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x)))) struct scmi_msg_resp_domain_attributes { __le32 attr; - u8 name[SCMI_MAX_STR_SIZE]; +#define SUPPORTS_ASYNC_LEVEL_SET(x) ((x) & BIT(31)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(30)) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; struct scmi_msg_cmd_describe_levels { @@ -54,6 +57,11 @@ struct scmi_msg_cmd_level_set { __le32 voltage_level; }; +struct scmi_resp_voltage_level_set_complete { + __le32 domain_id; + __le32 voltage_level; +}; + struct voltage_info { unsigned int version; unsigned int num_domains; @@ -110,14 +118,100 @@ static int scmi_init_voltage_levels(struct device *dev, return 0; } +struct scmi_volt_ipriv { + struct device *dev; + struct scmi_voltage_info *v; +}; + +static void iter_volt_levels_prepare_message(void *message, + unsigned int desc_index, + const void *priv) +{ + struct scmi_msg_cmd_describe_levels *msg = message; + const struct scmi_volt_ipriv *p = priv; + + msg->domain_id = cpu_to_le32(p->v->id); + msg->level_index = cpu_to_le32(desc_index); +} + +static int iter_volt_levels_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + int ret = 0; + u32 flags; + const struct scmi_msg_resp_describe_levels *r = response; + struct scmi_volt_ipriv *p = priv; + + flags = le32_to_cpu(r->flags); + st->num_returned = NUM_RETURNED_LEVELS(flags); + st->num_remaining = NUM_REMAINING_LEVELS(flags); + + /* Allocate space for num_levels if not already done */ + if (!p->v->num_levels) { + ret = scmi_init_voltage_levels(p->dev, p->v, st->num_returned, + st->num_remaining, + SUPPORTS_SEGMENTED_LEVELS(flags)); + if (!ret) + st->max_resources = p->v->num_levels; + } + + return ret; +} + +static int +iter_volt_levels_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + s32 val; + const struct scmi_msg_resp_describe_levels *r = response; + struct scmi_volt_ipriv *p = priv; + + val = (s32)le32_to_cpu(r->voltage[st->loop_idx]); + p->v->levels_uv[st->desc_index + st->loop_idx] = val; + if (val < 0) + p->v->negative_volts_allowed = true; + + return 0; +} + +static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph, + struct scmi_voltage_info *v) +{ + int ret; + void *iter; + struct scmi_msg_cmd_describe_levels *msg; + struct scmi_iterator_ops ops = { + .prepare_message = iter_volt_levels_prepare_message, + .update_state = iter_volt_levels_update_state, + .process_response = iter_volt_levels_process_response, + }; + struct scmi_volt_ipriv vpriv = { + .dev = ph->dev, + .v = v, + }; + + iter = ph->hops->iter_response_init(ph, &ops, v->num_levels, + VOLTAGE_DESCRIBE_LEVELS, + sizeof(*msg), &vpriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = ph->hops->iter_response_run(iter); + if (ret) { + v->num_levels = 0; + devm_kfree(ph->dev, v->levels_uv); + } + + return ret; +} + static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, struct voltage_info *vinfo) { int ret, dom; - struct scmi_xfer *td, *tl; - struct device *dev = ph->dev; + struct scmi_xfer *td; struct 
scmi_msg_resp_domain_attributes *resp_dom; - struct scmi_msg_resp_describe_levels *resp_levels; ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES, sizeof(__le32), sizeof(*resp_dom), &td); @@ -125,16 +219,8 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, return ret; resp_dom = td->rx.buf; - ret = ph->xops->xfer_get_init(ph, VOLTAGE_DESCRIBE_LEVELS, - sizeof(__le64), 0, &tl); - if (ret) - goto outd; - resp_levels = tl->rx.buf; - for (dom = 0; dom < vinfo->num_domains; dom++) { - u32 desc_index = 0; - u16 num_returned = 0, num_remaining = 0; - struct scmi_msg_cmd_describe_levels *cmd; + u32 attributes; struct scmi_voltage_info *v; /* Retrieve domain attributes at first ... */ @@ -146,69 +232,31 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, v = vinfo->domains + dom; v->id = dom; - v->attributes = le32_to_cpu(resp_dom->attr); + attributes = le32_to_cpu(resp_dom->attr); strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE); - cmd = tl->tx.buf; - /* ...then retrieve domain levels descriptions */ - do { - u32 flags; - int cnt; - - cmd->domain_id = cpu_to_le32(v->id); - cmd->level_index = cpu_to_le32(desc_index); - ret = ph->xops->do_xfer(ph, tl); - if (ret) - break; - - flags = le32_to_cpu(resp_levels->flags); - num_returned = NUM_RETURNED_LEVELS(flags); - num_remaining = NUM_REMAINING_LEVELS(flags); - - /* Allocate space for num_levels if not already done */ - if (!v->num_levels) { - ret = scmi_init_voltage_levels(dev, v, - num_returned, - num_remaining, - SUPPORTS_SEGMENTED_LEVELS(flags)); - if (ret) - break; - } - - if (desc_index + num_returned > v->num_levels) { - dev_err(ph->dev, - "No. of voltage levels can't exceed %d\n", - v->num_levels); - ret = -EINVAL; - break; - } - - for (cnt = 0; cnt < num_returned; cnt++) { - s32 val; - - val = - (s32)le32_to_cpu(resp_levels->voltage[cnt]); - v->levels_uv[desc_index + cnt] = val; - if (val < 0) - v->negative_volts_allowed = true; - } - - desc_index += num_returned; - - ph->xops->reset_rx_to_maxsz(ph, tl); - /* check both to avoid infinite loop due to buggy fw */ - } while (num_returned && num_remaining); - - if (ret) { - v->num_levels = 0; - devm_kfree(dev, v->levels_uv); + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (PROTOCOL_REV_MAJOR(vinfo->version) >= 0x2) { + if (SUPPORTS_EXTENDED_NAMES(attributes)) + ph->hops->extended_name_get(ph, + VOLTAGE_DOMAIN_NAME_GET, + v->id, v->name, + SCMI_MAX_STR_SIZE); + if (SUPPORTS_ASYNC_LEVEL_SET(attributes)) + v->async_level_set = true; } + ret = scmi_voltage_levels_get(ph, v); + /* Skip invalid voltage descriptors */ + if (ret) + continue; + ph->xops->reset_rx_to_maxsz(ph, td); } - ph->xops->xfer_put(ph, tl); -outd: ph->xops->xfer_put(ph, td); return ret; @@ -271,12 +319,15 @@ static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph, } static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph, - u32 domain_id, u32 flags, s32 volt_uV) + u32 domain_id, + enum scmi_voltage_level_mode mode, + s32 volt_uV) { int ret; struct scmi_xfer *t; struct voltage_info *vinfo = ph->get_priv(ph); struct scmi_msg_cmd_level_set *cmd; + struct scmi_voltage_info *v; if (domain_id >= vinfo->num_domains) return -EINVAL; @@ -286,12 +337,31 @@ static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph, if (ret) return ret; + v = vinfo->domains + domain_id; + cmd = t->tx.buf; cmd->domain_id = cpu_to_le32(domain_id); - cmd->flags = cpu_to_le32(flags); cmd->voltage_level = cpu_to_le32(volt_uV); - ret = ph->xops->do_xfer(ph, t); + if (!v->async_level_set || mode != SCMI_VOLTAGE_LEVEL_SET_AUTO) { + cmd->flags = cpu_to_le32(0x0); + ret = ph->xops->do_xfer(ph, t); + } else { + cmd->flags = cpu_to_le32(0x1); + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_resp_voltage_level_set_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->domain_id) == domain_id) + dev_dbg(ph->dev, + "Voltage domain %d set async to %d\n", + v->id, + le32_to_cpu(resp->voltage_level)); + else + ret = -EPROTO; + } + } ph->xops->xfer_put(ph, t); return ret; diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 8e5d87984a48..41c31b10ae84 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -134,7 +134,7 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc, struct gpio_sim_chip *chip = gpiochip_get_data(gc); mutex_lock(&chip->lock); - bitmap_copy(bits, chip->value_map, gc->ngpio); + bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio); mutex_unlock(&chip->lock); return 0; @@ -146,7 +146,7 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc, struct gpio_sim_chip *chip = gpiochip_get_data(gc); mutex_lock(&chip->lock); - bitmap_copy(chip->value_map, bits, gc->ngpio); + bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio); mutex_unlock(&chip->lock); } diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index a5495ad31c9c..c2523ac26fac 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -108,7 +108,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) * controller does not have GPIO chip registered at the moment. This is to * support probe deferral. */ -static struct gpio_desc *acpi_get_gpiod(char *path, int pin) +static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin) { struct gpio_chip *chip; acpi_handle handle; @@ -136,7 +136,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) * as it is intended for use outside of the GPIO layer (in a similar fashion to * gpiod_get_index() for example) it also holds a reference to the GPIO device. 
*/ -struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label) +struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label) { struct gpio_desc *gpio; int ret; @@ -317,11 +317,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, return desc; } -static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in) +static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in) { const char *controller, *pin_str; - int len, pin; + unsigned int pin; char *endp; + int len; controller = ignore_wake; while (controller) { @@ -354,13 +355,13 @@ err: static bool acpi_gpio_irq_is_wake(struct device *parent, struct acpi_resource_gpio *agpio) { - int pin = agpio->pin_table[0]; + unsigned int pin = agpio->pin_table[0]; if (agpio->wake_capable != ACPI_WAKE_CAPABLE) return false; if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) { - dev_info(parent, "Ignoring wakeup on pin %d\n", pin); + dev_info(parent, "Ignoring wakeup on pin %u\n", pin); return false; } @@ -378,7 +379,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, struct acpi_gpio_event *event; irq_handler_t handler = NULL; struct gpio_desc *desc; - int ret, pin, irq; + unsigned int pin; + int ret, irq; if (!acpi_gpio_get_irq_resource(ares, &agpio)) return AE_OK; @@ -387,8 +389,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, pin = agpio->pin_table[0]; if (pin <= 255) { - char ev_name[5]; - sprintf(ev_name, "_%c%02hhX", + char ev_name[8]; + sprintf(ev_name, "_%c%02X", agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', pin); if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) @@ -1098,7 +1100,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, length = min_t(u16, agpio->pin_table_length, pin_index + bits); for (i = pin_index; i < length; ++i) { - int pin = agpio->pin_table[i]; + unsigned int pin = agpio->pin_table[i]; struct acpi_gpio_connection *conn; struct gpio_desc *desc; bool found; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index e59884cc12a7..085348e08986 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1404,6 +1404,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset) { struct irq_domain *domain = gc->irq.domain; +#ifdef CONFIG_GPIOLIB_IRQCHIP + /* + * Avoid race condition with other code, which tries to lookup + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up. + */ + if (!gc->irq.initialized) + return -EPROBE_DEFER; +#endif + if (!gpiochip_irqchip_irq_valid(gc, offset)) return -ENXIO; @@ -1593,6 +1603,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, acpi_gpiochip_request_interrupts(gc); + /* + * Using barrier() here to prevent compiler from reordering + * gc->irq.initialized before initialization of above + * GPIO chip irq members. 
+ */ + barrier(); + + gc->irq.initialized = true; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index 5b393622f592..a0f0a17e224f 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h @@ -119,6 +119,7 @@ #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 +#define CONNECTOR_OBJECT_ID_USBC 0x17 /* deleted */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3987ecb24ef4..49f734137f15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5733,7 +5733,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) return; #endif if (adev->gmc.xgmi.connected_to_cpu) @@ -5749,7 +5749,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) return; #endif if (adev->gmc.xgmi.connected_to_cpu) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index bb1c025d9001..29e9419a914b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -680,7 +680,7 @@ MODULE_PARM_DESC(sched_policy, * Maximum number of processes that HWS can schedule concurrently. The maximum is the * number of VMIDs assigned to the HWS, which is also the default. */ -int hws_max_conc_proc = 8; +int hws_max_conc_proc = -1; module_param(hws_max_conc_proc, int, 0444); MODULE_PARM_DESC(hws_max_conc_proc, "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); @@ -2323,18 +2323,23 @@ static int amdgpu_pmops_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - int r; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else adev->in_s3 = true; - r = amdgpu_device_suspend(drm_dev, true); - if (r) - return r; + return amdgpu_device_suspend(drm_dev, true); +} + +static int amdgpu_pmops_suspend_noirq(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + if (!adev->in_s0ix) - r = amdgpu_asic_reset(adev); - return r; + return amdgpu_asic_reset(adev); + + return 0; } static int amdgpu_pmops_resume(struct device *dev) @@ -2575,6 +2580,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = { .prepare = amdgpu_pmops_prepare, .complete = amdgpu_pmops_complete, .suspend = amdgpu_pmops_suspend, + .suspend_noirq = amdgpu_pmops_suspend_noirq, .resume = amdgpu_pmops_resume, .freeze = amdgpu_pmops_freeze, .thaw = amdgpu_pmops_thaw, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 8fe939976224..28a736c507bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; - while (queue_bit-- >= 0) { + while (--queue_bit >= 0) { if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index ca2cfb65f976..a66a0881a934 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -561,9 +561,15 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 3, 0): case IP_VERSION(9, 4, 0): case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): + case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 6): + case IP_VERSION(10, 3, 7): /* * noretry = 0 will cause kfd page fault tests fail * for some ASICs, so set default to 1 for these ASICs. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 25731719c627..940752488330 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1284,6 +1284,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, */ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct dma_fence *fence = NULL; struct amdgpu_bo *abo; int r; @@ -1303,7 +1304,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo); if (bo->resource->mem_type != TTM_PL_VRAM || - !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) + !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) || + adev->in_suspend || adev->shutdown) return; if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 5320bb0883d8..317d80209e95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -300,8 +300,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, - unsigned int ring_size, struct amdgpu_irq_src *irq_src, - unsigned int irq_type, unsigned int prio, + unsigned int max_dw, struct amdgpu_irq_src *irq_src, + unsigned int irq_type, unsigned int hw_prio, atomic_t *sched_score); void amdgpu_ring_fini(struct amdgpu_ring *ring); void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index f99093f2ebc7..a0ee828a4a97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -52,7 +52,7 @@ #define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin" #define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin" #define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin" -#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2_vcn.bin" +#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index e2fde88aaf5e..f06fb7f882e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -159,6 +159,7 @@ #define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8) #define AMDGPU_VCN_SW_RING_FLAG (1 << 9) #define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10) +#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11) #define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001 #define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 
0x00000001 @@ -279,6 +280,11 @@ struct amdgpu_fw_shared_fw_logging { uint32_t size; }; +struct amdgpu_fw_shared_smu_interface_info { + uint8_t smu_interface_type; + uint8_t padding[3]; +}; + struct amdgpu_fw_shared { uint32_t present_flag_0; uint8_t pad[44]; @@ -287,6 +293,7 @@ struct amdgpu_fw_shared { struct amdgpu_fw_shared_multi_queue multi_queue; struct amdgpu_fw_shared_sw_ring sw_ring; struct amdgpu_fw_shared_fw_logging fw_log; + struct amdgpu_fw_shared_smu_interface_info smu_interface_info; }; struct amdgpu_vcn_fwlog { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f4c6accd3226..9426e252d8aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3293,7 +3293,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210), @@ -3429,7 +3429,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_6[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000042), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x00000044), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210), @@ -3454,7 +3454,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_7[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000041), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210), @@ -7689,6 +7689,7 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 7): preempt_disable(); clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 46d4bf27ebbb..b8cfcc6b1125 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1205,6 +1205,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, 
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, + /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ + { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 3c1d440824a7..5228421b0f72 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -814,7 +814,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev); adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 344d819b4c1b..979da6f510e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU && - adev->gmc.real_vram_size > adev->gmc.aper_size) { + if ((adev->flags & AMD_IS_APU) && + adev->gmc.real_vram_size > adev->gmc.aper_size && + !amdgpu_passthrough(adev)) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index ca9841d5669f..1932a3e4af7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 431742eb7811..6009fbfdcc19 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1456,7 +1456,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) */ /* check whether both host-gpu and gpu-gpu xgmi links exist */ - if ((adev->flags & AMD_IS_APU) || + if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)) { adev->gmc.aper_base = @@ -1721,7 +1721,7 @@ static int gmc_v9_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); amdgpu_gart_table_vram_free(adev); - amdgpu_bo_unref(&adev->gmc.pdb0_bo); + amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); amdgpu_bo_fini(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index dff54190b96c..f0fbcda76f5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -24,6 +24,7 @@ #include <linux/firmware.h> #include "amdgpu.h" +#include "amdgpu_cs.h" #include "amdgpu_vcn.h" #include "amdgpu_pm.h" #include "soc15.h" @@ -1900,6 +1901,75 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .set_powergating_state = vcn_v1_0_set_powergating_state, }; +/* + * It is a hardware issue that VCN can't handle a GTT TMZ buffer on + * 
CHIP_RAVEN series ASIC. Move such a GTT TMZ buffer to VRAM domain + * before command submission as a workaround. + */ +static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser, + struct amdgpu_job *job, + uint64_t addr) +{ + struct ttm_operation_ctx ctx = { false, false }; + struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; + struct amdgpu_bo_va_mapping *mapping; + struct amdgpu_bo *bo; + int r; + + addr &= AMDGPU_GMC_HOLE_MASK; + if (addr & 0x7) { + DRM_ERROR("VCN messages must be 8 byte aligned!\n"); + return -EINVAL; + } + + mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE); + if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo) + return -EINVAL; + + bo = mapping->bo_va->base.bo; + if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED)) + return 0; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) { + DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r); + return r; + } + + return r; +} + +static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, + struct amdgpu_job *job, + struct amdgpu_ib *ib) +{ + uint32_t msg_lo = 0, msg_hi = 0; + int i, r; + + if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) + return 0; + + for (i = 0; i < ib->length_dw; i += 2) { + uint32_t reg = amdgpu_ib_get_value(ib, i); + uint32_t val = amdgpu_ib_get_value(ib, i + 1); + + if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) { + msg_lo = val; + } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) { + msg_hi = val; + } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) { + r = vcn_v1_0_validate_bo(p, job, + ((u64)msg_hi) << 32 | msg_lo); + if (r) + return r; + } + } + + return 0; +} + static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_DEC, .align_mask = 0xf, @@ -1910,6 +1980,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .get_rptr = vcn_v1_0_dec_ring_get_rptr, .get_wptr = vcn_v1_0_dec_ring_get_wptr, .set_wptr = vcn_v1_0_dec_ring_set_wptr, + .patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place, .emit_frame_size = 6 + 6 + /* hdp invalidate / flush */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index c87263ed20ec..cb5f0a12333f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -219,6 +219,11 @@ static int vcn_v3_0_sw_init(void *handle) cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) | cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB); fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED); + fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG; + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2)) + fw_shared->smu_interface_info.smu_interface_type = 2; + else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1)) + fw_shared->smu_interface_info.smu_interface_type = 1; if (amdgpu_vcnfw_log) amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); @@ -575,8 +580,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); /* VCN global tiling registers */ - WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET( - UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( + UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } static void 
vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst) @@ -1480,8 +1485,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; uint32_t tmp; + vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state); + /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 339e12c94cff..62aa6c9d5123 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -483,15 +483,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, } /* Verify module parameters regarding mapped process number*/ - if ((hws_max_conc_proc < 0) - || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { - dev_err(kfd_device, - "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n", - hws_max_conc_proc, kfd->vm_info.vmid_num_kfd, - kfd->vm_info.vmid_num_kfd); + if (hws_max_conc_proc >= 0) + kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); + else kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; - } else - kfd->max_proc_per_quantum = hws_max_conc_proc; /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * @@ -536,7 +531,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto kfd_doorbell_error; } - kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; + if (amdgpu_use_xgmi_p2p) + kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; kfd->noretry = kfd->adev->gmc.noretry; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index deecccebe5b6..64f4a51cc880 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -749,6 +749,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) event_waiters = kmalloc_array(num_events, sizeof(struct kfd_event_waiter), GFP_KERNEL); + if (!event_waiters) + return NULL; for (i = 0; (event_waiters) && (i < num_events) ; i++) { init_wait(&event_waiters[i].wait); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index e4beebb1c80a..f2e1d506ba21 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -247,15 +247,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) return ret; } - ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, - O_RDWR); - if (ret < 0) { - kfifo_free(&client->fifo); - kfree(client); - return ret; - } - *fd = ret; - init_waitqueue_head(&client->wait_queue); spin_lock_init(&client->lock); client->events = 0; @@ -265,5 +256,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) list_add_rcu(&client->list, &dev->smi_clients); spin_unlock(&dev->smi_lock); + ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, + O_RDWR); + if (ret < 0) { + spin_lock(&dev->smi_lock); + list_del_rcu(&client->list); + spin_unlock(&dev->smi_lock); + + synchronize_rcu(); + + kfifo_free(&client->fifo); + kfree(client); + return ret; + } + *fd = ret; + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b30656959fd8..62139ff35476 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2714,7 +2714,8 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->mst_port) + if (aconnector->dc_link && + aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); @@ -3972,7 +3973,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap max - min); } -static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, int bl_idx, u32 user_brightness) { @@ -4003,7 +4004,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); } - return rc ? 0 : 1; + if (rc) + dm->actual_brightness[bl_idx] = user_brightness; } static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) @@ -9947,7 +9949,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* restore the backlight level */ for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i] && - (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i])) + (dm->actual_brightness[i] != dm->brightness[i])) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 6a908d736d6a..7e44b0429448 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -540,6 +540,12 @@ struct amdgpu_display_manager { * cached backlight values. */ u32 brightness[AMDGPU_DM_MAX_NUM_EDP]; + /** + * @actual_brightness: + * + * last successfully applied backlight values. 
+ */ + u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; }; enum dsc_clock_force_state { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index dfba6138f538..26feefbb8990 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce) clk_mgr_dce->dprefclk_ss_percentage = info.spread_spectrum_percentage; } - if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss) + if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss) clk_mgr_dce->dprefclk_ss_percentage = 0; } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index edda572dc570..8be4c1970628 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -436,57 +436,84 @@ static void dcn315_clk_mgr_helper_populate_bw_params( struct integrated_info *bios_info, const DpmClocks_315_t *clock_table) { - int i, j; + int i; struct clk_bw_params *bw_params = clk_mgr->base.bw_params; - uint32_t max_dispclk = 0, max_dppclk = 0; - - j = -1; - - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); - - /* Find lowest DPM, FCLK is filled in reverse order*/ - - for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) { - if (clock_table->DfPstateTable[i].FClk != 0) { - j = i; - break; + uint32_t max_dispclk, max_dppclk, max_pstate, max_socclk, max_fclk = 0, min_pstate = 0; + struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1]; + + max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled); + max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled); + max_socclk = find_max_clk_value(clock_table->SocClocks, clock_table->NumSocClkLevelsEnabled); + + /* Find highest fclk pstate */ + for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) { + if (clock_table->DfPstateTable[i].FClk > max_fclk) { + max_fclk = clock_table->DfPstateTable[i].FClk; + max_pstate = i; } } - if (j == -1) { - /* clock table is all 0s, just use our own hardcode */ - ASSERT(0); - return; - } - - bw_params->clk_table.num_entries = j + 1; - - /* dispclk and dppclk can be max at any voltage, same number of levels for both */ - if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS && - clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) { - max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled); - max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled); - } else { - ASSERT(0); - } + /* For 315 we want to base clock table on dcfclk, need at least one entry regardless of pmfw table */ + for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) { + int j; + uint32_t min_fclk = clock_table->DfPstateTable[0].FClk; - for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { - int temp; + for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) { + if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i] + && clock_table->DfPstateTable[j].FClk < min_fclk) { + min_fclk = clock_table->DfPstateTable[j].FClk; + min_pstate = j; + } + } - bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk; - bw_params->clk_table.entries[i].memclk_mhz = 
clock_table->DfPstateTable[j].MemClk; - bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage; + bw_params->clk_table.entries[i].fclk_mhz = min_fclk; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk; + bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage; + bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i]; + bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i]; + bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; + bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; bw_params->clk_table.entries[i].wck_ratio = 1; - temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage); - if (temp) - bw_params->clk_table.entries[i].dcfclk_mhz = temp; - temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage); - if (temp) - bw_params->clk_table.entries[i].socclk_mhz = temp; + }; + + /* Make sure to include at least one entry and highest pstate */ + if (max_pstate != min_pstate) { + bw_params->clk_table.entries[i].fclk_mhz = max_fclk; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk; + bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage; + bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage( + clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[max_pstate].Voltage); + bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage( + clock_table, clock_table->SocClocks, clock_table->DfPstateTable[max_pstate].Voltage); bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk; bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk; + bw_params->clk_table.entries[i].wck_ratio = 1; + i++; } + bw_params->clk_table.num_entries = i; + + /* Include highest socclk */ + if (bw_params->clk_table.entries[i-1].socclk_mhz < max_socclk) + bw_params->clk_table.entries[i-1].socclk_mhz = max_socclk; + /* Set any 0 clocks to max default setting. 
Not an issue for + * power since we aren't doing switching in such case anyway + */ + for (i = 0; i < bw_params->clk_table.num_entries; i++) { + if (!bw_params->clk_table.entries[i].fclk_mhz) { + bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz; + bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz; + bw_params->clk_table.entries[i].voltage = def_max.voltage; + } + if (!bw_params->clk_table.entries[i].dcfclk_mhz) + bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz; + if (!bw_params->clk_table.entries[i].socclk_mhz) + bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz; + if (!bw_params->clk_table.entries[i].dispclk_mhz) + bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz; + if (!bw_params->clk_table.entries[i].dppclk_mhz) + bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz; + } bw_params->vram_type = bios_info->memory_type; bw_params->num_channels = bios_info->ma_channel_number; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 880ffea2afc6..2600313fea57 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -80,8 +80,8 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D #define VBIOSSMC_MSG_SetDppclkFreq 0x06 ///< Set DPP clock frequency in MHZ #define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x07 ///< Set DCF clock frequency hard min in MHZ #define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ -#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency -#define VBIOSSMC_MSG_GetFclkFrequency 0x0A ///< Get FCLK frequency, return frequemcy in MHZ +#define VBIOSSMC_MSG_GetDtbclkFreq 0x09 ///< Get display dtb clock frequency in MHZ in case VMIN does not support phy frequency +#define VBIOSSMC_MSG_SetDtbClk 0x0A ///< Set dtb clock frequency, return frequemcy in MHZ #define VBIOSSMC_MSG_SetDisplayCount 0x0B ///< Inform PMFW of number of display connected #define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x0C ///< To ask PMFW turn off TMDP 48MHz refclk during display off to save power #define VBIOSSMC_MSG_UpdatePmeRestore 0x0D ///< To ask PMFW to write into Azalia for PME wake up event @@ -324,15 +324,26 @@ int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr) return (dprefclk_get_mhz * 1000); } -int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr) +int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr) { int fclk_get_mhz = -1; if (clk_mgr->smu_present) { fclk_get_mhz = dcn315_smu_send_msg_with_param( clk_mgr, - VBIOSSMC_MSG_GetFclkFrequency, + VBIOSSMC_MSG_GetDtbclkFreq, 0); } return (fclk_get_mhz * 1000); } + +void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable) +{ + if (!clk_mgr->smu_present) + return; + + dcn315_smu_send_msg_with_param( + clk_mgr, + VBIOSSMC_MSG_SetDtbClk, + enable); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h index 66fa42f8dd18..5aa3275ac7d8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h @@ -37,6 +37,7 @@ #define NUM_SOC_VOLTAGE_LEVELS 4 #define NUM_DF_PSTATE_LEVELS 4 + typedef struct { uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) 
uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) @@ -124,5 +125,6 @@ void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); void dcn315_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr); int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr); -int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr); +int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr); +void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable); #endif /* DAL_DC_315_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 702d00ce7da4..3121dd2d2a91 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -686,8 +686,8 @@ void dcn316_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base); clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = - dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); + /*clk_mgr->base.dccg->ref_dtbclk_khz = + dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/ clk_mgr->base.base.bw_params = &dcn316_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index f6e19efea756..c436db416708 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2389,6 +2389,8 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->mst_bw_update) su_flags->bits.mst_bw = 1; + if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) + su_flags->bits.crtc_timing_adjust = 1; if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -2650,6 +2652,9 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vrr_infopacket) stream->vrr_infopacket = *update->vrr_infopacket; + if (update->crtc_timing_adjust) + stream->adjust = *update->crtc_timing_adjust; + if (update->dpms_off) stream->dpms_off = *update->dpms_off; @@ -4051,3 +4056,17 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); } +/* + * dc_extended_blank_supported: Decide whether extended blank is supported + * + * Extended blank is a freesync optimization feature to be enabled in the future. + * During the extra vblank period gained from freesync, we have the ability to enter z9/z10. 
+ * + * @param [in] dc: Current DC state + * @return: Indicate whether extended blank is supported (true or false) + */ +bool dc_extended_blank_supported(struct dc *dc) +{ + return dc->debug.extended_blank_optimization && !dc->debug.disable_z10 + && dc->caps.zstate_support && dc->caps.is_apu; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index cb87dd643180..bbaa5abdf888 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -983,8 +983,7 @@ static bool should_verify_link_capability_destructively(struct dc_link *link, destrictive = false; } } - } else if (dc_is_hdmi_signal(link->local_sink->sink_signal)) - destrictive = true; + } return destrictive; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 351081f574cb..22dabe596dfc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -5216,6 +5216,62 @@ static void retrieve_cable_id(struct dc_link *link) &link->dpcd_caps.cable_id, &usbc_cable_id); } +/* DPRX may take some time to respond to AUX messages after HPD asserted. + * If AUX read unsuccessful, try to wake unresponsive DPRX by toggling DPCD SET_POWER (0x600). + */ +static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout_ms) +{ + enum dc_status status = DC_ERROR_UNEXPECTED; + uint8_t dpcd_data = 0; + uint64_t start_ts = 0; + uint64_t current_ts = 0; + uint64_t time_taken_ms = 0; + enum dc_connection_type type = dc_connection_none; + + status = core_link_read_dpcd( + link, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + &dpcd_data, + sizeof(dpcd_data)); + + if (status != DC_OK) { + DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.", + __func__, + timeout_ms); + start_ts = dm_get_timestamp(link->ctx); + + do { + if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) + break; + + dpcd_data = DP_SET_POWER_D3; + status = core_link_write_dpcd( + link, + DP_SET_POWER, + &dpcd_data, + sizeof(dpcd_data)); + + dpcd_data = DP_SET_POWER_D0; + status = core_link_write_dpcd( + link, + DP_SET_POWER, + &dpcd_data, + sizeof(dpcd_data)); + + current_ts = dm_get_timestamp(link->ctx); + time_taken_ms = div_u64(dm_get_elapse_time_in_ns(link->ctx, current_ts, start_ts), 1000000); + } while (status != DC_OK && time_taken_ms < timeout_ms); + + DC_LOG_WARNING("%s: DPCD SET_POWER %s after %lld ms%s", + __func__, + (status == DC_OK) ? "succeeded" : "failed", + time_taken_ms, + (type == dc_connection_none) ? ". Unplugged." : "."); + } + + return status; +} + static bool retrieve_link_cap(struct dc_link *link) { /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, @@ -5251,6 +5307,15 @@ static bool retrieve_link_cap(struct dc_link *link) dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); + /* Try to ensure AUX channel active before proceeding. */ + if (link->dc->debug.aux_wake_wa.bits.enable_wa) { + uint64_t timeout_ms = link->dc->debug.aux_wake_wa.bits.timeout_ms; + + if (link->dc->debug.aux_wake_wa.bits.use_default_timeout) + timeout_ms = LINK_AUX_WAKE_TIMEOUT_MS; + status = wa_try_to_wake_dprx(link, timeout_ms); + } + is_lttpr_present = dp_retrieve_lttpr_cap(link); /* Read DP tunneling information. 
*/ status = dpcd_get_tunneling_device_data(link); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 7af153434e9e..d251c3f3a714 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1685,8 +1685,8 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; - // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks - if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + /*compare audio info*/ + if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0) return false; return true; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4ffab7bb1098..9e79f60e6129 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -188,6 +188,7 @@ struct dc_caps { bool psp_setup_panel_mode; bool extended_aux_timeout_support; bool dmcub_support; + bool zstate_support; uint32_t num_of_internal_disp; enum dp_protocol_version max_dp_protocol_version; unsigned int mall_size_per_mem_channel; @@ -339,6 +340,7 @@ struct dc_config { bool is_asymmetric_memory; bool is_single_rank_dimm; bool use_pipe_ctx_sync_logic; + bool ignore_dpref_ss; }; enum visual_confirm { @@ -525,6 +527,22 @@ union dpia_debug_options { uint32_t raw; }; +/* AUX wake work around options + * 0: enable/disable work around + * 1: use default timeout LINK_AUX_WAKE_TIMEOUT_MS + * 15-2: reserved + * 31-16: timeout in ms + */ +union aux_wake_wa_options { + struct { + uint32_t enable_wa : 1; + uint32_t use_default_timeout : 1; + uint32_t rsvd: 14; + uint32_t timeout_ms : 16; + } bits; + uint32_t raw; +}; + struct dc_debug_data { uint32_t ltFailCount; uint32_t i2cErrorCount; @@ -703,14 +721,15 @@ struct dc_debug_options { bool enable_driver_sequence_debug; enum det_size crb_alloc_policy; int crb_alloc_policy_min_disp_count; -#if defined(CONFIG_DRM_AMD_DC_DCN) bool disable_z10; +#if defined(CONFIG_DRM_AMD_DC_DCN) bool enable_z9_disable_interface; bool enable_sw_cntl_psr; union dpia_debug_options dpia_debug; #endif bool apply_vendor_specific_lttpr_wa; - bool ignore_dpref_ss; + bool extended_blank_optimization; + union aux_wake_wa_options aux_wake_wa; uint8_t psr_power_use_phy_fsm; }; @@ -1369,6 +1388,8 @@ struct dc_sink_init_data { bool converter_disable_audio; }; +bool dc_extended_blank_supported(struct dc *dc); + struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params); /* Newer interfaces */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 99a750f561f8..c4168c11257c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -131,6 +131,7 @@ union stream_update_flags { uint32_t wb_update:1; uint32_t dsc_changed : 1; uint32_t mst_bw : 1; + uint32_t crtc_timing_adjust : 1; } bits; uint32_t raw; @@ -289,6 +290,7 @@ struct dc_stream_update { struct dc_3dlut *lut3d_func; struct test_pattern *pending_test_pattern; + struct dc_crtc_timing_adjust *crtc_timing_adjust; }; bool dc_is_stream_unchanged( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index c3e141c19a77..83fbea2df410 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1497,16 +1497,12 @@ void dcn10_init_hw(struct dc *dc) link->link_status.link_active = true; } - /* Power gate DSCs */ - if (!is_optimized_init_done) { - for (i = 0; i < res_pool->res_cap->num_dsc; i++) - if (hws->funcs.dsc_pg_control != NULL) - hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); - } - /* we want to turn off all dp displays before doing detection */ dc_link_blank_all_dp_displays(dc); + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. @@ -1559,8 +1555,6 @@ void dcn10_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); @@ -2056,7 +2050,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, { struct dc_context *dc_ctx = dc->ctx; int i, master = -1, embedded = -1; - struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0}; + struct dc_crtc_timing *hw_crtc_timing; uint64_t phase[MAX_PIPES]; uint64_t modulo[MAX_PIPES]; unsigned int pclk; @@ -2067,6 +2061,10 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, uint32_t dp_ref_clk_100hz = dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10; + hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL); + if (!hw_crtc_timing) + return master; + if (dc->config.vblank_alignment_dto_params && dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) { embedded_h_total = @@ -2130,6 +2128,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, } } + + kfree(hw_crtc_timing); return master; } @@ -2522,14 +2522,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index ab910deed481..b627c41713cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1857,6 +1857,7 @@ void dcn20_optimize_bandwidth( struct dc_state *context) { struct hubbub *hubbub = dc->res_pool->hubbub; + int i; /* program dchubbub watermarks */ hubbub->funcs->program_watermarks(hubbub, @@ -1873,6 +1874,17 @@ void dcn20_optimize_bandwidth( dc->clk_mgr, context, true); + if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) { + for 
(i = 0; i < dc->res_pool->pipe_count; ++i) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank + && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max + && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total) + pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp, + pipe_ctx->dlg_regs.optimized_min_dst_y_next_start); + } + } /* increase compbuf size */ if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); @@ -2332,14 +2344,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d473708d5399..7802d603f796 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1976,7 +1976,6 @@ int dcn20_validate_apply_pipe_split_flags( /*If need split for odm but 4 way split already*/ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) || !pipe->next_odm_pipe)) { - ASSERT(0); /* NOT expected yet */ merge[i] = true; /* 4 -> 2 ODM */ } else if (split[i] == 0 && pipe->prev_odm_pipe) { ASSERT(0); /* NOT expected yet */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 612732656772..3fe4bfbb98a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -644,7 +644,7 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .min_disp_clk_khz = 100000, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index ed0a0e5fd805..f61ec8763844 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -547,6 +547,9 @@ void dcn30_init_hw(struct dc *dc) /* we want to turn off all dp displays before doing detection */ dc_link_blank_all_dp_displays(dc); + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we 
want to use. @@ -624,8 +627,6 @@ void dcn30_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 3e6d6ebd199e..51c5f3685470 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -1042,5 +1042,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31, hubbub31->detile_buf_size = det_size_kb * 1024; hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024; hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB; + + hubbub31->debug_test_index_pstate = 0x6; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c index 53b792b997b7..8ae6117953ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c @@ -54,6 +54,13 @@ void hubp31_soft_reset(struct hubp *hubp, bool reset) REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset); } +void hubp31_program_extended_blank(struct hubp *hubp, unsigned int min_dst_y_next_start_optimized) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + REG_SET(BLANK_OFFSET_1, 0, MIN_DST_Y_NEXT_START, min_dst_y_next_start_optimized); +} + static struct hubp_funcs dcn31_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -80,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = { .set_unbounded_requesting = hubp31_set_unbounded_requesting, .hubp_soft_reset = hubp31_soft_reset, .hubp_in_blank = hubp1_in_blank, + .program_extended_blank = hubp31_program_extended_blank, }; bool hubp31_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 4be228680909..631d8ac63aa4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -199,6 +199,9 @@ void dcn31_init_hw(struct dc *dc) /* we want to turn off all dp displays before doing detection */ dc_link_blank_all_dp_displays(dc); + if (hws->funcs.enable_power_gating_plane) + hws->funcs.enable_power_gating_plane(dc->hwseq, true); + /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which * pipes we want to use. 
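Aside (not part of the diff): the extended-blank pieces added across the hunks above combine one global gate with one per-pipe check — dc_extended_blank_supported() must return true and the context must report DCN_ZSTATE_SUPPORT_ALLOW, and a pipe only qualifies when FreeSync has pinned v_total (min == max) above the nominal timing, leaving extra blank lines to hand to program_extended_blank(). Below is a simplified, self-contained sketch of those two checks with stand-in types; it is not the driver's code, just the decision logic restated.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types; the driver uses dc_crtc_timing_adjust and dc_crtc_timing. */
struct stream_adjust { int v_total_min, v_total_max; };
struct stream_timing { int v_total; };

/* Mirrors dc_extended_blank_supported(): every gate must hold. */
static bool extended_blank_supported(bool extended_blank_optimization,
				     bool disable_z10, bool zstate_support,
				     bool is_apu)
{
	return extended_blank_optimization && !disable_z10 &&
	       zstate_support && is_apu;
}

/*
 * Mirrors the per-pipe condition in the dcn20_optimize_bandwidth() hunk:
 * v_total is pinned (min == max) above the nominal timing, so the extra
 * blank lines can be programmed as extended blank.
 */
static bool pipe_has_extra_blank(struct stream_adjust adj,
				 struct stream_timing timing)
{
	return adj.v_total_min == adj.v_total_max &&
	       adj.v_total_max > timing.v_total;
}

int main(void)
{
	/* Made-up example values: a 1125-line mode stretched to 1600 lines. */
	struct stream_adjust adj = { .v_total_min = 1600, .v_total_max = 1600 };
	struct stream_timing timing = { .v_total = 1125 };

	printf("supported=%d extra_blank=%d\n",
	       extended_blank_supported(true, false, true, true),
	       pipe_has_extra_blank(adj, timing));
	return 0;
}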
@@ -248,8 +251,6 @@ void dcn31_init_hw(struct dc *dc) REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); } - if (hws->funcs.enable_power_gating_plane) - hws->funcs.enable_power_gating_plane(dc->hwseq, true); if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); @@ -338,20 +339,20 @@ void dcn31_enable_power_gating_plane( bool enable) { bool force_on = true; /* disable power gating */ + uint32_t org_ip_request_cntl = 0; if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate) force_on = false; + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); /* DCHUBP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); /* DPP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); force_on = true; /* disable power gating */ if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate) @@ -359,11 +360,11 @@ void dcn31_enable_power_gating_plane( /* DCS0/1/2/3/4/5 */ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); - REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000); + + if (org_ip_request_cntl == 0) + REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); } void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c index 8afe2130d7c5..e05527a3a8ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c @@ -124,7 +124,6 @@ static bool optc31_enable_crtc(struct timing_generator *optc) static bool optc31_disable_crtc(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - /* disable otg request until end of the first line * in the vertical blank region */ @@ -138,6 +137,7 @@ static bool optc31_disable_crtc(struct timing_generator *optc) REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); + optc1_clear_optc_underflow(optc); return true; } @@ -158,6 +158,9 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc) OTG_BUSY, 0, 1, 100000); + /* clear the false state */ + optc1_clear_optc_underflow(optc); + return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 89b7b6b7254a..63934ecf6be8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -2032,7 +2032,9 @@ bool dcn31_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); + DC_FP_START(); out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + DC_FP_END(); // Disable fast_validate to set 
min dcfclk in alculate_wm_and_dlg if (pipe_cnt == 0) @@ -2232,6 +2234,7 @@ static bool dcn31_resource_construct( dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.is_apu = true; + dc->caps.zstate_support = true; /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 2f6122153bdb..f93af45aeab4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -722,8 +722,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc { int plane_count; int i; + unsigned int optimized_min_dst_y_next_start_us; plane_count = 0; + optimized_min_dst_y_next_start_us = 0; for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].plane_state) plane_count++; @@ -744,11 +746,22 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc struct dc_link *link = context->streams[0]->sink->link; struct dc_stream_status *stream_status = &context->stream_status[0]; + if (dc_extended_blank_supported(dc)) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream == context->streams[0] + && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max + && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) { + optimized_min_dst_y_next_start_us = + context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us; + break; + } + } + } /* zstate only supported on PWRSEQ0 and when there's <2 planes*/ if (link->link_index != 0 || stream_status->plane_count > 1) return DCN_ZSTATE_SUPPORT_DISALLOW; - if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) + if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000) return DCN_ZSTATE_SUPPORT_ALLOW; else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr) return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; @@ -786,8 +799,6 @@ void dcn20_calculate_dlg_params( != dm_dram_clock_change_unsupported; context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; - context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context); - context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) @@ -843,6 +854,7 @@ void dcn20_calculate_dlg_params( &pipes[pipe_idx].pipe); pipe_idx++; } + context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context); } static void swizzle_to_dml_params( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index e0fecf127bd5..53d760e169e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -1055,6 +1055,7 @@ static void dml_rq_dlg_get_dlg_params( float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA + int blank_lines; memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs)); memset(disp_ttu_regs, 0, 
sizeof(*disp_ttu_regs)); @@ -1080,6 +1081,18 @@ static void dml_rq_dlg_get_dlg_params( dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start; disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2)); + blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1); + if (blank_lines < 0) + blank_lines = 0; + if (blank_lines != 0) { + disp_dlg_regs->optimized_min_dst_y_next_start_us = + ((unsigned int) blank_lines * dst->hactive) / (unsigned int) dst->pixel_rate_mhz; + disp_dlg_regs->optimized_min_dst_y_next_start = + (unsigned int)(((double) (dlg_vblank_start + blank_lines)) * dml_pow(2, 2)); + } else { + // use unoptimized value + disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start; + } ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18)); dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 59f0a61c33cf..2df660cd8801 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -446,6 +446,8 @@ struct _vcs_dpi_display_dlg_regs_st { unsigned int refcyc_h_blank_end; unsigned int dlg_vblank_end; unsigned int min_dst_y_next_start; + unsigned int optimized_min_dst_y_next_start; + unsigned int optimized_min_dst_y_next_start_us; unsigned int refcyc_per_htotal; unsigned int refcyc_x_after_scaler; unsigned int dst_y_after_scaler; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index efc2339f1fa0..4385d19bc489 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -864,11 +864,11 @@ static bool setup_dsc_config( min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); } + is_dsc_possible = (min_slices_h <= max_slices_h); + if (pic_width % min_slices_h != 0) min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first? - is_dsc_possible = (min_slices_h <= max_slices_h); - if (min_slices_h == 0 && max_slices_h == 0) is_dsc_possible = false; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index ab9939db8cea..44f167d2584f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -33,6 +33,7 @@ #define MAX_MTP_SLOT_COUNT 64 #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 #define TRAINING_AUX_RD_INTERVAL 100 //us +#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX. 
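Aside (not part of the diff): the union aux_wake_wa_options added in dc.h packs the AUX wake workaround controls into one 32-bit debug word, and the retrieve_link_cap() hunk falls back to LINK_AUX_WAKE_TIMEOUT_MS when use_default_timeout is set. A minimal stand-alone sketch of that decoding follows; it assumes the usual little-endian bit-field layout and uses made-up test values.

#include <stdint.h>
#include <stdio.h>

#define LINK_AUX_WAKE_TIMEOUT_MS 1500	/* default timeout, as defined above */

/* Same field layout as union aux_wake_wa_options in dc.h */
union aux_wake_wa_options {
	struct {
		uint32_t enable_wa : 1;
		uint32_t use_default_timeout : 1;
		uint32_t rsvd : 14;
		uint32_t timeout_ms : 16;
	} bits;
	uint32_t raw;
};

/* Returns 0 when the workaround is disabled, otherwise the timeout in ms. */
static uint64_t aux_wake_timeout_ms(uint32_t raw)
{
	union aux_wake_wa_options wa = { .raw = raw };

	if (!wa.bits.enable_wa)
		return 0;
	if (wa.bits.use_default_timeout)
		return LINK_AUX_WAKE_TIMEOUT_MS;
	return wa.bits.timeout_ms;
}

int main(void)
{
	/* bit 0 set, bits 31:16 = 0x02bc -> explicit 700 ms timeout */
	printf("%llu ms\n", (unsigned long long)aux_wake_timeout_ms(0x02bc0001u));
	/* bits 0 and 1 set -> fall back to the 1500 ms default */
	printf("%llu ms\n", (unsigned long long)aux_wake_timeout_ms(0x00000003u));
	return 0;
}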
struct dc_link; struct dc_stream_state; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index e45b7993c5c5..ad69d78c4ac3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -195,6 +195,9 @@ struct hubp_funcs { void (*hubp_set_flip_int)(struct hubp *hubp); + void (*program_extended_blank)(struct hubp *hubp, + unsigned int min_dst_y_next_start_optimized); + void (*hubp_wait_pipe_read_start)(struct hubp *hubp); }; diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index b691aa45e84f..79bc207415bc 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -100,7 +100,8 @@ enum vsc_packet_revision { //PB7 = MD0 #define MASK_VTEM_MD0__VRR_EN 0x01 #define MASK_VTEM_MD0__M_CONST 0x02 -#define MASK_VTEM_MD0__RESERVED2 0x0C +#define MASK_VTEM_MD0__QMS_EN 0x04 +#define MASK_VTEM_MD0__RESERVED2 0x08 #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0 //MD1 @@ -109,7 +110,7 @@ enum vsc_packet_revision { //MD2 #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03 #define MASK_VTEM_MD2__RB 0x04 -#define MASK_VTEM_MD2__RESERVED3 0xF8 +#define MASK_VTEM_MD2__NEXT_TFR 0xF8 //MD3 #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 89fbee568be4..5504d81c77b7 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -173,6 +173,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) if (!pp_funcs || !pp_funcs->get_asic_baco_capability) return false; + /* Don't use baco for reset in S3. + * This is a workaround for some platforms + * where entering BACO during suspend + * seems to cause reboots or hangs. + * This might be related to the fact that BACO controls + * power to the whole GPU including devices like audio and USB. + * Powering down/up everything may adversely affect these other + * devices. Needs more investigation. + */ + if (adev->in_s3) + return false; mutex_lock(&adev->pm.mutex); @@ -500,6 +511,9 @@ int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) struct smu_context *smu = adev->powerplay.pp_handle; int ret = 0; + if (!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + mutex_lock(&adev->pm.mutex); ret = smu_send_hbm_bad_pages_num(smu, size); mutex_unlock(&adev->pm.mutex); @@ -512,6 +526,9 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si struct smu_context *smu = adev->powerplay.pp_handle; int ret = 0; + if (!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + mutex_lock(&adev->pm.mutex); ret = smu_send_hbm_bad_channel_flag(smu, size); mutex_unlock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 9ddd8491ff00..ede71de2343d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, hwmgr->display_config->num_display > 3 ? 
- data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk : + (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) : min_mclk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, @@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk, + data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 7bfac029e513..b81711c4ff33 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -991,7 +991,7 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu, return -EINVAL; } - if (sclk_min && sclk_max) { + if (sclk_min && sclk_max && smu_v13_0_5_clk_dpm_is_enabled(smu, SMU_SCLK)) { ret = smu_v13_0_5_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_min, diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 026e4e29a0f3..f4df344509a8 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -214,6 +214,29 @@ int drm_of_encoder_active_endpoint(struct device_node *node, } EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint); +static int find_panel_or_bridge(struct device_node *node, + struct drm_panel **panel, + struct drm_bridge **bridge) +{ + if (panel) { + *panel = of_drm_find_panel(node); + if (!IS_ERR(*panel)) + return 0; + + /* Clear the panel pointer in case of error. */ + *panel = NULL; + } + + /* No panel found yet, check for a bridge next. */ + if (bridge) { + *bridge = of_drm_find_bridge(node); + if (*bridge) + return 0; + } + + return -EPROBE_DEFER; +} + /** * drm_of_find_panel_or_bridge - return connected panel or bridge device * @np: device tree node containing encoder output ports @@ -236,66 +259,44 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, struct drm_panel **panel, struct drm_bridge **bridge) { - int ret = -EPROBE_DEFER; - struct device_node *remote; + struct device_node *node; + int ret; if (!panel && !bridge) return -EINVAL; + if (panel) *panel = NULL; - - /** - * Devices can also be child nodes when we also control that device - * through the upstream device (ie, MIPI-DCS for a MIPI-DSI device). - * - * Lookup for a child node of the given parent that isn't either port - * or ports. - */ - for_each_available_child_of_node(np, remote) { - if (of_node_name_eq(remote, "port") || - of_node_name_eq(remote, "ports")) - continue; - - goto of_find_panel_or_bridge; + if (bridge) + *bridge = NULL; + + /* Check for a graph on the device node first. 
*/ + if (of_graph_is_present(np)) { + node = of_graph_get_remote_node(np, port, endpoint); + if (node) { + ret = find_panel_or_bridge(node, panel, bridge); + of_node_put(node); + + if (!ret) + return 0; + } } - /* - * of_graph_get_remote_node() produces a noisy error message if port - * node isn't found and the absence of the port is a legit case here, - * so at first we silently check whether graph presents in the - * device-tree node. - */ - if (!of_graph_is_present(np)) - return -ENODEV; - - remote = of_graph_get_remote_node(np, port, endpoint); - -of_find_panel_or_bridge: - if (!remote) - return -ENODEV; + /* Otherwise check for any child node other than port/ports. */ + for_each_available_child_of_node(np, node) { + if (of_node_name_eq(node, "port") || + of_node_name_eq(node, "ports")) + continue; - if (panel) { - *panel = of_drm_find_panel(remote); - if (!IS_ERR(*panel)) - ret = 0; - else - *panel = NULL; - } - - /* No panel found yet, check for a bridge next. */ - if (bridge) { - if (ret) { - *bridge = of_drm_find_bridge(remote); - if (*bridge) - ret = 0; - } else { - *bridge = NULL; - } + ret = find_panel_or_bridge(node, panel, bridge); + of_node_put(node); + /* Stop at the first found occurrence. */ + if (!ret) + return 0; } - of_node_put(remote); - return ret; + return -EPROBE_DEFER; } EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index c3ea243d414d..0c5c43852e24 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -70,7 +70,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * mmap ioctl is disallowed for all discrete platforms, * and for all platforms with GRAPHICS_VER > 12. */ - if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12) + if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0)) return -EOPNOTSUPP; if (args->flags & ~(I915_MMAP_WC)) diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 87428fb23d9f..a2277a0d6d06 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np); struct imx_hdmi *hdmi; + int ret; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) @@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev) hdmi->bridge = of_drm_find_bridge(np); if (!hdmi->bridge) { dev_err(hdmi->dev, "Unable to find bridge\n"); + dw_hdmi_remove(hdmi->hdmi); return -ENODEV; } - return component_add(&pdev->dev, &dw_hdmi_imx_ops); + ret = component_add(&pdev->dev, &dw_hdmi_imx_ops); + if (ret) + dw_hdmi_remove(hdmi->hdmi); + + return ret; } static int dw_hdmi_imx_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index e5078d03020d..fb0e951248f6 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev, edidp = of_get_property(child, "edid", &edid_len); if (edidp) { channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL); + if (!channel->edid) + return -ENOMEM; } else if (!channel->panel) { /* fallback to display-timings node */ ret = of_get_drm_display_mode(child, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 
06cb1a59b9bc..63ba2ad84679 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) ret = of_get_drm_display_mode(np, &imxpd->mode, &imxpd->bus_flags, OF_USE_NATIVE_MODE); - if (ret) + if (ret) { + drm_mode_destroy(connector->dev, mode); return ret; + } drm_mode_copy(mode, &imxpd->mode); mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 83c31b2ad865..ccc4fcf7a630 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu) return ERR_CAST(mmu); return msm_gem_address_space_create(mmu, - "gpu", 0x100000000ULL, 0x1ffffffffULL); + "gpu", 0x100000000ULL, SZ_4G); } static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 89cfd84760d7..8706bcdd1472 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = { {} }; -#ifdef CONFIG_PM -static int adreno_resume(struct device *dev) +static int adreno_runtime_resume(struct device *dev) { struct msm_gpu *gpu = dev_to_gpu(dev); return gpu->funcs->pm_resume(gpu); } -static int active_submits(struct msm_gpu *gpu) +static int adreno_runtime_suspend(struct device *dev) { - int active_submits; - mutex_lock(&gpu->active_lock); - active_submits = gpu->active_submits; - mutex_unlock(&gpu->active_lock); - return active_submits; + struct msm_gpu *gpu = dev_to_gpu(dev); + + /* + * We should be holding a runpm ref, which will prevent + * runtime suspend. In the system suspend path, we've + * already waited for active jobs to complete. + */ + WARN_ON_ONCE(gpu->active_submits); + + return gpu->funcs->pm_suspend(gpu); +} + +static void suspend_scheduler(struct msm_gpu *gpu) +{ + int i; + + /* + * Shut down the scheduler before we force suspend, so that + * suspend isn't racing with scheduler kthread feeding us + * more work. + * + * Note, we just want to park the thread, and let any jobs + * that are already on the hw queue complete normally, as + * opposed to the drm_sched_stop() path used for handling + * faulting/timed-out jobs. We can't really cancel any jobs + * already on the hw queue without racing with the GPU. 
+ */ + for (i = 0; i < gpu->nr_rings; i++) { + struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched; + kthread_park(sched->thread); + } } -static int adreno_suspend(struct device *dev) +static void resume_scheduler(struct msm_gpu *gpu) +{ + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched; + kthread_unpark(sched->thread); + } +} + +static int adreno_system_suspend(struct device *dev) { struct msm_gpu *gpu = dev_to_gpu(dev); - int remaining; + int remaining, ret; + + suspend_scheduler(gpu); remaining = wait_event_timeout(gpu->retire_event, - active_submits(gpu) == 0, + gpu->active_submits == 0, msecs_to_jiffies(1000)); if (remaining == 0) { dev_err(dev, "Timeout waiting for GPU to suspend\n"); - return -EBUSY; + ret = -EBUSY; + goto out; } - return gpu->funcs->pm_suspend(gpu); + ret = pm_runtime_force_suspend(dev); +out: + if (ret) + resume_scheduler(gpu); + + return ret; +} + +static int adreno_system_resume(struct device *dev) +{ + resume_scheduler(dev_to_gpu(dev)); + return pm_runtime_force_resume(dev); } -#endif static const struct dev_pm_ops adreno_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) - SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL) + SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume) + RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL) }; static struct platform_driver adreno_driver = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index c515b7cf922c..c61b5b283f08 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -54,87 +54,87 @@ struct dpu_intr_reg { * When making changes be sure to sync with dpu_hw_intr_reg */ static const struct dpu_intr_reg dpu_intr_set[] = { - { + [MDP_SSPP_TOP0_INTR] = { MDP_SSPP_TOP0_OFF+INTR_CLEAR, MDP_SSPP_TOP0_OFF+INTR_EN, MDP_SSPP_TOP0_OFF+INTR_STATUS }, - { + [MDP_SSPP_TOP0_INTR2] = { MDP_SSPP_TOP0_OFF+INTR2_CLEAR, MDP_SSPP_TOP0_OFF+INTR2_EN, MDP_SSPP_TOP0_OFF+INTR2_STATUS }, - { + [MDP_SSPP_TOP0_HIST_INTR] = { MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR, MDP_SSPP_TOP0_OFF+HIST_INTR_EN, MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS }, - { + [MDP_INTF0_INTR] = { MDP_INTF_0_OFF+INTF_INTR_CLEAR, MDP_INTF_0_OFF+INTF_INTR_EN, MDP_INTF_0_OFF+INTF_INTR_STATUS }, - { + [MDP_INTF1_INTR] = { MDP_INTF_1_OFF+INTF_INTR_CLEAR, MDP_INTF_1_OFF+INTF_INTR_EN, MDP_INTF_1_OFF+INTF_INTR_STATUS }, - { + [MDP_INTF2_INTR] = { MDP_INTF_2_OFF+INTF_INTR_CLEAR, MDP_INTF_2_OFF+INTF_INTR_EN, MDP_INTF_2_OFF+INTF_INTR_STATUS }, - { + [MDP_INTF3_INTR] = { MDP_INTF_3_OFF+INTF_INTR_CLEAR, MDP_INTF_3_OFF+INTF_INTR_EN, MDP_INTF_3_OFF+INTF_INTR_STATUS }, - { + [MDP_INTF4_INTR] = { MDP_INTF_4_OFF+INTF_INTR_CLEAR, MDP_INTF_4_OFF+INTF_INTR_EN, MDP_INTF_4_OFF+INTF_INTR_STATUS }, - { + [MDP_INTF5_INTR] = { MDP_INTF_5_OFF+INTF_INTR_CLEAR, MDP_INTF_5_OFF+INTF_INTR_EN, MDP_INTF_5_OFF+INTF_INTR_STATUS }, - { + [MDP_AD4_0_INTR] = { MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF, MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF, MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF, }, - { + [MDP_AD4_1_INTR] = { MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF, MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF, MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF, }, - { + [MDP_INTF0_7xxx_INTR] = { MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR, MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN, MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS }, - { + [MDP_INTF1_7xxx_INTR] = { MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR, 
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN, MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS }, - { + [MDP_INTF2_7xxx_INTR] = { MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR, MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN, MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS }, - { + [MDP_INTF3_7xxx_INTR] = { MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR, MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN, MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS }, - { + [MDP_INTF4_7xxx_INTR] = { MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR, MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN, MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS }, - { + [MDP_INTF5_7xxx_INTR] = { MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR, MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN, MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 1ee824600995..c478d25f7825 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_destroy_state(plane->state); kfree(to_mdp5_plane_state(plane->state)); + plane->state = NULL; mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); + if (!mdp5_state) + return; __drm_atomic_helper_plane_reset(plane, &mdp5_state->base); } diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index 5d2ff6791058..acfe1b31e079 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len, va_list va; new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL); + if (!new_blk) + return; va_start(va, fmt); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 178b774a5fbd..a42732b67349 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -580,6 +580,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) dp->dp_display.connector_type, state); mutex_unlock(&dp->event_mutex); + /* + * add fail safe mode outside event_mutex scope + * to avoid potiential circular lock with drm thread + */ + dp_panel_add_fail_safe_mode(dp->dp_display.connector); + /* uevent will complete connection part */ return 0; }; diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index f1418722c549..26c3653c99ec 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector, return rc; } +void dp_panel_add_fail_safe_mode(struct drm_connector *connector) +{ + /* fail safe edid */ + mutex_lock(&connector->dev->mode_config.mutex); + if (drm_add_modes_noedid(connector, 640, 480)) + drm_set_preferred_mode(connector, 640, 480); + mutex_unlock(&connector->dev->mode_config.mutex); +} + int dp_panel_read_sink_caps(struct dp_panel *dp_panel, struct drm_connector *connector) { @@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, goto end; } - /* fail safe edid */ - mutex_lock(&connector->dev->mode_config.mutex); - if (drm_add_modes_noedid(connector, 640, 480)) - drm_set_preferred_mode(connector, 640, 480); - mutex_unlock(&connector->dev->mode_config.mutex); - } else { - /* always add fail-safe mode as backup mode */ - mutex_lock(&connector->dev->mode_config.mutex); - drm_add_modes_noedid(connector, 640, 480); - 
mutex_unlock(&connector->dev->mode_config.mutex); + dp_panel_add_fail_safe_mode(connector); } if (panel->aux_cfg_update_done) { diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index 9023e5bb4b8b..99739ea679a7 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel); int dp_panel_deinit(struct dp_panel *dp_panel); int dp_panel_timing_cfg(struct dp_panel *dp_panel); void dp_panel_dump_regs(struct dp_panel *dp_panel); +void dp_panel_add_fail_safe_mode(struct drm_connector *connector); int dp_panel_read_sink_caps(struct dp_panel *dp_panel, struct drm_connector *connector); u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp, diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 0c1b7dde377c..9f6af0f0fe00 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) return connector; fail: - connector->funcs->destroy(msm_dsi->connector); + connector->funcs->destroy(connector); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index affa95eb05fc..9c36b505daab 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -274,7 +274,7 @@ bool msm_use_mmu(struct drm_device *dev) struct msm_drm_private *priv = dev->dev_private; /* a2xx comes with its own MMU */ - return priv->is_a2xx || iommu_present(&platform_bus_type); + return priv->is_a2xx || device_iommu_mapped(dev->dev); } static int msm_init_vram(struct drm_device *dev) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 02b9ae65a96a..a4f61972667b 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, get_pid_task(aspace->pid, PIDTYPE_PID); if (task) { comm = kstrdup(task->comm, GFP_KERNEL); + put_task_struct(task); } else { comm = NULL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index e1772211b0a4..612310d5d481 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -216,6 +216,7 @@ gm20b_pmu = { .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, .initmsg = gm20b_pmu_initmsg, + .reset = gf100_pmu_reset, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 6bf7fc1bd1e3..1a6f9c3af5ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -23,7 +23,7 @@ */ #include "priv.h" -static void +void gp102_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index ba1583bb618b..94cfb1791af6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -83,6 +83,7 @@ gp10b_pmu = { .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, .initmsg = gm20b_pmu_initmsg, + .reset = gp102_pmu_reset, }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 
bcaade758ff7..21abf31f4442 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -41,6 +41,7 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32); bool gf100_pmu_enabled(struct nvkm_pmu *); void gf100_pmu_reset(struct nvkm_pmu *); +void gp102_pmu_reset(struct nvkm_pmu *pmu); void gk110_pmu_pgob(struct nvkm_pmu *, bool); diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index a07ef26234e5..6826f4d4826a 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc, int ret; vcc = devm_regulator_get_optional(dev, "vcc"); - if (IS_ERR(vcc)) + if (IS_ERR(vcc)) { dev_err(dev, "get optional vcc failed\n"); + vcc = NULL; + } dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver, struct mipi_dbi_dev, drm); diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index 666223c6bec4..0a34e0ab4fe6 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c @@ -447,8 +447,9 @@ static void ipu_di_config_clock(struct ipu_di *di, error = rate / (sig->mode.pixelclock / 1000); - dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n", - rate, div, (signed)(error - 1000) / 10, error % 10); + dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n", + rate, div, error < 1000 ? '-' : '+', + abs(error - 1000) / 10, abs(error - 1000) % 10); /* Allow a 1% error */ if (error < 1010 && error >= 990) { diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 26d269ba947c..85a2142c9384 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel) * execute: * * (a) In the "normal (i.e., not resuming from hibernation)" path, - * the full barrier in smp_store_mb() guarantees that the store + * the full barrier in virt_store_mb() guarantees that the store * is propagated to all CPUs before the add_channel_work work * is queued. In turn, add_channel_work is queued before the * channel's ring buffer is allocated/initialized and the @@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel) * recv_int_page before retrieving the channel pointer from the * array of channels. * - * (b) In the "resuming from hibernation" path, the smp_store_mb() + * (b) In the "resuming from hibernation" path, the virt_store_mb() * guarantees that the store is propagated to all CPUs before * the VMBus connection is marked as ready for the resume event * (cf. check_ready_for_resume_event()). The interrupt handler * of the VMBus driver and vmbus_chan_sched() can not run before * vmbus_bus_resume() has completed execution (cf. resume_noirq). 
*/ - smp_store_mb( + virt_store_mb( vmbus_connection.channels[channel->offermsg.child_relid], channel); } diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 439f99b8b5de..3248b48f37f6 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/kthread.h> #include <linux/completion.h> +#include <linux/count_zeros.h> #include <linux/memory_hotplug.h> #include <linux/memory.h> #include <linux/notifier.h> @@ -1130,6 +1131,7 @@ static void post_status(struct hv_dynmem_device *dm) struct dm_status status; unsigned long now = jiffies; unsigned long last_post = last_post_time; + unsigned long num_pages_avail, num_pages_committed; if (pressure_report_delay > 0) { --pressure_report_delay; @@ -1154,16 +1156,21 @@ static void post_status(struct hv_dynmem_device *dm) * num_pages_onlined) as committed to the host, otherwise it can try * asking us to balloon them out. */ - status.num_avail = si_mem_available(); - status.num_committed = vm_memory_committed() + + num_pages_avail = si_mem_available(); + num_pages_committed = vm_memory_committed() + dm->num_pages_ballooned + (dm->num_pages_added > dm->num_pages_onlined ? dm->num_pages_added - dm->num_pages_onlined : 0) + compute_balloon_floor(); - trace_balloon_status(status.num_avail, status.num_committed, + trace_balloon_status(num_pages_avail, num_pages_committed, vm_memory_committed(), dm->num_pages_ballooned, dm->num_pages_added, dm->num_pages_onlined); + + /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */ + status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE; + status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE; + /* * If our transaction ID is no longer current, just don't * send the status. This can happen if we were interrupted @@ -1653,6 +1660,38 @@ static void disable_page_reporting(void) } } +static int ballooning_enabled(void) +{ + /* + * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE), + * since currently it's unclear to us whether an unballoon request can + * make sure all page ranges are guest page size aligned. + */ + if (PAGE_SIZE != HV_HYP_PAGE_SIZE) { + pr_info("Ballooning disabled because page size is not 4096 bytes\n"); + return 0; + } + + return 1; +} + +static int hot_add_enabled(void) +{ + /* + * Disable hot add on ARM64, because we currently rely on + * memory_add_physaddr_to_nid() to get a node id of a hot add range, + * however ARM64's memory_add_physaddr_to_nid() always return 0 and + * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for + * add_memory(). + */ + if (IS_ENABLED(CONFIG_ARM64)) { + pr_info("Memory hot add disabled on ARM64\n"); + return 0; + } + + return 1; +} + static int balloon_connect_vsp(struct hv_device *dev) { struct dm_version_request version_req; @@ -1724,8 +1763,8 @@ static int balloon_connect_vsp(struct hv_device *dev) * currently still requires the bits to be set, so we have to add code * to fail the host's hot-add and balloon up/down requests, if any. 
*/ - cap_msg.caps.cap_bits.balloon = 1; - cap_msg.caps.cap_bits.hot_add = 1; + cap_msg.caps.cap_bits.balloon = ballooning_enabled(); + cap_msg.caps.cap_bits.hot_add = hot_add_enabled(); /* * Specify our alignment requirements as it relates diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index c1dd21d0d7ef..ae68298c0dca 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -20,6 +20,7 @@ #include <linux/panic_notifier.h> #include <linux/ptrace.h> #include <linux/slab.h> +#include <linux/dma-map-ops.h> #include <asm/hyperv-tlfs.h> #include <asm/mshyperv.h> @@ -218,6 +219,16 @@ bool hv_query_ext_cap(u64 cap_query) } EXPORT_SYMBOL_GPL(hv_query_ext_cap); +void hv_setup_dma_ops(struct device *dev, bool coherent) +{ + /* + * Hyper-V does not offer a vIOMMU in the guest + * VM, so pass 0/NULL for the IOMMU settings + */ + arch_setup_dma_ops(dev, 0, 0, NULL, coherent); +} +EXPORT_SYMBOL_GPL(hv_setup_dma_ops); + bool hv_is_hibernation_supported(void) { return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4); diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 71efacb90965..3d215d9dec43 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -439,7 +439,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi) { u32 priv_read_loc = rbi->priv_read_index; - u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index); + u32 write_loc; + + /* + * The Hyper-V host writes the packet data, then uses + * store_release() to update the write_index. Use load_acquire() + * here to prevent loads of the packet data from being re-ordered + * before the read of the write_index and potentially getting + * stale data. + */ + write_loc = virt_load_acquire(&rbi->ring_buffer->write_index); if (write_loc >= priv_read_loc) return write_loc - priv_read_loc; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 60ee8b329f9e..14de17087864 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -77,8 +77,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val, /* * Hyper-V should be notified only once about a panic. If we will be - * doing hyperv_report_panic_msg() later with kmsg data, don't do - * the notification here. + * doing hv_kmsg_dump() with kmsg data later, don't do the notification + * here. */ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE && hyperv_report_reg()) { @@ -100,8 +100,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val, /* * Hyper-V should be notified only once about a panic. If we will be - * doing hyperv_report_panic_msg() later with kmsg data, don't do - * the notification here. + * doing hv_kmsg_dump() with kmsg data later, don't do the notification + * here. */ if (hyperv_report_reg()) hyperv_report_panic(regs, val, true); @@ -921,6 +921,21 @@ static int vmbus_probe(struct device *child_device) } /* + * vmbus_dma_configure -- Configure DMA coherence for VMbus device + */ +static int vmbus_dma_configure(struct device *child_device) +{ + /* + * On ARM64, propagate the DMA coherence setting from the top level + * VMbus ACPI device to the child VMbus device being added here. + * On x86/x64 coherence is assumed and these calls have no effect. 
+ */ + hv_setup_dma_ops(child_device, + device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT); + return 0; +} + +/* * vmbus_remove - Remove a vmbus device */ static void vmbus_remove(struct device *child_device) @@ -1040,6 +1055,7 @@ static struct bus_type hv_bus = { .remove = vmbus_remove, .probe = vmbus_probe, .uevent = vmbus_uevent, + .dma_configure = vmbus_dma_configure, .dev_groups = vmbus_dev_groups, .drv_groups = vmbus_drv_groups, .bus_groups = vmbus_bus_groups, @@ -1546,14 +1562,20 @@ static int vmbus_bus_init(void) if (ret) goto err_connect; + if (hv_is_isolation_supported()) + sysctl_record_panic_msg = 0; + /* * Only register if the crash MSRs are available */ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { u64 hyperv_crash_ctl; /* - * Sysctl registration is not fatal, since by default - * reporting is enabled. + * Panic message recording (sysctl_record_panic_msg) + * is enabled by default in non-isolated guests and + * disabled by default in isolated guests; the panic + * message recording won't be available in isolated + * guests should the following registration fail. */ hv_ctl_table_hdr = register_sysctl_table(hv_root_table); if (!hv_ctl_table_hdr) @@ -2097,6 +2119,10 @@ int vmbus_device_register(struct hv_device *child_device_obj) child_device_obj->device.parent = &hv_acpi_dev->dev; child_device_obj->device.release = vmbus_device_release; + child_device_obj->device.dma_parms = &child_device_obj->dma_parms; + child_device_obj->device.dma_mask = &child_device_obj->dma_mask; + dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64)); + /* * Register with the LDM. This will kick off the driver/device * binding...which will eventually call vmbus_match() and vmbus_probe() @@ -2122,9 +2148,6 @@ int vmbus_device_register(struct hv_device *child_device_obj) } hv_debug_add_dev_dir(child_device_obj); - child_device_obj->device.dma_parms = &child_device_obj->dma_parms; - child_device_obj->device.dma_mask = &child_device_obj->dma_mask; - dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64)); return 0; err_kset_unregister: @@ -2428,6 +2451,21 @@ static int vmbus_acpi_add(struct acpi_device *device) hv_acpi_dev = device; + /* + * Older versions of Hyper-V for ARM64 fail to include the _CCA + * method on the top level VMbus device in the DSDT. But devices + * are hardware coherent in all current Hyper-V use cases, so fix + * up the ACPI device to behave as if _CCA is present and indicates + * hardware coherence. + */ + ACPI_COMPANION_SET(&device->dev, device); + if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) && + device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) { + pr_info("No ACPI _CCA found; assuming coherent device I/O\n"); + device->flags.cca_seen = true; + device->flags.coherent_dma = true; + } + result = acpi_walk_resources(device->handle, METHOD_NAME__CRS, vmbus_walk_resources, NULL); @@ -2780,10 +2818,15 @@ static void __exit vmbus_exit(void) if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { kmsg_dump_unregister(&hv_kmsg_dumper); unregister_die_notifier(&hyperv_die_block); - atomic_notifier_chain_unregister(&panic_notifier_list, - &hyperv_panic_block); } + /* + * The panic notifier is always registered, hence we should + * also unconditionally unregister it here as well. 
+ */ + atomic_notifier_chain_unregister(&panic_notifier_list, + &hyperv_panic_block); + free_page((unsigned long)hv_panic_page); unregister_sysctl_table(hv_ctl_table_hdr); hv_ctl_table_hdr = NULL; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 27f969b3dc07..e9e2db68b9fb 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -179,6 +179,12 @@ struct imx_i2c_hwdata { unsigned int ndivs; unsigned int i2sr_clr_opcode; unsigned int i2cr_ien_opcode; + /* + * Errata ERR007805 or e7805: + * I2C: When the I2C clock speed is configured for 400 kHz, + * the SCL low period violates the I2C spec of 1.3 uS min. + */ + bool has_err007805; }; struct imx_i2c_dma { @@ -240,6 +246,16 @@ static const struct imx_i2c_hwdata imx21_i2c_hwdata = { }; +static const struct imx_i2c_hwdata imx6_i2c_hwdata = { + .devtype = IMX21_I2C, + .regshift = IMX_I2C_REGSHIFT, + .clk_div = imx_i2c_clk_div, + .ndivs = ARRAY_SIZE(imx_i2c_clk_div), + .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C, + .i2cr_ien_opcode = I2CR_IEN_OPCODE_1, + .has_err007805 = true, +}; + static struct imx_i2c_hwdata vf610_i2c_hwdata = { .devtype = VF610_I2C, .regshift = VF610_I2C_REGSHIFT, @@ -266,6 +282,16 @@ MODULE_DEVICE_TABLE(platform, imx_i2c_devtype); static const struct of_device_id i2c_imx_dt_ids[] = { { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, }, { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, }, + { .compatible = "fsl,imx6q-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx6sl-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, }, { /* sentinel */ } }; @@ -551,6 +577,13 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, unsigned int div; int i; + if (i2c_imx->hwdata->has_err007805 && i2c_imx->bitrate > 384000) { + dev_dbg(&i2c_imx->adapter.dev, + "SoC errata ERR007805 or e7805 applies, bus frequency limited from %d Hz to 384000 Hz.\n", + i2c_imx->bitrate); + i2c_imx->bitrate = 384000; + } + /* Divider value calculation */ if (i2c_imx->cur_clk == i2c_clk_rate) return; diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index f4820fd3dc13..c0364314877e 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -145,8 +145,8 @@ #define ISMT_SPGT_SPD_MASK 0xc0000000 /* SMBus Speed mask */ #define ISMT_SPGT_SPD_80K 0x00 /* 80 kHz */ #define ISMT_SPGT_SPD_100K (0x1 << 30) /* 100 kHz */ -#define ISMT_SPGT_SPD_400K (0x2 << 30) /* 400 kHz */ -#define ISMT_SPGT_SPD_1M (0x3 << 30) /* 1 MHz */ +#define ISMT_SPGT_SPD_400K (0x2U << 30) /* 400 kHz */ +#define ISMT_SPGT_SPD_1M (0x3U << 30) /* 1 MHz */ /* MSI Control Register (MSICTL) bit definitions */ diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c index 7728c8460dc0..9028ffb58cc0 100644 --- a/drivers/i2c/busses/i2c-pasemi-core.c +++ b/drivers/i2c/busses/i2c-pasemi-core.c @@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter, TXFIFO_WR(smbus, 
msg->buf[msg->len-1] | (stop ? MTXFIFO_STOP : 0)); + + if (stop) { + err = pasemi_smb_waitready(smbus); + if (err) + goto reset_out; + } } return 0; diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index fc1dcc19f2a1..5b920f0fc7dd 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -843,10 +843,8 @@ static int geni_i2c_probe(struct platform_device *pdev) /* FIFO is disabled, so we can only use GPI DMA */ gi2c->gpi_mode = true; ret = setup_gpi_dma(gi2c); - if (ret) { - dev_err(dev, "Failed to setup GPI DMA mode:%d ret\n", ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n"); dev_dbg(dev, "Using GPI DMA mode for I2C\n"); } else { diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index cf5d049342ea..ab0adaa130da 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -557,7 +557,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo .addr = umsg.addr, .flags = umsg.flags, .len = umsg.len, - .buf = compat_ptr(umsg.buf) + .buf = (__force __u8 *)compat_ptr(umsg.buf), }; } @@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) i2c_dev->dev.class = i2c_dev_class; i2c_dev->dev.parent = &adap->dev; i2c_dev->dev.release = i2cdev_dev_release; - dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); + + res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); + if (res) + goto err_put_i2c_dev; res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); - if (res) { - put_i2c_dev(i2c_dev, false); - return res; - } + if (res) + goto err_put_i2c_dev; pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr); return 0; + +err_put_i2c_dev: + put_i2c_dev(i2c_dev, false); + return res; } static int i2cdev_detach_adapter(struct device *dev, void *dummy) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 35f0d5e7533d..1c107d6d03b9 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work) switch (cm_id_priv->id.state) { case IB_CM_REP_SENT: case IB_CM_DREQ_SENT: + case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->msg); break; case IB_CM_ESTABLISHED: @@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work) cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->msg); break; - case IB_CM_MRA_REP_RCVD: - break; case IB_CM_TIMEWAIT: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 876cc78a22cc..7333646021bb 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) unsigned long flags; struct list_head del_list; + /* Prevent freeing of mm until we are completely finished. */ + mmgrab(handler->mn.mm); + /* Unregister first so we don't get any more notifications. */ mmu_notifier_unregister(&handler->mn, handler->mn.mm); @@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) do_remove(handler, &del_list); + /* Now the mm may be freed. 
*/ + mmdrop(handler->mn.mm); + kfree(handler); } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 956f8e875daa..32ef67e9a6a7 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -574,8 +574,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) spin_lock_irq(&ent->lock); if (ent->disabled) goto out; - if (need_delay) + if (need_delay) { queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); + goto out; + } remove_cache_mr_locked(ent); queue_adjust_cache_locked(ent); } @@ -625,6 +627,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { struct mlx5_cache_ent *ent = mr->cache_ent; + WRITE_ONCE(dev->cache.last_add, jiffies); spin_lock_irq(&ent->lock); list_add_tail(&mr->list, &ent->head); ent->available_mrs++; diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index ae50b56e8913..8ef112f883a7 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -3190,7 +3190,11 @@ serr_no_r_lock: spin_lock_irqsave(&sqp->s_lock, flags); rvt_send_complete(sqp, wqe, send_status); if (sqp->ibqp.qp_type == IB_QPT_RC) { - int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + int lastwqe; + + spin_lock(&sqp->r_lock); + lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + spin_unlock(&sqp->r_lock); sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 4aab631ef517..d9cf2820c02e 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus", sizeof(phandle)); if (num_iommus < 0) - return 0; + return ERR_PTR(-ENODEV); arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL); if (!arch_data) diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 680d2fcf2686..15edb9a6fcae 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -433,6 +433,7 @@ config QCOM_PDC config QCOM_MPM tristate "QCOM MPM" depends on ARCH_QCOM + depends on MAILBOX select IRQ_DOMAIN_HIERARCHY help MSM Power Manager driver to manage and configure wakeup diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index cd772973114a..a0fc764ec9dc 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3011,18 +3011,12 @@ static int __init allocate_lpi_tables(void) return 0; } -static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +static u64 read_vpend_dirty_clear(void __iomem *vlpi_base) { u32 count = 1000000; /* 1s! 
*/ bool clean; u64 val; - val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); - val &= ~GICR_VPENDBASER_Valid; - val &= ~clr; - val |= set; - gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); - do { val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); clean = !(val & GICR_VPENDBASER_Dirty); @@ -3033,10 +3027,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) } } while (!clean && count); - if (unlikely(val & GICR_VPENDBASER_Dirty)) { + if (unlikely(!clean)) pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + + return val; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u64 val; + + /* Make sure we wait until the RD is done with the initial scan */ + val = read_vpend_dirty_clear(vlpi_base); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + val = read_vpend_dirty_clear(vlpi_base); + if (unlikely(val & GICR_VPENDBASER_Dirty)) val |= GICR_VPENDBASER_PendingLast; - } return val; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 0efe1a9a9f3b..b252d5534547 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -206,11 +206,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) } } -static void gic_do_wait_for_rwp(void __iomem *base) +static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) { u32 count = 1000000; /* 1s! */ - while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + while (readl_relaxed(base + GICD_CTLR) & bit) { count--; if (!count) { pr_err_ratelimited("RWP timeout, gone fishing\n"); @@ -224,13 +224,13 @@ static void gic_do_wait_for_rwp(void __iomem *base) /* Wait for completion of a distributor change */ static void gic_dist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data.dist_base); + gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); } /* Wait for completion of a redistributor change */ static void gic_redist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data_rdist_rd_base()); + gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); } #ifdef CONFIG_ARM64 @@ -1466,6 +1466,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 58ba835bee1f..09c710ecc387 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1123,6 +1123,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c index eea5a753618c..d30614661eea 100644 --- a/drivers/irqchip/irq-qcom-mpm.c +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -375,7 +375,7 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent) raw_spin_lock_init(&priv->lock); priv->base = devm_platform_ioremap_resource(pdev, 0); - if (!priv->base) + if (IS_ERR(priv->base)) return PTR_ERR(priv->base); for (i = 0; i < priv->reg_stride; i++) { diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index ad2d5faa2ebb..36ae30b73a6e 100644 
--- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -4399,6 +4399,7 @@ try_smaller_buffer: } if (ic->internal_hash) { + size_t recalc_tags_size; ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); if (!ic->recalc_wq ) { ti->error = "Cannot allocate workqueue"; @@ -4412,8 +4413,10 @@ try_smaller_buffer: r = -ENOMEM; goto bad; } - ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, - ic->tag_size, GFP_KERNEL); + recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size; + if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size) + recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size; + ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL); if (!ic->recalc_tags) { ti->error = "Cannot allocate tags for recalculating"; r = -ENOMEM; diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c index 875bca30a0dd..82f2a06153dc 100644 --- a/drivers/md/dm-ps-historical-service-time.c +++ b/drivers/md/dm-ps-historical-service-time.c @@ -27,7 +27,6 @@ #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/module.h> -#include <linux/sched/clock.h> #define DM_MSG_PREFIX "multipath historical-service-time" @@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps, { struct selector *s = ps->context; struct path_info *pi = NULL, *best = NULL; - u64 time_now = sched_clock(); + u64 time_now = ktime_get_ns(); struct dm_path *ret = NULL; unsigned long flags; @@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path, static u64 path_service_time(struct path_info *pi, u64 start_time) { - u64 sched_now = ktime_get_ns(); + u64 now = ktime_get_ns(); /* if a previous disk request has finished after this IO was * sent to the hardware, pretend the submission happened @@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time) if (time_after64(pi->last_finish, start_time)) start_time = pi->last_finish; - pi->last_finish = sched_now; - if (time_before64(sched_now, start_time)) + pi->last_finish = now; + if (time_before64(now, start_time)) return 0; - return sched_now - start_time; + return now - start_time; } static int hst_end_io(struct path_selector *ps, struct dm_path *path, diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index c1ca9be4b79e..57daa86c19cf 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -360,16 +360,20 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno, return 0; } +struct orig_bio_details { + unsigned int op; + unsigned int nr_sectors; +}; + /* * First phase of BIO mapping for targets with zone append emulation: * check all BIO that change a zone writer pointer and change zone * append operations into regular write operations. */ static bool dm_zone_map_bio_begin(struct mapped_device *md, - struct bio *orig_bio, struct bio *clone) + unsigned int zno, struct bio *clone) { sector_t zsectors = blk_queue_zone_sectors(md->queue); - unsigned int zno = bio_zone_no(orig_bio); unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]); /* @@ -384,7 +388,7 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md, WRITE_ONCE(md->zwp_offset[zno], zwp_offset); } - switch (bio_op(orig_bio)) { + switch (bio_op(clone)) { case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_FINISH: return true; @@ -401,9 +405,8 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md, * target zone. 
*/ clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE | - (orig_bio->bi_opf & (~REQ_OP_MASK)); - clone->bi_iter.bi_sector = - orig_bio->bi_iter.bi_sector + zwp_offset; + (clone->bi_opf & (~REQ_OP_MASK)); + clone->bi_iter.bi_sector += zwp_offset; break; default: DMWARN_LIMIT("Invalid BIO operation"); @@ -423,11 +426,10 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md, * data written to a zone. Note that at this point, the remapped clone BIO * may already have completed, so we do not touch it. */ -static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, - struct bio *orig_bio, +static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno, + struct orig_bio_details *orig_bio_details, unsigned int nr_sectors) { - unsigned int zno = bio_zone_no(orig_bio); unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]); /* The clone BIO may already have been completed and failed */ @@ -435,7 +437,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, return BLK_STS_IOERR; /* Update the zone wp offset */ - switch (bio_op(orig_bio)) { + switch (orig_bio_details->op) { case REQ_OP_ZONE_RESET: WRITE_ONCE(md->zwp_offset[zno], 0); return BLK_STS_OK; @@ -452,7 +454,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, * Check that the target did not truncate the write operation * emulating a zone append. */ - if (nr_sectors != bio_sectors(orig_bio)) { + if (nr_sectors != orig_bio_details->nr_sectors) { DMWARN_LIMIT("Truncated write for zone append"); return BLK_STS_IOERR; } @@ -488,7 +490,7 @@ static inline void dm_zone_unlock(struct request_queue *q, bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED); } -static bool dm_need_zone_wp_tracking(struct bio *orig_bio) +static bool dm_need_zone_wp_tracking(struct bio *bio) { /* * Special processing is not needed for operations that do not need the @@ -496,15 +498,15 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio) * zones and all operations that do not modify directly a sequential * zone write pointer. */ - if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio)) + if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) return false; - switch (bio_op(orig_bio)) { + switch (bio_op(bio)) { case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE: case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_FINISH: case REQ_OP_ZONE_APPEND: - return bio_zone_is_seq(orig_bio); + return bio_zone_is_seq(bio); default: return false; } @@ -519,8 +521,8 @@ int dm_zone_map_bio(struct dm_target_io *tio) struct dm_target *ti = tio->ti; struct mapped_device *md = io->md; struct request_queue *q = md->queue; - struct bio *orig_bio = io->orig_bio; struct bio *clone = &tio->clone; + struct orig_bio_details orig_bio_details; unsigned int zno; blk_status_t sts; int r; @@ -529,18 +531,21 @@ int dm_zone_map_bio(struct dm_target_io *tio) * IOs that do not change a zone write pointer do not need * any additional special processing. */ - if (!dm_need_zone_wp_tracking(orig_bio)) + if (!dm_need_zone_wp_tracking(clone)) return ti->type->map(ti, clone); /* Lock the target zone */ - zno = bio_zone_no(orig_bio); + zno = bio_zone_no(clone); dm_zone_lock(q, zno, clone); + orig_bio_details.nr_sectors = bio_sectors(clone); + orig_bio_details.op = bio_op(clone); + /* * Check that the bio and the target zone write pointer offset are * both valid, and if the bio is a zone append, remap it to a write. 
*/ - if (!dm_zone_map_bio_begin(md, orig_bio, clone)) { + if (!dm_zone_map_bio_begin(md, zno, clone)) { dm_zone_unlock(q, zno, clone); return DM_MAPIO_KILL; } @@ -560,7 +565,8 @@ int dm_zone_map_bio(struct dm_target_io *tio) * The target submitted the clone BIO. The target zone will * be unlocked on completion of the clone. */ - sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr); + sts = dm_zone_map_bio_end(md, zno, &orig_bio_details, + *tio->len_ptr); break; case DM_MAPIO_REMAPPED: /* @@ -568,7 +574,8 @@ int dm_zone_map_bio(struct dm_target_io *tio) * unlock the target zone here as the clone will not be * submitted. */ - sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr); + sts = dm_zone_map_bio_end(md, zno, &orig_bio_details, + *tio->len_ptr); if (sts != BLK_STS_OK) dm_zone_unlock(q, zno, clone); break; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3c5fad7c4ee6..82957bd460e8 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1323,8 +1323,7 @@ static void __map_bio(struct bio *clone) } static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, - struct dm_target *ti, unsigned num_bios, - unsigned *len) + struct dm_target *ti, unsigned num_bios) { struct bio *bio; int try; @@ -1335,7 +1334,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, if (try) mutex_lock(&ci->io->md->table_devices_lock); for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { - bio = alloc_tio(ci, ti, bio_nr, len, + bio = alloc_tio(ci, ti, bio_nr, NULL, try ? GFP_NOIO : GFP_NOWAIT); if (!bio) break; @@ -1363,11 +1362,11 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, break; case 1: clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); - dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); __map_bio(clone); break; default: - alloc_multiple_bios(&blist, ci, ti, num_bios, len); + /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ + alloc_multiple_bios(&blist, ci, ti, num_bios); while ((clone = bio_list_pop(&blist))) { dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); __map_bio(clone); @@ -1392,6 +1391,7 @@ static void __send_empty_flush(struct clone_info *ci) ci->bio = &flush_bio; ci->sector_count = 0; + ci->io->tio.clone.bi_iter.bi_size = 0; while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); @@ -1407,14 +1407,10 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target len = min_t(sector_t, ci->sector_count, max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); - /* - * dm_accept_partial_bio cannot be used with duplicate bios, - * so update clone_info cursor before __send_duplicate_bios(). 
- */ + __send_duplicate_bios(ci, ti, num_bios, &len); + ci->sector += len; ci->sector_count -= len; - - __send_duplicate_bios(ci, ti, num_bios, &len); } static bool is_abnormal_io(struct bio *bio) diff --git a/drivers/media/platform/nxp/Kconfig b/drivers/media/platform/nxp/Kconfig index 28f2bafc14d2..5afa373e534f 100644 --- a/drivers/media/platform/nxp/Kconfig +++ b/drivers/media/platform/nxp/Kconfig @@ -7,6 +7,7 @@ comment "NXP media platform drivers" config VIDEO_IMX_MIPI_CSIS tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models" depends on ARCH_MXC || COMPILE_TEST + depends on VIDEO_DEV select MEDIA_CONTROLLER select V4L2_FWNODE select VIDEO_V4L2_SUBDEV_API diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c index 4de5e8d2b261..3d3d1062e212 100644 --- a/drivers/media/platform/rockchip/rga/rga.c +++ b/drivers/media/platform/rockchip/rga/rga.c @@ -892,7 +892,7 @@ static int rga_probe(struct platform_device *pdev) } rga->dst_mmu_pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3); - if (rga->dst_mmu_pages) { + if (!rga->dst_mmu_pages) { ret = -ENOMEM; goto free_src_pages; } diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index 47029746b89e..0de587b412d4 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -77,16 +77,16 @@ err_mutex_unlock: } static const struct si2157_tuner_info si2157_tuners[] = { - { SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE }, - { SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE }, - { SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL }, - { SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL }, - { SI2148, true, 0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE }, - { SI2148, true, 0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE }, - { SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE }, - { SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE }, - { SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE }, - { SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE }, + { SI2141, 0x60, false, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE }, + { SI2141, 0x61, false, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE }, + { SI2146, 0x11, false, SI2146_11_FIRMWARE, NULL }, + { SI2147, 0x50, false, SI2147_50_FIRMWARE, NULL }, + { SI2148, 0x32, true, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE }, + { SI2148, 0x33, true, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE }, + { SI2157, 0x50, false, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE }, + { SI2158, 0x50, false, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE }, + { SI2158, 0x51, false, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE }, + { SI2177, 0x50, false, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE }, }; static int si2157_load_firmware(struct dvb_frontend *fe, @@ -178,7 +178,7 @@ static int si2157_find_and_load_firmware(struct dvb_frontend *fe) } } - if (!fw_name && !fw_alt_name) { + if (required && !fw_name && !fw_alt_name) { dev_err(&client->dev, "unknown chip version Si21%d-%c%c%c ROM 0x%02x\n", part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id); diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 30bff6cb1b8d..b7800b37af78 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -103,7 +103,7 @@ config TI_EMIF temperature changes config OMAP_GPMC - bool "Texas Instruments OMAP SoC GPMC driver" if COMPILE_TEST + tristate "Texas Instruments OMAP SoC GPMC driver" depends on OF_ADDRESS select GPIOLIB help diff --git a/drivers/memory/atmel-ebi.c 
b/drivers/memory/atmel-ebi.c index c267283b01fd..e749dcb3ddea 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev) smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0); ebi->smc.regmap = syscon_node_to_regmap(smc_np); - if (IS_ERR(ebi->smc.regmap)) - return PTR_ERR(ebi->smc.regmap); + if (IS_ERR(ebi->smc.regmap)) { + ret = PTR_ERR(ebi->smc.regmap); + goto put_node; + } ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np); - if (IS_ERR(ebi->smc.layout)) - return PTR_ERR(ebi->smc.layout); + if (IS_ERR(ebi->smc.layout)) { + ret = PTR_ERR(ebi->smc.layout); + goto put_node; + } ebi->smc.clk = of_clk_get(smc_np, 0); if (IS_ERR(ebi->smc.clk)) { - if (PTR_ERR(ebi->smc.clk) != -ENOENT) - return PTR_ERR(ebi->smc.clk); + if (PTR_ERR(ebi->smc.clk) != -ENOENT) { + ret = PTR_ERR(ebi->smc.clk); + goto put_node; + } ebi->smc.clk = NULL; } + of_node_put(smc_np); ret = clk_prepare_enable(ebi->smc.clk); if (ret) return ret; @@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev) } return of_platform_populate(np, NULL, NULL, dev); + +put_node: + of_node_put(smc_np); + return ret; } static __maybe_unused int atmel_ebi_resume(struct device *dev) diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c index 14412002775d..76c82e9c8fce 100644 --- a/drivers/memory/brcmstb_dpfe.c +++ b/drivers/memory/brcmstb_dpfe.c @@ -857,7 +857,6 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct brcmstb_dpfe_priv *priv; - struct resource *res; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -869,22 +868,19 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev) mutex_init(&priv->lock); platform_set_drvdata(pdev, priv); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu"); - priv->regs = devm_ioremap_resource(dev, res); + priv->regs = devm_platform_ioremap_resource_byname(pdev, "dpfe-cpu"); if (IS_ERR(priv->regs)) { dev_err(dev, "couldn't map DCPU registers\n"); return -ENODEV; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem"); - priv->dmem = devm_ioremap_resource(dev, res); + priv->dmem = devm_platform_ioremap_resource_byname(pdev, "dpfe-dmem"); if (IS_ERR(priv->dmem)) { dev_err(dev, "Couldn't map DCPU data memory\n"); return -ENOENT; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem"); - priv->imem = devm_ioremap_resource(dev, res); + priv->imem = devm_platform_ioremap_resource_byname(pdev, "dpfe-imem"); if (IS_ERR(priv->imem)) { dev_err(dev, "Couldn't map DCPU instruction memory\n"); return -ENOENT; diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c index 872addd0ec60..b32005bf269c 100644 --- a/drivers/memory/da8xx-ddrctl.c +++ b/drivers/memory/da8xx-ddrctl.c @@ -115,8 +115,7 @@ static int da8xx_ddrctl_probe(struct platform_device *pdev) return -EINVAL; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ddrctl = devm_ioremap_resource(dev, res); + ddrctl = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(ddrctl)) { dev_err(dev, "unable to map memory controller registers\n"); return PTR_ERR(ddrctl); diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index ecc78d6f89ed..6c2a421b86e3 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -1025,10 +1025,8 @@ static struct emif_data *__init_or_module get_device_details( temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); dev_info = 
devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL); - if (!emif || !temp || !dev_info) { - dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__); + if (!emif || !temp || !dev_info) goto error; - } memcpy(temp, pd, sizeof(*pd)); pd = temp; @@ -1067,9 +1065,6 @@ static struct emif_data *__init_or_module get_device_details( temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL); if (temp) memcpy(temp, cust_cfgs, sizeof(*cust_cfgs)); - else - dev_warn(dev, "%s:%d: allocation error\n", __func__, - __LINE__); pd->custom_configs = temp; } @@ -1084,8 +1079,6 @@ static struct emif_data *__init_or_module get_device_details( memcpy(temp, pd->timings, size); pd->timings = temp; } else { - dev_warn(dev, "%s:%d: allocation error\n", __func__, - __LINE__); get_default_timings(emif); } } else { @@ -1098,8 +1091,6 @@ static struct emif_data *__init_or_module get_device_details( memcpy(temp, pd->min_tck, sizeof(*pd->min_tck)); pd->min_tck = temp; } else { - dev_warn(dev, "%s:%d: allocation error\n", __func__, - __LINE__); pd->min_tck = &lpddr2_jedec_min_tck; } } else { @@ -1116,7 +1107,6 @@ error: static int __init_or_module emif_probe(struct platform_device *pdev) { struct emif_data *emif; - struct resource *res; int irq, ret; if (pdev->dev.of_node) @@ -1135,8 +1125,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev) emif->dev = &pdev->dev; platform_set_drvdata(pdev, emif); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - emif->base = devm_ioremap_resource(emif->dev, res); + emif->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(emif->base)) goto error; diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c index f8ea592c9cb5..7fc9f57ae278 100644 --- a/drivers/memory/fsl-corenet-cf.c +++ b/drivers/memory/fsl-corenet-cf.c @@ -172,7 +172,6 @@ out: static int ccf_probe(struct platform_device *pdev) { struct ccf_private *ccf; - struct resource *r; const struct of_device_id *match; u32 errinten; int ret, irq; @@ -185,13 +184,7 @@ static int ccf_probe(struct platform_device *pdev) if (!ccf) return -ENOMEM; - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "%s: no mem resource\n", __func__); - return -ENXIO; - } - - ccf->regs = devm_ioremap_resource(&pdev->dev, r); + ccf->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ccf->regs)) return PTR_ERR(ccf->regs); diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index 2f6939da21cd..e83b61c925a4 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -287,8 +287,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) } /* legacy dts may still use "simple-bus" compatible */ - ret = of_platform_populate(dev->dev.of_node, NULL, NULL, - &dev->dev); + ret = of_platform_default_populate(dev->dev.of_node, NULL, &dev->dev); if (ret) goto err_free_nandirq; diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index ed11887c1b7c..2351f2708da2 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -12,6 +12,7 @@ #include <linux/cpu_pm.h> #include <linux/irq.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/clk.h> @@ -1889,16 +1890,6 @@ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p) } #ifdef CONFIG_OF -static const struct of_device_id gpmc_dt_ids[] = { - { .compatible = "ti,omap2420-gpmc" }, - { .compatible = "ti,omap2430-gpmc" }, - { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */ - { 
.compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */ - { .compatible = "ti,am3352-gpmc" }, /* am335x devices */ - { .compatible = "ti,am64-gpmc" }, - { } -}; - static void gpmc_cs_set_name(int cs, const char *name) { struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; @@ -2257,11 +2248,9 @@ no_timings: if (!of_platform_device_create(child, NULL, &pdev->dev)) goto err_child_fail; - /* is child a common bus? */ - if (of_match_node(of_default_bus_match_table, child)) - /* create children and other common bus children */ - if (of_platform_default_populate(child, NULL, &pdev->dev)) - goto err_child_fail; + /* create children and other common bus children */ + if (of_platform_default_populate(child, NULL, &pdev->dev)) + goto err_child_fail; return 0; @@ -2278,6 +2267,8 @@ err: return ret; } +static const struct of_device_id gpmc_dt_ids[]; + static int gpmc_probe_dt(struct platform_device *pdev) { int ret; @@ -2644,6 +2635,19 @@ static int gpmc_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume); +#ifdef CONFIG_OF +static const struct of_device_id gpmc_dt_ids[] = { + { .compatible = "ti,omap2420-gpmc" }, + { .compatible = "ti,omap2430-gpmc" }, + { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */ + { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */ + { .compatible = "ti,am3352-gpmc" }, /* am335x devices */ + { .compatible = "ti,am64-gpmc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpmc_dt_ids); +#endif + static struct platform_driver gpmc_driver = { .probe = gpmc_probe, .remove = gpmc_remove, @@ -2654,8 +2658,7 @@ static struct platform_driver gpmc_driver = { }, }; -static __init int gpmc_init(void) -{ - return platform_driver_register(&gpmc_driver); -} -postcore_initcall(gpmc_init); +module_platform_driver(gpmc_driver); + +MODULE_DESCRIPTION("Texas Instruments GPMC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index e4cc64f56019..f4d62242c90a 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -229,8 +229,7 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev) rpc->dev = dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); - rpc->base = devm_ioremap_resource(&pdev->dev, res); + rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs"); if (IS_ERR(rpc->base)) return PTR_ERR(rpc->base); @@ -458,7 +457,7 @@ int rpcif_manual_xfer(struct rpcif *rpc) case RPCIF_DATA_OUT: while (pos < rpc->xferlen) { u32 bytes_left = rpc->xferlen - pos; - u32 nbytes, data[2]; + u32 nbytes, data[2], *p = data; smcr = rpc->smcr | RPCIF_SMCR_SPIE; @@ -471,15 +470,9 @@ int rpcif_manual_xfer(struct rpcif *rpc) regmap_write(rpc->regmap, RPCIF_SMENR, smenr); memcpy(data, rpc->buffer + pos, nbytes); - if (nbytes == 8) { - regmap_write(rpc->regmap, RPCIF_SMWDR1, - data[0]); - regmap_write(rpc->regmap, RPCIF_SMWDR0, - data[1]); - } else { - regmap_write(rpc->regmap, RPCIF_SMWDR0, - data[0]); - } + if (nbytes == 8) + regmap_write(rpc->regmap, RPCIF_SMWDR1, *p++); + regmap_write(rpc->regmap, RPCIF_SMWDR0, *p); regmap_write(rpc->regmap, RPCIF_SMCR, smcr); ret = wait_msg_xfer_end(rpc); @@ -521,7 +514,7 @@ int rpcif_manual_xfer(struct rpcif *rpc) } while (pos < rpc->xferlen) { u32 bytes_left = rpc->xferlen - pos; - u32 nbytes, data[2]; + u32 nbytes, data[2], *p = data; /* nbytes may only be 1, 2, 4, or 8 */ nbytes = bytes_left >= max ? 
max : (1 << ilog2(bytes_left)); @@ -537,15 +530,9 @@ int rpcif_manual_xfer(struct rpcif *rpc) if (ret) goto err_out; - if (nbytes == 8) { - regmap_read(rpc->regmap, RPCIF_SMRDR1, - &data[0]); - regmap_read(rpc->regmap, RPCIF_SMRDR0, - &data[1]); - } else { - regmap_read(rpc->regmap, RPCIF_SMRDR0, - &data[0]); - } + if (nbytes == 8) + regmap_read(rpc->regmap, RPCIF_SMRDR1, p++); + regmap_read(rpc->regmap, RPCIF_SMRDR0, p); memcpy(rpc->buffer + pos, data, nbytes); pos += nbytes; @@ -651,6 +638,7 @@ static int rpcif_probe(struct platform_device *pdev) struct platform_device *vdev; struct device_node *flash; const char *name; + int ret; flash = of_get_next_child(pdev->dev.of_node, NULL); if (!flash) { @@ -674,7 +662,14 @@ static int rpcif_probe(struct platform_device *pdev) return -ENOMEM; vdev->dev.parent = &pdev->dev; platform_set_drvdata(pdev, vdev); - return platform_device_add(vdev); + + ret = platform_device_add(vdev); + if (ret) { + platform_device_put(vdev); + return ret; + } + + return 0; } static int rpcif_remove(struct platform_device *pdev) diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c index 9c8318923ed0..4733e7898ffe 100644 --- a/drivers/memory/samsung/exynos5422-dmc.c +++ b/drivers/memory/samsung/exynos5422-dmc.c @@ -1322,7 +1322,6 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc) */ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc) { - int counters_size; int ret, i; dmc->num_counters = devfreq_event_get_edev_count(dmc->dev, @@ -1332,8 +1331,8 @@ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc) return dmc->num_counters; } - counters_size = sizeof(struct devfreq_event_dev) * dmc->num_counters; - dmc->counter = devm_kzalloc(dmc->dev, counters_size, GFP_KERNEL); + dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters, + sizeof(*dmc->counter), GFP_KERNEL); if (!dmc->counter) return -ENOMEM; diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index 44b4a4080920..6d22d1ee432a 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -716,7 +716,6 @@ del_provider: static int tegra_mc_probe(struct platform_device *pdev) { - struct resource *res; struct tegra_mc *mc; u64 mask; int err; @@ -741,8 +740,7 @@ static int tegra_mc_probe(struct platform_device *pdev) /* length of MC tick in nanoseconds */ mc->tick = 30; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mc->regs = devm_ioremap_resource(&pdev->dev, res); + mc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mc->regs)) return PTR_ERR(mc->regs); diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c index 51d20c2ccb75..f81e7df8798a 100644 --- a/drivers/memory/ti-aemif.c +++ b/drivers/memory/ti-aemif.c @@ -328,7 +328,6 @@ static int aemif_probe(struct platform_device *pdev) { int i; int ret = -ENODEV; - struct resource *res; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device_node *child_np; @@ -362,8 +361,7 @@ static int aemif_probe(struct platform_device *pdev) else if (pdata) aemif->cs_offset = pdata->cs_offset; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - aemif->base = devm_ioremap_resource(dev, res); + aemif->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(aemif->base)) { ret = PTR_ERR(aemif->base); goto error; diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c index 179fec2da56d..31d6266f008c 100644 --- a/drivers/memory/ti-emif-pm.c +++ b/drivers/memory/ti-emif-pm.c @@ -290,9 +290,9 @@ 
static int ti_emif_probe(struct platform_device *pdev) emif_data->pm_data.ti_emif_sram_config = (unsigned long)match->data; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - emif_data->pm_data.ti_emif_base_addr_virt = devm_ioremap_resource(dev, - res); + emif_data->pm_data.ti_emif_base_addr_virt = devm_platform_get_and_ioremap_resource(pdev, + 0, + &res); if (IS_ERR(emif_data->pm_data.ti_emif_base_addr_virt)) { ret = PTR_ERR(emif_data->pm_data.ti_emif_base_addr_virt); return ret; diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index e90adfa57950..9b3ba2df71c7 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -6658,13 +6658,13 @@ static int mpt_summary_proc_show(struct seq_file *m, void *v) static int mpt_version_proc_show(struct seq_file *m, void *v) { u8 cb_idx; - int scsi, fc, sas, lan, ctl, targ, dmp; + int scsi, fc, sas, lan, ctl, targ; char *drvname; seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON); seq_printf(m, " Fusion MPT base driver\n"); - scsi = fc = sas = lan = ctl = targ = dmp = 0; + scsi = fc = sas = lan = ctl = targ = 0; for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { drvname = NULL; if (MptCallbacks[cb_idx]) { diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index e008d82e4ba3..a13506dd8119 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -111,10 +111,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, if (contiguous) { if (is_power_of_2(page_size)) - paddr = (u64) (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool, - total_size, NULL, page_size); + paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool, + total_size, NULL, page_size); else - paddr = (u64) (uintptr_t) gen_pool_alloc(vm->dram_pg_pool, total_size); + paddr = gen_pool_alloc(vm->dram_pg_pool, total_size); if (!paddr) { dev_err(hdev->dev, "failed to allocate %llu contiguous pages with total size of %llu\n", @@ -150,12 +150,12 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, for (i = 0 ; i < num_pgs ; i++) { if (is_power_of_2(page_size)) phys_pg_pack->pages[i] = - (u64) gen_pool_dma_alloc_align(vm->dram_pg_pool, - page_size, NULL, - page_size); + (uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool, + page_size, NULL, + page_size); else - phys_pg_pack->pages[i] = (u64) gen_pool_alloc(vm->dram_pg_pool, - page_size); + phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool, + page_size); if (!phys_pg_pack->pages[i]) { dev_err(hdev->dev, "Failed to allocate device memory (out of memory)\n"); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 4e67c1403cc9..506dc900f5c7 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -993,7 +993,7 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, return -EEXIST; md->reset_done |= type; - err = mmc_hw_reset(host); + err = mmc_hw_reset(host->card); /* Ensure we switch back to the correct partition */ if (err) { struct mmc_blk_data *main_md = @@ -1880,6 +1880,31 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; } +static int mmc_spi_err_check(struct mmc_card *card) +{ + u32 status = 0; + int err; + + /* + * SPI does not have a TRAN state we have to wait on, instead the + * card is ready again when it no longer holds the line LOW. 
+ * We still have to ensure two things here before we know the write + * was successful: + * 1. The card has not disconnected during busy and we actually read our + * own pull-up, thinking it was still connected, so ensure it + * still responds. + * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a + * just reconnected card after being disconnected during busy. + */ + err = __mmc_send_status(card, &status, 0); + if (err) + return err; + /* All R1 and R2 bits of SPI are errors in our case */ + if (status) + return -EIO; + return 0; +} + static int mmc_blk_busy_cb(void *cb_data, bool *busy) { struct mmc_blk_busy_data *data = cb_data; @@ -1903,9 +1928,16 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) struct mmc_blk_busy_data cb_data; int err; - if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) + if (rq_data_dir(req) == READ) return 0; + if (mmc_host_is_spi(card->host)) { + err = mmc_spi_err_check(card); + if (err) + mqrq->brq.data.bytes_xfered = 0; + return err; + } + cb_data.card = card; cb_data.status = 0; err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS, @@ -2350,6 +2382,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, struct mmc_blk_data *md; int devidx, ret; char cap_str[10]; + bool cache_enabled = false; + bool fua_enabled = false; devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); if (devidx < 0) { @@ -2429,13 +2463,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, md->flags |= MMC_BLK_CMD23; } - if (mmc_card_mmc(card) && - md->flags & MMC_BLK_CMD23 && + if (md->flags & MMC_BLK_CMD23 && ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || card->ext_csd.rel_sectors)) { md->flags |= MMC_BLK_REL_WR; - blk_queue_write_cache(md->queue.queue, true, true); + fua_enabled = true; + cache_enabled = true; } + if (mmc_cache_enabled(card->host)) + cache_enabled = true; + + blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled); string_get_size((u64)size, 512, STRING_UNITS_2, cap_str, sizeof(cap_str)); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 368f10405e13..c6ae16d40766 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1995,7 +1995,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host) /** * mmc_hw_reset - reset the card in hardware - * @host: MMC host to which the card is attached + * @card: card to be reset * * Hard reset the card. This function is only for upper layers, like the * block layer or card drivers. 
You cannot use it in host drivers (struct @@ -2003,8 +2003,9 @@ static void mmc_hw_reset_for_init(struct mmc_host *host) * * Return: 0 on success, -errno on failure */ -int mmc_hw_reset(struct mmc_host *host) +int mmc_hw_reset(struct mmc_card *card) { + struct mmc_host *host = card->host; int ret; ret = host->bus_ops->hw_reset(host); diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index e6a2fd2c6d5c..8d9bceeff986 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -2325,10 +2325,9 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) static int mmc_test_reset(struct mmc_test_card *test) { struct mmc_card *card = test->card; - struct mmc_host *host = card->host; int err; - err = mmc_hw_reset(host); + err = mmc_hw_reset(card); if (!err) { /* * Reset will re-enable the card's command queue, but tests diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index 9c13f2c31365..4566d7fc9055 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host, * excepted the last element which has no constraint on idmasize */ for_each_sg(data->sg, sg, data->sg_len - 1, i) { - if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) || - !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) { + if (!IS_ALIGNED(sg->offset, sizeof(u32)) || + !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) { dev_err(mmc_dev(host->mmc), "unaligned scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); @@ -71,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host, } } - if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { dev_err(mmc_dev(host->mmc), "unaligned last scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 2797a9c0f17d..ddb5ca2f559e 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -144,9 +144,9 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host, return clk_get_rate(priv->clk); if (priv->clkh) { + /* HS400 with 4TAP needs different clock settings */ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; - bool need_slow_clkh = (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) || - (host->mmc->ios.timing == MMC_TIMING_MMC_HS400); + bool need_slow_clkh = host->mmc->ios.timing == MMC_TIMING_MMC_HS400; clkh_shift = use_4tap && need_slow_clkh ? 
1 : 2; ref_clk = priv->clkh; } @@ -396,10 +396,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc) SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) | sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); - /* Set the sampling clock selection range of HS400 mode */ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL, SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN | - 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); + sd_scc_read32(host, priv, + SH_MOBILE_SDHI_SCC_DTCNTL)); /* Avoid bad TAP */ if (bad_taps & BIT(priv->tap_set)) { diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 666cee4c7f7c..08e838400b52 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -241,16 +241,6 @@ static void xenon_voltage_switch(struct sdhci_host *host) { /* Wait for 5ms after set 1.8V signal enable bit */ usleep_range(5000, 5500); - - /* - * For some reason the controller's Host Control2 register reports - * the bit representing 1.8V signaling as 0 when read after it was - * written as 1. Subsequent read reports 1. - * - * Since this may cause some issues, do an empty read of the Host - * Control2 register here to circumvent this. - */ - sdhci_readw(host, SDHCI_HOST_CONTROL2); } static unsigned int xenon_get_max_clock(struct sdhci_host *host) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 413b0006e9a2..9e28219b223d 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -670,6 +670,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu, struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); enum dsa_tag_protocol old_proto = felix->tag_proto; + bool cpu_port_active = false; + struct dsa_port *dp; int err; if (proto != DSA_TAG_PROTO_SEVILLE && @@ -677,6 +679,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu, proto != DSA_TAG_PROTO_OCELOT_8021Q) return -EPROTONOSUPPORT; + /* We don't support multiple CPU ports, yet the DT blob may have + * multiple CPU ports defined. The first CPU port is the active one, + * the others are inactive. In this case, DSA will call + * ->change_tag_protocol() multiple times, once per CPU port. + * Since we implement the tagging protocol change towards "ocelot" or + * "seville" as effectively initializing the NPI port, what we are + * doing is effectively changing who the NPI port is to the last @cpu + * argument passed, which is an unused DSA CPU port and not the one + * that should actively pass traffic. + * Suppress DSA's calls on CPU ports that are inactive. 
+ */ + dsa_switch_for_each_user_port(dp, ds) { + if (dp->cpu_dp->index == cpu) { + cpu_port_active = true; + break; + } + } + + if (!cpu_port_active) + return 0; + felix_del_tag_protocol(ds, cpu, old_proto); err = felix_set_tag_protocol(ds, cpu, proto); diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 8d382b27e625..52a8566071ed 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev, err = dsa_register_switch(ds); if (err) { - dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); goto err_register_ds; } diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig index 1aa79735355f..060165a85fb7 100644 --- a/drivers/net/dsa/realtek/Kconfig +++ b/drivers/net/dsa/realtek/Kconfig @@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK help Select to enable support for Realtek Ethernet switch chips. + Note that at least one interface driver must be enabled for the + subdrivers to be loaded. Moreover, an interface driver cannot achieve + anything without at least one subdriver enabled. + +if NET_DSA_REALTEK + config NET_DSA_REALTEK_MDIO - tristate "Realtek MDIO connected switch driver" - depends on NET_DSA_REALTEK + tristate "Realtek MDIO interface driver" depends on OF + depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB + depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB + depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB help Select to enable support for registering switches configured through MDIO. config NET_DSA_REALTEK_SMI - tristate "Realtek SMI connected switch driver" - depends on NET_DSA_REALTEK + tristate "Realtek SMI interface driver" depends on OF + depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB + depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB + depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB help Select to enable support for registering switches connected through SMI. config NET_DSA_REALTEK_RTL8365MB tristate "Realtek RTL8365MB switch subdriver" - depends on NET_DSA_REALTEK - depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO + imply NET_DSA_REALTEK_SMI + imply NET_DSA_REALTEK_MDIO select NET_DSA_TAG_RTL8_4 help Select to enable support for Realtek RTL8365MB-VC and RTL8367S. config NET_DSA_REALTEK_RTL8366RB tristate "Realtek RTL8366RB switch subdriver" - depends on NET_DSA_REALTEK - depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO + imply NET_DSA_REALTEK_SMI + imply NET_DSA_REALTEK_MDIO select NET_DSA_TAG_RTL4_A help - Select to enable support for Realtek RTL8366RB + Select to enable support for Realtek RTL8366RB. 
+ +endif diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c index 2243d3da55b2..6cec559c90ce 100644 --- a/drivers/net/dsa/realtek/realtek-smi.c +++ b/drivers/net/dsa/realtek/realtek-smi.c @@ -546,11 +546,6 @@ static const struct of_device_id realtek_smi_of_match[] = { .data = &rtl8366rb_variant, }, #endif - { - /* FIXME: add support for RTL8366S and more */ - .compatible = "realtek,rtl8366s", - .data = NULL, - }, #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB) { .compatible = "realtek,rtl8365mb", diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 33f1a1377588..24d715c28a35 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self) if (err < 0) goto err_exit; - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + aq_vec = self->aq_vec[i]; err = aq_vec_start(aq_vec); if (err < 0) goto err_exit; @@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self) mod_timer(&self->polling_timer, jiffies + AQ_CFG_POLLING_TIMER_INTERVAL); } else { - for (i = 0U, aq_vec = self->aq_vec[0]; - self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { + for (i = 0U; self->aq_vecs > i; ++i) { + aq_vec = self->aq_vec[i]; err = aq_pci_func_alloc_irq(self, i, self->ndev->name, aq_vec_isr, aq_vec, aq_vec_get_affinity_mask(aq_vec)); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f4774cf051c9..6ab1f3212d24 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) if (!self) { err = -EINVAL; } else { - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp); ring[AQ_VEC_RX_ID].stats.rx.polls++; u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp); @@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, self->aq_hw_ops = aq_hw_ops; self->aq_hw = aq_hw; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX); if (err < 0) goto err_exit; @@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self) unsigned int i = 0U; int err = 0; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw, &ring[AQ_VEC_TX_ID]); if (err < 0) @@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self) struct aq_ring_s *ring = NULL; unsigned int i = 0U; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw, &ring[AQ_VEC_TX_ID]); @@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self) if (!self) goto err_exit; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); 
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); } @@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self) if (!self) goto err_exit; - for (i = 0U, ring = self->ring[0]; - self->tx_rings > i; ++i, ring = self->ring[i]) { + for (i = 0U; self->tx_rings > i; ++i) { + ring = self->ring[i]; aq_ring_free(&ring[AQ_VEC_TX_ID]); if (i < self->rx_rings) aq_ring_free(&ring[AQ_VEC_RX_ID]); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1c28495875cf..874fad0a5cf8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3253,6 +3253,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) } qidx = bp->tc_to_qidx[j]; ring->queue_id = bp->q_info[qidx].queue_id; + spin_lock_init(&txr->xdp_tx_lock); if (i < bp->tx_nr_rings_xdp) continue; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) @@ -10338,6 +10339,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (irq_re_init) udp_tunnel_nic_reset_ntf(bp->dev); + if (bp->tx_nr_rings_xdp < num_possible_cpus()) { + if (!static_key_enabled(&bnxt_xdp_locking_key)) + static_branch_enable(&bnxt_xdp_locking_key); + } else if (static_key_enabled(&bnxt_xdp_locking_key)) { + static_branch_disable(&bnxt_xdp_locking_key); + } set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); /* Enable TX queues */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 61aa3e8c5952..98453a78cbd0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -593,7 +593,8 @@ struct nqe_cn { #define BNXT_MAX_MTU 9500 #define BNXT_MAX_PAGE_MODE_MTU \ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ - XDP_PACKET_HEADROOM) + XDP_PACKET_HEADROOM - \ + SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))) #define BNXT_MIN_PKT_SIZE 52 @@ -800,6 +801,8 @@ struct bnxt_tx_ring_info { u32 dev_state; struct bnxt_ring_struct tx_ring_struct; + /* Synchronize simultaneous xdp_xmit on same ring */ + spinlock_t xdp_tx_lock; }; #define BNXT_LEGACY_COAL_CMPL_PARAMS \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 52fad0fdeacf..03b1d6c04504 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -20,6 +20,8 @@ #include "bnxt.h" #include "bnxt_xdp.h" +DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); + struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len) @@ -227,11 +229,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, ring = smp_processor_id() % bp->tx_nr_rings_xdp; txr = &bp->tx_ring[ring]; + if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING) + return -EINVAL; + + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_lock(&txr->xdp_tx_lock); + for (i = 0; i < num_frames; i++) { struct xdp_frame *xdp = frames[i]; - if (!txr || !bnxt_tx_avail(bp, txr) || - !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) + if (!bnxt_tx_avail(bp, txr)) break; mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, @@ -250,6 +257,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); } + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_unlock(&txr->xdp_tx_lock); + return nxmit; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 
0df40c3beb05..067bb5e821f5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -10,6 +10,8 @@ #ifndef BNXT_XDP_H #define BNXT_XDP_H +DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); + struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2dd79af9411b..9a41145dadfc 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset) if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) __raw_writel(value, offset); else - writel(value, offset); + writel_relaxed(value, offset); } static inline u32 bcmgenet_readl(void __iomem *offset) @@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset) if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return __raw_readl(offset); else - return readl(offset); + return readl_relaxed(offset); } static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 800d5ced5800..e475be29845c 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1658,6 +1658,7 @@ static void macb_tx_restart(struct macb_queue *queue) unsigned int head = queue->tx_head; unsigned int tail = queue->tx_tail; struct macb *bp = queue->bp; + unsigned int head_idx, tbqp; if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(TXUBR)); @@ -1665,6 +1666,13 @@ static void macb_tx_restart(struct macb_queue *queue) if (head == tail) return; + tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); + tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); + head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); + + if (tbqp == head_idx) + return; + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index d5356db7539a..caf48023f8ea 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->rxdes0_edorr_mask = BIT(30); priv->txdes0_edotr_mask = BIT(30); priv->is_aspeed = true; - /* Disable ast2600 problematic HW arbitration */ - if (of_device_is_compatible(np, "aspeed,ast2600-mac")) { - iowrite32(FTGMAC100_TM_DEFAULT, - priv->base + FTGMAC100_OFFSET_TM); - } } else { priv->rxdes0_edorr_mask = BIT(15); priv->txdes0_edotr_mask = BIT(15); @@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev) err = ftgmac100_setup_clk(priv); if (err) goto err_phy_connect; + + /* Disable ast2600 problematic HW arbitration */ + if (of_device_is_compatible(np, "aspeed,ast2600-mac")) + iowrite32(FTGMAC100_TM_DEFAULT, + priv->base + FTGMAC100_OFFSET_TM); } /* Default ring sizes */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 763d2c7b5fb1..5750f9a56393 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev, info->phc_index = -1; fman_node = 
of_get_parent(mac_node); - if (fman_node) + if (fman_node) { ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0); + of_node_put(fman_node); + } - if (ptp_node) + if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); + } if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 5f5f8c53c4a0..c8cb541572ff 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -167,7 +167,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) base = of_iomap(node, 0); if (!base) { err = -ENOMEM; - goto err_close; + goto err_put; } err = fsl_mc_allocate_irqs(mc_dev); @@ -210,6 +210,8 @@ err_free_mc_irq: fsl_mc_free_irqs(mc_dev); err_unmap: iounmap(base); +err_put: + of_node_put(node); err_close: dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); err_free_mcp: diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c index 5d7aef73df61..fb5120d90f26 100644 --- a/drivers/net/ethernet/fungible/funcore/fun_dev.c +++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c @@ -586,8 +586,8 @@ static int fun_get_dev_limits(struct fun_dev *fdev) /* Calculate the max QID based on SQ/CQ/doorbell counts. * SQ/CQ doorbells alternate. */ - num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) / - (fdev->db_stride * 4); + num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >> + (2 + NVME_CAP_STRIDE(fdev->cap_reg)); fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1; fdev->kern_end_qid = fdev->max_qid + 1; return 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 190590d32faf..7dfcf78b57fb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -2871,7 +2871,6 @@ continue_reset: running = adapter->state == __IAVF_RUNNING; if (running) { - netdev->flags &= ~IFF_UP; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -2988,7 +2987,7 @@ continue_reset: * to __IAVF_RUNNING */ iavf_up_complete(adapter); - netdev->flags |= IFF_UP; + iavf_irq_enable(adapter, true); } else { iavf_change_state(adapter, __IAVF_DOWN); @@ -3004,10 +3003,8 @@ continue_reset: reset_err: mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); - if (running) { + if (running) iavf_change_state(adapter, __IAVF_RUNNING); - netdev->flags |= IFF_UP; - } dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d4f1874df7d0..8ed3c9ab7ff7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -301,7 +301,6 @@ enum ice_vsi_state { ICE_VSI_NETDEV_REGISTERED, ICE_VSI_UMAC_FLTR_CHANGED, ICE_VSI_MMAC_FLTR_CHANGED, - ICE_VSI_VLAN_FLTR_CHANGED, ICE_VSI_PROMISC_CHANGED, ICE_VSI_STATE_NBITS /* must be last */ }; @@ -672,7 +671,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) { - return !!vsi->xdp_prog; + return !!READ_ONCE(vsi->xdp_prog); } static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 5daade32ea62..fba178e07600 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { struct net_device *netdev; - if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list) + if (!vsi || vsi->type != ICE_VSI_PF) return; netdev = vsi->netdev; @@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) int base_idx, i; if (!vsi || vsi->type != ICE_VSI_PF) - return -EINVAL; + return 0; pf = vsi->back; netdev = vsi->netdev; @@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf) if (!pf_vsi) return; - ice_free_cpu_rx_rmap(pf_vsi); ice_clear_arfs(pf_vsi); } @@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf) return; ice_remove_arfs(pf); - if (ice_set_cpu_rx_rmap(pf_vsi)) { - dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n"); - return; - } ice_init_arfs(pf_vsi); } diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index af57eb114966..85a94483c2ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -58,7 +58,16 @@ int ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask) { - return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); + struct ice_pf *pf = hw->back; + int result; + + result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); + if (result) + dev_err(ice_pf_to_dev(pf), + "Error setting promisc mode on VSI %i (rc=%d)\n", + vsi->vsi_num, result); + + return result; } /** @@ -73,7 +82,16 @@ int ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, u8 promisc_mask) { - return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); + struct ice_pf *pf = hw->back; + int result; + + result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); + if (result) + dev_err(ice_pf_to_dev(pf), + "Error clearing promisc mode on VSI %i (rc=%d)\n", + vsi->vsi_num, result); + + return result; } /** @@ -87,7 +105,16 @@ int ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { - return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + struct ice_pf *pf = hw->back; + int result; + + result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + if (result) + dev_err(ice_pf_to_dev(pf), + "Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n", + ice_get_hw_vsi_num(hw, vsi_handle), vid, result); + + return result; } /** @@ -101,7 +128,16 @@ int ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { - return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + struct ice_pf *pf = hw->back; + int result; + + result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); + if (result) + dev_err(ice_pf_to_dev(pf), + "Error setting promisc mode on VSI %i for VID %u (rc=%d)\n", + ice_get_hw_vsi_num(hw, vsi_handle), vid, result); + + return result; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index b897926f817d..6d19c58ccacd 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1480,6 +1480,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->tx_tstamps = &pf->ptp.port.tx; ring->dev = dev; ring->count = vsi->num_tx_desc; + ring->txq_teid = ICE_INVAL_TEID; if (dvm_ena) ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2; else @@ -2688,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) return; vsi->irqs_ready = false; + 
ice_free_cpu_rx_rmap(vsi); + ice_for_each_q_vector(vsi, i) { u16 vector = i + base; int irq_num; @@ -2701,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) continue; /* clear the affinity notifier in the IRQ descriptor */ - irq_set_affinity_notifier(irq_num, NULL); + if (!IS_ENABLED(CONFIG_RFS_ACCEL)) + irq_set_affinity_notifier(irq_num, NULL); /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(irq_num, NULL); @@ -2983,6 +2987,8 @@ int ice_vsi_release(struct ice_vsi *vsi) } } + if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) + ice_clear_dflt_vsi(pf->first_sw); ice_fltr_remove_all(vsi); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index b588d7995631..5b1198859da7 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -243,8 +243,7 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) { return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || - test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) || - test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); + test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); } /** @@ -260,10 +259,15 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) if (vsi->type != ICE_VSI_PF) return 0; - if (ice_vsi_has_non_zero_vlans(vsi)) - status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); - else - status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + if (ice_vsi_has_non_zero_vlans(vsi)) { + promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX); + status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, + promisc_m); + } else { + status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + promisc_m, 0); + } + return status; } @@ -280,10 +284,15 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) if (vsi->type != ICE_VSI_PF) return 0; - if (ice_vsi_has_non_zero_vlans(vsi)) - status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); - else - status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + if (ice_vsi_has_non_zero_vlans(vsi)) { + promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX); + status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, + promisc_m); + } else { + status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + promisc_m, 0); + } + return status; } @@ -302,7 +311,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; u32 changed_flags = 0; - u8 promisc_m; int err; if (!vsi->netdev) @@ -320,7 +328,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) if (ice_vsi_fltr_changed(vsi)) { clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); - clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); /* grab the netdev's addr_list_lock */ netif_addr_lock_bh(netdev); @@ -369,29 +376,15 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { if (vsi->current_netdev_flags & IFF_ALLMULTI) { - if (ice_vsi_has_non_zero_vlans(vsi)) - promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; - else - promisc_m = ICE_MCAST_PROMISC_BITS; - - err = ice_set_promisc(vsi, promisc_m); + err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS); if (err) { - netdev_err(netdev, "Error setting 
Multicast promiscuous mode on VSI %i\n", - vsi->vsi_num); vsi->current_netdev_flags &= ~IFF_ALLMULTI; goto out_promisc; } } else { /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ - if (ice_vsi_has_non_zero_vlans(vsi)) - promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; - else - promisc_m = ICE_MCAST_PROMISC_BITS; - - err = ice_clear_promisc(vsi, promisc_m); + err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS); if (err) { - netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", - vsi->vsi_num); vsi->current_netdev_flags |= IFF_ALLMULTI; goto out_promisc; } @@ -2517,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); } + err = ice_set_cpu_rx_rmap(vsi); + if (err) { + netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", + vsi->vsi_num, ERR_PTR(err)); + goto free_q_irqs; + } + vsi->irqs_ready = true; return 0; @@ -2569,7 +2569,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) spin_lock_init(&xdp_ring->tx_lock); for (j = 0; j < xdp_ring->count; j++) { tx_desc = ICE_TX_DESC(xdp_ring, j); - tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE); + tx_desc->cmd_type_offset_bsz = 0; } } @@ -2765,8 +2765,10 @@ free_qmap: ice_for_each_xdp_txq(vsi, i) if (vsi->xdp_rings[i]) { - if (vsi->xdp_rings[i]->desc) + if (vsi->xdp_rings[i]->desc) { + synchronize_rcu(); ice_free_tx_ring(vsi->xdp_rings[i]); + } kfree_rcu(vsi->xdp_rings[i], rcu); vsi->xdp_rings[i] = NULL; } @@ -3488,6 +3490,20 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; + while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) + usleep_range(1000, 2000); + + /* Add multicast promisc rule for the VLAN ID to be added if + * all-multicast is currently enabled. + */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) { + ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, + vid); + if (ret) + goto finish; + } + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged @@ -3495,8 +3511,23 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) */ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); ret = vlan_ops->add_vlan(vsi, &vlan); - if (!ret) - set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); + if (ret) + goto finish; + + /* If all-multicast is currently enabled and this VLAN ID is only one + * besides VLAN-0 we have to update look-up type of multicast promisc + * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. 
+ */ + if ((vsi->current_netdev_flags & IFF_ALLMULTI) && + ice_vsi_num_non_zero_vlans(vsi) == 1) { + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_PROMISC_BITS, 0); + ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, 0); + } + +finish: + clear_bit(ICE_CFG_BUSY, vsi->state); return ret; } @@ -3522,6 +3553,9 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; + while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) + usleep_range(1000, 2000); + vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); /* Make sure VLAN delete is successful before updating VLAN @@ -3530,10 +3564,33 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); ret = vlan_ops->del_vlan(vsi, &vlan); if (ret) - return ret; + goto finish; - set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); - return 0; + /* Remove multicast promisc rule for the removed VLAN ID if + * all-multicast is enabled. + */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, vid); + + if (!ice_vsi_has_non_zero_vlans(vsi)) { + /* Update look-up type of multicast promisc rule for VLAN 0 + * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when + * all-multicast is enabled and VLAN 0 is the only VLAN rule. + */ + if (vsi->current_netdev_flags & IFF_ALLMULTI) { + ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_VLAN_PROMISC_BITS, + 0); + ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, + ICE_MCAST_PROMISC_BITS, 0); + } + } + +finish: + clear_bit(ICE_CFG_BUSY, vsi->state); + + return ret; } /** @@ -3642,20 +3699,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf) */ ice_napi_add(vsi); - status = ice_set_cpu_rx_rmap(vsi); - if (status) { - dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", - vsi->vsi_num, status); - goto unroll_napi_add; - } status = ice_init_mac_fltr(pf); if (status) - goto free_cpu_rx_map; + goto unroll_napi_add; return 0; -free_cpu_rx_map: - ice_free_cpu_rx_rmap(vsi); unroll_napi_add: ice_tc_indir_block_unregister(vsi); unroll_cfg_netdev: @@ -5117,7 +5166,6 @@ static int __maybe_unused ice_suspend(struct device *dev) continue; ice_vsi_free_q_vectors(pf->vsi[v]); } - ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); ice_clear_interrupt_scheme(pf); pci_save_state(pdev); @@ -5475,16 +5523,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) /* Add filter for new MAC. If filter exists, return success */ err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); - if (err == -EEXIST) + if (err == -EEXIST) { /* Although this MAC filter is already present in hardware it's * possible in some cases (e.g. bonding) that dev_addr was * modified outside of the driver and needs to be restored back * to this value. 
*/ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); - else if (err) + + return 0; + } else if (err) { /* error if the new filter addition failed */ err = -EADDRNOTAVAIL; + } err_update_filters: if (err) { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 3f1a63815bac..69ff4b929772 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -1358,9 +1358,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - /* Skip queue if not enabled */ if (!test_bit(vf_q_id, vf->txq_ena)) - continue; + dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", + vf_q_id, vsi->vsi_num); ice_fill_txq_meta(vsi, ring, &txq_meta); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index dfbcaf08520e..866ee4df9671 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) { ice_clean_tx_ring(vsi->tx_rings[q_idx]); - if (ice_is_xdp_ena_vsi(vsi)) + if (ice_is_xdp_ena_vsi(vsi)) { + synchronize_rcu(); ice_clean_tx_ring(vsi->xdp_rings[q_idx]); + } ice_clean_rx_ring(vsi->rx_rings[q_idx]); } @@ -918,7 +920,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, struct ice_vsi *vsi = np->vsi; struct ice_tx_ring *ring; - if (test_bit(ICE_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state)) return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi)) diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 5f9ab1842d49..c18801490649 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2751,7 +2751,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, } ret = of_get_mac_address(pnp, ppd.mac_addr); - if (ret) + if (ret == -EPROBE_DEFER) return ret; mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 939b692ffc33..ce843ea91464 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client, return 0; errout: + mutex_destroy(&mlxsw_i2c->cmd.lock); i2c_set_clientdata(client, NULL); return err; diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index 93df3049cdc0..830363bafcce 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -28,6 +28,7 @@ config KS8842 config KS8851 tristate "Micrel KS8851 SPI" depends on SPI + depends on PTP_1588_CLOCK_OPTIONAL select MII select CRC32 select EEPROM_93CX6 @@ -39,6 +40,7 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM + depends on PTP_1588_CLOCK_OPTIONAL select MII select CRC32 select EEPROM_93CX6 diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c index ce5970bdcc6a..2679111ef669 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c @@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, 
lan966x_mac_process_raw_entry(&raw_entries[column], mac, &vid, &dest_idx); - WARN_ON(dest_idx > lan966x->num_phys_ports); + if (WARN_ON(dest_idx > lan966x->num_phys_ports)) + continue; /* If the entry in SW is found, then there is nothing * to do @@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, lan966x_mac_process_raw_entry(&raw_entries[column], mac, &vid, &dest_idx); - WARN_ON(dest_idx > lan966x->num_phys_ports); + if (WARN_ON(dest_idx > lan966x->num_phys_ports)) + continue; mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx); if (!mac_entry) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 1f8c67f0261b..958e55596b82 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -446,6 +446,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, ANA_CPU_FWD_CFG_MLD_REDIR_ENA))) return true; + if (eth_type_vlan(skb->protocol)) { + skb = skb_vlan_untag(skb); + if (unlikely(!skb)) + return false; + } + if (skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_IGMP) return false; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c index ae782778d6dd..0a1041da4384 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -29,10 +29,10 @@ enum { static u64 lan966x_ptp_get_nominal_value(void) { - u64 res = 0x304d2df1; - - res <<= 32; - return res; + /* This is the default value that for each system clock, the time of day + * is increased. It has the format 5.59 nanosecond. + */ + return 0x304d4873ecade305; } int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c index e3555c94294d..df2bee678559 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c @@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev, if (netif_is_bridge_master(info->upper_dev) && !info->linking) switchdev_bridge_port_unoffload(port->dev, port, - &lan966x_switchdev_nb, - &lan966x_switchdev_blocking_nb); + NULL, NULL); return NOTIFY_DONE; } diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 50ac3ee2577a..21d2645885ce 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2903,11 +2903,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, status = myri10ge_xmit(curr, dev); if (status != 0) { dev_kfree_skb_any(curr); - if (segs != NULL) { - curr = segs; - segs = next; + skb_list_walk_safe(next, curr, next) { curr->next = NULL; - dev_kfree_skb_any(segs); + dev_kfree_skb_any(curr); } goto drop; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index e3edca187ddf..5250d1d1e49c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -489,7 +489,7 @@ struct split_type_defs { #define STATIC_DEBUG_LINE_DWORDS 9 -#define NUM_COMMON_GLOBAL_PARAMS 11 +#define NUM_COMMON_GLOBAL_PARAMS 10 #define MAX_RECURSION_DEPTH 10 diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c 
b/drivers/net/ethernet/qlogic/qede/qede_fp.c index b242000a77fd..b7cc36589f59 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -748,6 +748,9 @@ qede_build_skb(struct qede_rx_queue *rxq, buf = page_address(bd->data) + bd->page_offset; skb = build_skb(buf, rxq->rx_buf_seg_size); + if (unlikely(!skb)) + return NULL; + skb_reserve(skb, pad); skb_put(skb, len); diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index f9064532beb6..377df8b7f015 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -786,6 +786,85 @@ void efx_remove_channels(struct efx_nic *efx) kfree(efx->xdp_tx_queues); } +static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, + struct efx_tx_queue *tx_queue) +{ + if (xdp_queue_number >= efx->xdp_tx_queue_count) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is XDP %u, HW %u\n", + tx_queue->channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + return 0; +} + +static void efx_set_xdp_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + unsigned int next_queue = 0; + int xdp_queue_number = 0; + int rc; + + /* We need to mark which channels really have RX and TX + * queues, and adjust the TX queue numbers if we have separate + * RX-only and TX-only channels. + */ + efx_for_each_channel(channel, efx) { + if (channel->channel < efx->tx_channel_offset) + continue; + + if (efx_channel_is_xdp_tx(channel)) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } else { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is HW %u\n", + channel->channel, tx_queue->label, + tx_queue->queue); + } + + /* If XDP is borrowing queues from net stack, it must + * use the queue with no csum offload, which is the + * first one of the channel + * (note: tx_queue_by_type is not initialized yet) + */ + if (efx->xdp_txq_queues_mode == + EFX_XDP_TX_QUEUES_BORROWED) { + tx_queue = &channel->tx_queue[0]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } + } + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number > efx->xdp_tx_queue_count); + + /* If we have more CPUs than assigned XDP TX queues, assign the already + * existing queues to the exceeding CPUs + */ + next_queue = 0; + while (xdp_queue_number < efx->xdp_tx_queue_count) { + tx_queue = efx->xdp_tx_queues[next_queue++]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } +} + int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) { struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; @@ -857,6 +936,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) efx_init_napi_channel(efx->channel[i]); } + efx_set_xdp_channels(efx); out: /* Destroy unused channel structures */ for (i = 0; i < efx->n_channels; i++) { @@ -889,26 +969,9 @@ rollback: goto out; } -static inline int 
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, - struct efx_tx_queue *tx_queue) -{ - if (xdp_queue_number >= efx->xdp_tx_queue_count) - return -EINVAL; - - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - tx_queue->channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); - efx->xdp_tx_queues[xdp_queue_number] = tx_queue; - return 0; -} - int efx_set_channels(struct efx_nic *efx) { - struct efx_tx_queue *tx_queue; struct efx_channel *channel; - unsigned int next_queue = 0; - int xdp_queue_number; int rc; efx->tx_channel_offset = @@ -926,61 +989,14 @@ int efx_set_channels(struct efx_nic *efx) return -ENOMEM; } - /* We need to mark which channels really have RX and TX - * queues, and adjust the TX queue numbers if we have separate - * RX-only and TX-only channels. - */ - xdp_queue_number = 0; efx_for_each_channel(channel, efx) { if (channel->channel < efx->n_rx_channels) channel->rx_queue.core_index = channel->channel; else channel->rx_queue.core_index = -1; - - if (channel->channel >= efx->tx_channel_offset) { - if (efx_channel_is_xdp_tx(channel)) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - tx_queue->queue = next_queue++; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } - } else { - efx_for_each_channel_tx_queue(tx_queue, channel) { - tx_queue->queue = next_queue++; - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n", - channel->channel, tx_queue->label, - tx_queue->queue); - } - - /* If XDP is borrowing queues from net stack, it must use the queue - * with no csum offload, which is the first one of the channel - * (note: channel->tx_queue_by_type is not initialized yet) - */ - if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { - tx_queue = &channel->tx_queue[0]; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } - } - } } - WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && - xdp_queue_number != efx->xdp_tx_queue_count); - WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && - xdp_queue_number > efx->xdp_tx_queue_count); - /* If we have more CPUs than assigned XDP TX queues, assign the already - * existing queues to the exceeding CPUs - */ - next_queue = 0; - while (xdp_queue_number < efx->xdp_tx_queue_count) { - tx_queue = efx->xdp_tx_queues[next_queue++]; - rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); - if (rc == 0) - xdp_queue_number++; - } + efx_set_xdp_channels(efx); rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) @@ -1124,7 +1140,7 @@ void efx_start_channels(struct efx_nic *efx) struct efx_rx_queue *rx_queue; struct efx_channel *channel; - efx_for_each_channel(channel, efx) { + efx_for_each_channel_rev(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) { efx_init_tx_queue(tx_queue); atomic_inc(&efx->active_queues); diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 1b22c7be0088..fa8b9aacca11 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -150,6 +150,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; int i; + if (unlikely(!rx_queue->page_ring)) + return; + /* Unmap and release the pages in the recycle ring. Remove the ring. 
*/ for (i = 0; i <= rx_queue->page_ptr_mask; i++) { struct page *page = rx_queue->page_ring[i]; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index d16e031e95f4..6983799e1c05 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (unlikely(!tx_queue)) return -EINVAL; + if (!tx_queue->initialised) + return -EINVAL; + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index d530cde2b864..9bc8281b7f5b 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -101,6 +101,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "shutting down TX queue %d\n", tx_queue->queue); + tx_queue->initialised = false; + if (!tx_queue->buffer) return; diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index cd478d2cd871..00f6d347eaf7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -57,10 +57,6 @@ #define TSE_PCS_USE_SGMII_ENA BIT(0) #define TSE_PCS_IF_USE_SGMII 0x03 -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 -#define SGMII_ADAPTER_ENABLE 0x0000 - #define AUTONEGO_LINK_TIMER 20 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) @@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, unsigned int speed) { void __iomem *tse_pcs_base = pcs->tse_pcs_base; - void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; u32 val; - writew(SGMII_ADAPTER_ENABLE, - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); - pcs->autoneg = phy_dev->autoneg; if (phy_dev->autoneg == AUTONEG_ENABLE) { diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h index 442812c0a4bd..694ac25ef426 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h @@ -10,6 +10,10 @@ #include <linux/phy.h> #include <linux/timer.h> +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_ENABLE 0x0000 +#define SGMII_ADAPTER_DISABLE 0x0001 + struct tse_pcs { struct device *dev; void __iomem *tse_pcs_base; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index ecf759ee1c9f..017dbbda0c1c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -205,7 +205,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = { }; MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); -struct pci_driver loongson_dwmac_driver = { +static struct pci_driver loongson_dwmac_driver = { .name = "dwmac-loongson-pci", .id_table = loongson_dwmac_id_table, .probe = loongson_dwmac_probe, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index b7c2579c963b..ac9e6c7a33b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -18,9 +18,6 @@ #include "altr_tse_pcs.h" -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 - #define 
SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 @@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; - void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base; void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; struct device *dev = dwmac->dev; struct net_device *ndev = dev_get_drvdata(dev); struct phy_device *phy_dev = ndev->phydev; u32 val; - if ((tse_pcs_base) && (sgmii_adapter_base)) - writew(SGMII_ADAPTER_DISABLE, - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + writew(SGMII_ADAPTER_DISABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); if (splitter_base) { val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); @@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - if (tse_pcs_base && sgmii_adapter_base) + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + if (phy_dev) tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5d29f336315b..11e1055e8260 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -431,8 +431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->phylink_node = np; /* Get max speed of operation from device tree */ - if (of_property_read_u32(np, "max-speed", &plat->max_speed)) - plat->max_speed = -1; + of_property_read_u32(np, "max-speed", &plat->max_speed); plat->bus_id = of_alias_get_id(np, "ethernet"); if (plat->bus_id < 0) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 0f9c88dd1a4a..d5c1e5c4a508 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -433,8 +433,6 @@ struct axienet_local { struct net_device *ndev; struct device *dev; - struct device_node *phy_node; - struct phylink *phylink; struct phylink_config phylink_config; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c7eb05e4a6bf..d6fc3f7acdf0 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -2064,25 +2064,33 @@ static int axienet_probe(struct platform_device *pdev) if (ret) goto cleanup_clk; - lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (lp->phy_node) { - ret = axienet_mdio_setup(lp); - if (ret) - dev_warn(&pdev->dev, - "error registering MDIO bus: %d\n", ret); - } + ret = axienet_mdio_setup(lp); + if (ret) + dev_warn(&pdev->dev, + "error registering MDIO bus: %d\n", ret); + if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { - if (!lp->phy_node) { - dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); + np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); + if (!np) { + /* Deprecated: Always use "pcs-handle" for pcs_phy. + * Falling back to "phy-handle" here is only for + * backward compatibility with old device trees. 
+ */ + np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + } + if (!np) { + dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; goto cleanup_mdio; } - lp->pcs_phy = of_mdio_find_device(lp->phy_node); + lp->pcs_phy = of_mdio_find_device(np); if (!lp->pcs_phy) { ret = -EPROBE_DEFER; + of_node_put(np); goto cleanup_mdio; } + of_node_put(np); lp->pcs.ops = &axienet_pcs_ops; lp->pcs.poll = true; } @@ -2125,8 +2133,6 @@ cleanup_mdio: put_device(&lp->pcs_phy->dev); if (lp->mii_bus) axienet_mdio_teardown(lp); - of_node_put(lp->phy_node); - cleanup_clk: clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); clk_disable_unprepare(lp->axi_clk); @@ -2155,9 +2161,6 @@ static int axienet_remove(struct platform_device *pdev) clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); clk_disable_unprepare(lp->axi_clk); - of_node_put(lp->phy_node); - lp->phy_node = NULL; - free_netdev(ndev); return 0; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 069e8824c264..b00bc8173abe 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; *pskb = skb; eth = eth_hdr(skb); - if (macvlan_forward_source(skb, port, eth->h_source)) + if (macvlan_forward_source(skb, port, eth->h_source)) { + kfree_skb(skb); return RX_HANDLER_CONSUMED; + } src = macvlan_hash_lookup(port, eth->h_source); if (src && src->mode != MACVLAN_MODE_VEPA && src->mode != MACVLAN_MODE_BRIDGE) { @@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) return RX_HANDLER_PASS; } - if (macvlan_forward_source(skb, port, eth->h_source)) + if (macvlan_forward_source(skb, port, eth->h_source)) { + kfree_skb(skb); return RX_HANDLER_CONSUMED; + } if (macvlan_passthru(port)) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index baf7afac7857..53846c6b56ca 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -553,7 +553,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01; mhdr->ver = 0x01; - return 0; + return sizeof(struct mctp_i2c_hdr); } static int mctp_i2c_tx_thread(void *data) diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index 1becb1a731f6..1c1584fca632 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, int rc; rc = fwnode_irq_get(child, 0); + /* Don't wait forever if the IRQ provider doesn't become available, + * just fall back to poll mode + */ + if (rc == -EPROBE_DEFER) + rc = driver_deferred_probe_check_state(&phy->mdio.dev); if (rc == -EPROBE_DEFER) return rc; diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index c483ba67c21f..582969751b4c 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -102,6 +102,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) u32 val; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret) goto out; @@ -145,6 +148,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id, struct mscc_miim_dev *miim = bus->priv; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = 
mscc_miim_wait_pending(bus); if (ret < 0) goto out; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 19b11e896460..fc53b71dc872 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -99,15 +99,6 @@ #define PTP_TIMESTAMP_EN_PDREQ_ BIT(2) #define PTP_TIMESTAMP_EN_PDRES_ BIT(3) -#define PTP_RX_LATENCY_1000 0x0224 -#define PTP_TX_LATENCY_1000 0x0225 - -#define PTP_RX_LATENCY_100 0x0222 -#define PTP_TX_LATENCY_100 0x0223 - -#define PTP_RX_LATENCY_10 0x0220 -#define PTP_TX_LATENCY_10 0x0221 - #define PTP_TX_PARSE_L2_ADDR_EN 0x0284 #define PTP_RX_PARSE_L2_ADDR_EN 0x0244 @@ -268,15 +259,6 @@ struct lan8814_ptp_rx_ts { u16 seq_id; }; -struct kszphy_latencies { - u16 rx_10; - u16 tx_10; - u16 rx_100; - u16 tx_100; - u16 rx_1000; - u16 tx_1000; -}; - struct kszphy_ptp_priv { struct mii_timestamper mii_ts; struct phy_device *phydev; @@ -296,7 +278,6 @@ struct kszphy_ptp_priv { struct kszphy_priv { struct kszphy_ptp_priv ptp_priv; - struct kszphy_latencies latencies; const struct kszphy_type *type; int led_mode; bool rmii_ref_clk_sel; @@ -304,14 +285,6 @@ struct kszphy_priv { u64 stats[ARRAY_SIZE(kszphy_hw_stats)]; }; -static struct kszphy_latencies lan8814_latencies = { - .rx_10 = 0x22AA, - .tx_10 = 0x2E4A, - .rx_100 = 0x092A, - .tx_100 = 0x02C1, - .rx_1000 = 0x01AD, - .tx_1000 = 0x00C9, -}; static const struct kszphy_type ksz8021_type = { .led_mode_reg = MII_KSZPHY_CTRL_2, .has_broadcast_disable = true, @@ -2618,55 +2591,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) return 0; } -static int lan8814_read_status(struct phy_device *phydev) -{ - struct kszphy_priv *priv = phydev->priv; - struct kszphy_latencies *latencies = &priv->latencies; - int err; - int regval; - - err = genphy_read_status(phydev); - if (err) - return err; - - switch (phydev->speed) { - case SPEED_1000: - lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_1000, - latencies->rx_1000); - lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_1000, - latencies->tx_1000); - break; - case SPEED_100: - lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_100, - latencies->rx_100); - lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_100, - latencies->tx_100); - break; - case SPEED_10: - lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_10, - latencies->rx_10); - lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_10, - latencies->tx_10); - break; - default: - break; - } - - /* Make sure the PHY is not broken. Read idle error count, - * and reset the PHY if it is maxed out. 
- */ - regval = phy_read(phydev, MII_STAT1000); - if ((regval & 0xFF) == 0xFF) { - phy_init_hw(phydev); - phydev->link = 0; - if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) - phydev->drv->config_intr(phydev); - return genphy_config_aneg(phydev); - } - - return 0; -} - static int lan8814_config_init(struct phy_device *phydev) { int val; @@ -2690,30 +2614,8 @@ static int lan8814_config_init(struct phy_device *phydev) return 0; } -static void lan8814_parse_latency(struct phy_device *phydev) -{ - const struct device_node *np = phydev->mdio.dev.of_node; - struct kszphy_priv *priv = phydev->priv; - struct kszphy_latencies *latency = &priv->latencies; - u32 val; - - if (!of_property_read_u32(np, "lan8814,latency_rx_10", &val)) - latency->rx_10 = val; - if (!of_property_read_u32(np, "lan8814,latency_tx_10", &val)) - latency->tx_10 = val; - if (!of_property_read_u32(np, "lan8814,latency_rx_100", &val)) - latency->rx_100 = val; - if (!of_property_read_u32(np, "lan8814,latency_tx_100", &val)) - latency->tx_100 = val; - if (!of_property_read_u32(np, "lan8814,latency_rx_1000", &val)) - latency->rx_1000 = val; - if (!of_property_read_u32(np, "lan8814,latency_tx_1000", &val)) - latency->tx_1000 = val; -} - static int lan8814_probe(struct phy_device *phydev) { - const struct device_node *np = phydev->mdio.dev.of_node; struct kszphy_priv *priv; u16 addr; int err; @@ -2724,13 +2626,10 @@ static int lan8814_probe(struct phy_device *phydev) priv->led_mode = -1; - priv->latencies = lan8814_latencies; - phydev->priv = priv; if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || - !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) || - of_property_read_bool(np, "lan8814,ignore-ts")) + !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) return 0; /* Strap-in value for PHY address, below register read gives starting @@ -2746,7 +2645,6 @@ static int lan8814_probe(struct phy_device *phydev) return err; } - lan8814_parse_latency(phydev); lan8814_ptp_init(phydev); return 0; @@ -2928,7 +2826,7 @@ static struct phy_driver ksphy_driver[] = { .config_init = lan8814_config_init, .probe = lan8814_probe, .soft_reset = genphy_soft_reset, - .read_status = lan8814_read_status, + .read_status = ksz9031_read_status, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index 389df3f4293c..3f79bbbe62d3 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -706,7 +706,6 @@ static int lan87xx_read_status(struct phy_device *phydev) static int lan87xx_config_aneg(struct phy_device *phydev) { u16 ctl = 0; - int rc; switch (phydev->master_slave_set) { case MASTER_SLAVE_CFG_MASTER_FORCE: @@ -722,11 +721,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev) return -EOPNOTSUPP; } - rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl); - if (rc == 1) - rc = genphy_soft_reset(phydev); - - return rc; + return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl); } static struct phy_driver microchip_t1_phy_driver[] = { diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 88396ff99f03..6865d32270e5 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue) spin_lock(&sl->lock); if (netif_queue_stopped(dev)) { - if (!netif_running(dev)) + if (!netif_running(dev) || !sl->tty) goto out; /* May be we must check transmitter timeout here ? 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 276a0e42ca8e..dbe4c0a4be2c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* NETIF_F_LLTX requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); - queue->trans_start = jiffies; + txq_trans_cond_update(queue); /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index ea06d10e1c21..ca409d450a29 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (start_of_descs != desc_offset) goto err; - /* self check desc_offset from header*/ - if (desc_offset >= skb_len) + /* self check desc_offset from header and make sure that the + * bounds of the metadata array are inside the SKB + */ + if (pkt_count * 2 + desc_offset >= skb_len) goto err; + /* Packets must not overlap the metadata array */ + skb_trim(skb, desc_offset); + if (pkt_count == 0) goto err; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 1b5714926d81..eb0121a64d6d 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); rcv = rcu_dereference(priv->peer); - if (unlikely(!rcv)) { + if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { kfree_skb(skb); goto drop; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 85e362461d71..cfc30ce4c6e1 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1265,6 +1265,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb, eth = (struct ethhdr *)skb->data; skb_reset_mac_header(skb); + skb_reset_mac_len(skb); /* we set the ethernet destination and the source addresses to the * address of the VRF device. 
@@ -1294,9 +1295,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb, */ static int vrf_add_mac_header_if_unset(struct sk_buff *skb, struct net_device *vrf_dev, - u16 proto) + u16 proto, struct net_device *orig_dev) { - if (skb_mac_header_was_set(skb)) + if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev)) return 0; return vrf_prepare_mac_header(skb, vrf_dev, proto); @@ -1402,6 +1403,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, /* if packet is NDISC then keep the ingress interface */ if (!is_ndisc) { + struct net_device *orig_dev = skb->dev; + vrf_rx_stats(vrf_dev, skb->len); skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; @@ -1410,7 +1413,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, int err; err = vrf_add_mac_header_if_unset(skb, vrf_dev, - ETH_P_IPV6); + ETH_P_IPV6, + orig_dev); if (likely(!err)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); @@ -1440,6 +1444,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { + struct net_device *orig_dev = skb->dev; + skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IPCB(skb)->flags |= IPSKB_L3SLAVE; @@ -1460,7 +1466,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, if (!list_empty(&vrf_dev->ptype_all)) { int err; - err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP); + err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP, + orig_dev); if (likely(!err)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index de97ff98d36e..8a5e3a6d32d7 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (rd == NULL) - return -ENOBUFS; + return -ENOMEM; if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { kfree(rd); - return -ENOBUFS; + return -ENOMEM; } rd->remote_ip = *ip; diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 63e1c2d783c5..73693c66cef1 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1633,7 +1633,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar) return; } - ret = mmc_hw_reset(ar_sdio->func->card->host); + ret = mmc_hw_reset(ar_sdio->func->card); if (ret) ath10k_warn(ar, "unable to reset sdio: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index d5b83f90d27a..e6b34b0d61bd 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -3136,6 +3136,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, arvif->do_not_send_tmpl = true; else arvif->do_not_send_tmpl = false; + + if (vif->bss_conf.he_support) { + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + WMI_VDEV_PARAM_BA_MODE, + WMI_BA_MODE_BUFFER_SIZE_256); + if (ret) + ath11k_warn(ar->ab, + "failed to set BA BUFFER SIZE 256 for vdev: %d\n", + arvif->vdev_id); + else + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "Set BA BUFFER SIZE 256 for VDEV: %d\n", + arvif->vdev_id); + } } if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { @@ -3171,14 +3185,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (arvif->is_up && vif->bss_conf.he_support && vif->bss_conf.he_oper.params) { - ret = 
ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - WMI_VDEV_PARAM_BA_MODE, - WMI_BA_MODE_BUFFER_SIZE_256); - if (ret) - ath11k_warn(ar->ab, - "failed to set BA BUFFER SIZE 256 for vdev: %d\n", - arvif->vdev_id); - param_id = WMI_VDEV_PARAM_HEOPS_0_31; param_value = vif->bss_conf.he_oper.params; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 98090e40e1cf..e2791d45f5f5 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) continue; txinfo = IEEE80211_SKB_CB(bf->bf_mpdu); - fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0]; + fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0]; if (fi->keyix == keyix) return true; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d0caf1de2bde..db83cc4ba810 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); BUILD_BUG_ON(sizeof(struct ath_frame_info) > - sizeof(tx_info->rate_driver_data)); - return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; + sizeof(tx_info->status.status_driver_data)); + return (struct ath_frame_info *) &tx_info->status.status_driver_data[0]; } static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) @@ -2542,6 +2542,16 @@ skip_tx_complete: spin_unlock_irqrestore(&sc->tx.txbuflock, flags); } +static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info) +{ + void *ptr = &tx_info->status; + + memset(ptr + sizeof(tx_info->status.rates), 0, + sizeof(tx_info->status) - + sizeof(tx_info->status.rates) - + sizeof(tx_info->status.status_driver_data)); +} + static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int nframes, int nbad, int txok) @@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_hw *ah = sc->sc_ah; u8 i, tx_rateindex; + ath_clear_tx_status(tx_info); + if (txok) tx_info->status.ack_signal = ts->ts_rssi; @@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.ampdu_len = nframes; tx_info->status.ampdu_ack_len = nframes - nbad; + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; + + for (i = tx_rateindex + 1; i < hw->max_rates; i++) { + tx_info->status.rates[i].count = 0; + tx_info->status.rates[i].idx = -1; + } + if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { /* @@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.rates[tx_rateindex].count = hw->max_rate_tries; } - - for (i = tx_rateindex + 1; i < hw->max_rates; i++) { - tx_info->status.rates[i].count = 0; - tx_info->status.rates[i].idx = -1; - } - - tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; - - /* we report airtime in ath_tx_count_airtime(), don't report twice */ - tx_info->status.tx_time = 0; } static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index ba3c159111d3..212fbbe1cd7e 100644 --- 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype { BRCMF_SDIO_FT_SUB, }; -#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) +#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu)) /* SDIO Pad drive strength to select value mappings */ struct sdiod_drive_str { @@ -4165,7 +4165,7 @@ static int brcmf_sdio_bus_reset(struct device *dev) /* reset the adapter */ sdio_claim_host(sdiodev->func1); - mmc_hw_reset(sdiodev->func1->card->host); + mmc_hw_reset(sdiodev->func1->card); sdio_release_host(sdiodev->func1); brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index bde9e4bbfffe..4f3238d2a171 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -2639,7 +2639,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) /* Run a HW reset of the SDIO interface. */ sdio_claim_host(func); - ret = mmc_hw_reset(func->card->host); + ret = mmc_hw_reset(func->card); sdio_release_host(func); switch (ret) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 8a22ee581674..df85ebc6e1df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9); /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */ - mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf); + mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf); /* RG_SSUSB_CDR_BR_PE1D = 0x3 */ mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 72fc41ac83c0..9140b0163474 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -146,7 +146,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) * To guarantee that the SDIO card is power cycled, as required to make * the FW programming to succeed, let's do a brute force HW reset. */ - mmc_hw_reset(card->host); + mmc_hw_reset(card); sdio_enable_func(func); sdio_release_host(func); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index d6d056963c06..877d2ec4ea9f 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -91,3 +91,16 @@ config NVME_TCP from https://github.com/linux-nvme/nvme-cli. If unsure, say N. + +config NVME_APPLE + tristate "Apple ANS2 NVM Express host driver" + depends on OF && BLOCK + depends on APPLE_RTKIT && APPLE_SART + depends on ARCH_APPLE || COMPILE_TEST + select NVME_CORE + help + This provides support for the NVMe controller embedded in Apple SoCs + such as the M1. + + To compile this driver as a module, choose M here: the + module will be called nvme-apple. 
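For reference, building the new driver as a module roughly amounts to the following config fragment (a sketch only, assuming an Apple silicon platform configuration where the RTKit and SART helpers are already enabled; NVME_CORE is pulled in automatically via select):

	CONFIG_ARCH_APPLE=y
	CONFIG_APPLE_RTKIT=y
	CONFIG_APPLE_SART=y
	CONFIG_NVME_APPLE=m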
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index 476c5c988496..a36ae1612059 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o obj-$(CONFIG_NVME_FC) += nvme-fc.o obj-$(CONFIG_NVME_TCP) += nvme-tcp.o +obj-$(CONFIG_NVME_APPLE) += nvme-apple.o nvme-core-y := core.o ioctl.o constants.o nvme-core-$(CONFIG_TRACING) += trace.o @@ -25,3 +26,5 @@ nvme-rdma-y += rdma.o nvme-fc-y += fc.o nvme-tcp-y += tcp.o + +nvme-apple-y += apple.o diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c new file mode 100644 index 000000000000..d702d7d60235 --- /dev/null +++ b/drivers/nvme/host/apple.c @@ -0,0 +1,1593 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Apple ANS NVM Express device driver + * Copyright The Asahi Linux Contributors + * + * Based on the pci.c NVM Express device driver + * Copyright (c) 2011-2014, Intel Corporation. + * and on the rdma.c NVMe over Fabrics RDMA host code. + * Copyright (c) 2015-2016 HGST, a Western Digital Company. + */ + +#include <linux/async.h> +#include <linux/blkdev.h> +#include <linux/blk-mq.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/interrupt.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/once.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/soc/apple/rtkit.h> +#include <linux/soc/apple/sart.h> +#include <linux/reset.h> +#include <linux/time64.h> + +#include "nvme.h" + +#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC +#define APPLE_ANS_MAX_QUEUE_DEPTH 64 + +#define APPLE_ANS_COPROC_CPU_CONTROL 0x44 +#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4) + +#define APPLE_ANS_ACQ_DB 0x1004 +#define APPLE_ANS_IOCQ_DB 0x100c + +#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210 + +#define APPLE_ANS_BOOT_STATUS 0x1300 +#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55 + +#define APPLE_ANS_UNKNOWN_CTRL 0x24008 +#define APPLE_ANS_PRP_NULL_CHECK BIT(11) + +#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908 +#define APPLE_ANS_LINEAR_SQ_EN BIT(0) + +#define APPLE_ANS_LINEAR_ASQ_DB 0x2490c +#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910 + +#define APPLE_NVMMU_NUM_TCBS 0x28100 +#define APPLE_NVMMU_ASQ_TCB_BASE 0x28108 +#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110 +#define APPLE_NVMMU_TCB_INVAL 0x28118 +#define APPLE_NVMMU_TCB_STAT 0x28120 + +/* + * This controller is a bit weird in the way command tags works: Both the + * admin and the IO queue share the same tag space. Additionally, tags + * cannot be higher than 0x40 which effectively limits the combined + * queue depth to 0x40. Instead of wasting half of that on the admin queue + * which gets much less traffic we instead reduce its size here. + * The controller also doesn't support async event such that no space must + * be reserved for NVME_NR_AEN_COMMANDS. + */ +#define APPLE_NVME_AQ_DEPTH 2 +#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1) + +/* + * These can be higher, but we need to ensure that any command doesn't + * require an sg allocation that needs more than a page of data. + */ +#define NVME_MAX_KB_SZ 4096 +#define NVME_MAX_SEGS 127 + +/* + * This controller comes with an embedded IOMMU known as NVMMU. + * The NVMMU is pointed to an array of TCBs indexed by the command tag. 
+ * Each command must be configured inside this structure before it's allowed + * to execute, including commands that don't require DMA transfers. + * + * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on the + * admin queue): Those commands must still be added to the NVMMU but the DMA + * buffers cannot be represented as PRPs and must instead be allowed using SART. + * + * Programming the PRPs to the same values as those in the submission queue + * looks rather silly at first. This hardware is however designed for a kernel + * that runs the NVMMU code in a higher exception level than the NVMe driver. + * In that setting the NVMe driver first programs the submission queue entry + * and then executes a hypercall to the code that is allowed to program the + * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while + * verifying that they don't point to kernel text, data, pagetables, or similar + * protected areas before programming the TCB to point to this shadow copy. + * Since Linux doesn't do any of that we may as well just point both the queue + * and the TCB PRP pointer to the same memory. + */ +struct apple_nvmmu_tcb { + u8 opcode; + +#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0) +#define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1) + u8 dma_flags; + + u8 command_id; + u8 _unk0; + __le16 length; + u8 _unk1[18]; + __le64 prp1; + __le64 prp2; + u8 _unk2[16]; + u8 aes_iv[8]; + u8 _aes_unk[64]; +}; + +/* + * The Apple NVMe controller only supports a single admin and a single IO queue + * which are both limited to 64 entries and share a single interrupt. + * + * The completion queue works as usual. The submission "queue" instead is + * an array indexed by the command tag on this hardware. Commands must also be + * present in the NVMMU's tcb array. They are triggered by writing their tag to + * a MMIO register. + */ +struct apple_nvme_queue { + struct nvme_command *sqes; + struct nvme_completion *cqes; + struct apple_nvmmu_tcb *tcbs; + + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + dma_addr_t tcb_dma_addr; + + u32 __iomem *sq_db; + u32 __iomem *cq_db; + + u16 cq_head; + u8 cq_phase; + + bool is_adminq; + bool enabled; +}; + +/* + * The apple_nvme_iod describes the data in an I/O. + * + * The sg pointer contains the list of PRP chunk allocations in addition + * to the actual struct scatterlist. + */ +struct apple_nvme_iod { + struct nvme_request req; + struct nvme_command cmd; + struct apple_nvme_queue *q; + int npages; /* In the PRP list. 
0 means small pool in use */ + int nents; /* Used in scatterlist */ + dma_addr_t first_dma; + unsigned int dma_len; /* length of single DMA segment mapping */ + struct scatterlist *sg; +}; + +struct apple_nvme { + struct device *dev; + + void __iomem *mmio_coproc; + void __iomem *mmio_nvme; + + struct device **pd_dev; + struct device_link **pd_link; + int pd_count; + + struct apple_sart *sart; + struct apple_rtkit *rtk; + struct reset_control *reset; + + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + mempool_t *iod_mempool; + + struct nvme_ctrl ctrl; + struct work_struct remove_work; + + struct apple_nvme_queue adminq; + struct apple_nvme_queue ioq; + + struct blk_mq_tag_set admin_tagset; + struct blk_mq_tag_set tagset; + + int irq; + spinlock_t lock; +}; + +static_assert(sizeof(struct nvme_command) == 64); +static_assert(sizeof(struct apple_nvmmu_tcb) == 128); + +static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl) +{ + return container_of(ctrl, struct apple_nvme, ctrl); +} + +static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return container_of(q, struct apple_nvme, adminq); + else + return container_of(q, struct apple_nvme, ioq); +} + +static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return APPLE_NVME_AQ_DEPTH; + else + return APPLE_ANS_MAX_QUEUE_DEPTH; +} + +static void apple_nvme_rtkit_crashed(void *cookie) +{ + struct apple_nvme *anv = cookie; + + dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot"); + nvme_reset_ctrl(&anv->ctrl); +} + +static int apple_nvme_sart_dma_setup(void *cookie, + struct apple_rtkit_shmem *bfr) +{ + struct apple_nvme *anv = cookie; + int ret; + + if (bfr->iova) + return -EINVAL; + if (!bfr->size) + return -EINVAL; + + bfr->buffer = + dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL); + if (!bfr->buffer) + return -ENOMEM; + + ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size); + if (ret) { + dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova); + bfr->buffer = NULL; + return -ENOMEM; + } + + return 0; +} + +static void apple_nvme_sart_dma_destroy(void *cookie, + struct apple_rtkit_shmem *bfr) +{ + struct apple_nvme *anv = cookie; + + apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size); + dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova); +} + +static const struct apple_rtkit_ops apple_nvme_rtkit_ops = { + .crashed = apple_nvme_rtkit_crashed, + .shmem_setup = apple_nvme_sart_dma_setup, + .shmem_destroy = apple_nvme_sart_dma_destroy, +}; + +static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag) +{ + struct apple_nvme *anv = queue_to_apple_nvme(q); + + writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL); + if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT)) + dev_warn_ratelimited(anv->dev, + "NVMMU TCB invalidation failed\n"); +} + +static void apple_nvme_submit_cmd(struct apple_nvme_queue *q, + struct nvme_command *cmd) +{ + struct apple_nvme *anv = queue_to_apple_nvme(q); + u32 tag = nvme_tag_from_cid(cmd->common.command_id); + struct apple_nvmmu_tcb *tcb = &q->tcbs[tag]; + + tcb->opcode = cmd->common.opcode; + tcb->prp1 = cmd->common.dptr.prp1; + tcb->prp2 = cmd->common.dptr.prp2; + tcb->length = cmd->rw.length; + tcb->command_id = tag; + + if (nvme_is_write(cmd)) + tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE; + else + tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE; + + memcpy(&q->sqes[tag], cmd, 
sizeof(*cmd)); + + /* + * This lock here doesn't make much sense at a first glace but + * removing it will result in occasional missed completetion + * interrupts even though the commands still appear on the CQ. + * It's unclear why this happens but our best guess is that + * there is a bug in the firmware triggered when a new command + * is issued while we're inside the irq handler between the + * NVMMU invalidation (and making the tag available again) + * and the final CQ update. + */ + spin_lock_irq(&anv->lock); + writel(tag, q->sq_db); + spin_unlock_irq(&anv->lock); +} + +/* + * From pci.c: + * Will slightly overestimate the number of pages needed. This is OK + * as it only leads to a small amount of wasted memory for the lifetime of + * the I/O. + */ +static inline size_t apple_nvme_iod_alloc_size(void) +{ + const unsigned int nprps = DIV_ROUND_UP( + NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE); + const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); + const size_t alloc_size = sizeof(__le64 *) * npages + + sizeof(struct scatterlist) * NVME_MAX_SEGS; + + return alloc_size; +} + +static void **apple_nvme_iod_list(struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + + return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); +} + +static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req) +{ + const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; + int i; + + for (i = 0; i < iod->npages; i++) { + __le64 *prp_list = apple_nvme_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); + + dma_pool_free(anv->prp_page_pool, prp_list, dma_addr); + dma_addr = next_dma_addr; + } +} + +static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if (iod->dma_len) { + dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len, + rq_dma_dir(req)); + return; + } + + WARN_ON_ONCE(!iod->nents); + + dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); + if (iod->npages == 0) + dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0], + iod->first_dma); + else + apple_nvme_free_prps(anv, req); + mempool_free(iod->sg, anv->iod_mempool); +} + +static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + dma_addr_t phys = sg_phys(sg); + + pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n", + i, &phys, sg->offset, sg->length, &sg_dma_address(sg), + sg_dma_len(sg)); + } +} + +static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv, + struct request *req, + struct nvme_rw_command *cmnd) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct dma_pool *pool; + int length = blk_rq_payload_bytes(req); + struct scatterlist *sg = iod->sg; + int dma_len = sg_dma_len(sg); + u64 dma_addr = sg_dma_address(sg); + int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); + __le64 *prp_list; + void **list = apple_nvme_iod_list(req); + dma_addr_t prp_dma; + int nprps, i; + + length -= (NVME_CTRL_PAGE_SIZE - offset); + if (length <= 0) { + iod->first_dma = 0; + goto done; + } + + dma_len -= (NVME_CTRL_PAGE_SIZE - offset); + if (dma_len) { + dma_addr += (NVME_CTRL_PAGE_SIZE - offset); + } else { + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + if (length <= NVME_CTRL_PAGE_SIZE) { 
+ iod->first_dma = dma_addr; + goto done; + } + + nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); + if (nprps <= (256 / 8)) { + pool = anv->prp_small_pool; + iod->npages = 0; + } else { + pool = anv->prp_page_pool; + iod->npages = 1; + } + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) { + iod->first_dma = dma_addr; + iod->npages = -1; + return BLK_STS_RESOURCE; + } + list[0] = prp_list; + iod->first_dma = prp_dma; + i = 0; + for (;;) { + if (i == NVME_CTRL_PAGE_SIZE >> 3) { + __le64 *old_prp_list = prp_list; + + prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + if (!prp_list) + goto free_prps; + list[iod->npages++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; + old_prp_list[i - 1] = cpu_to_le64(prp_dma); + i = 1; + } + prp_list[i++] = cpu_to_le64(dma_addr); + dma_len -= NVME_CTRL_PAGE_SIZE; + dma_addr += NVME_CTRL_PAGE_SIZE; + length -= NVME_CTRL_PAGE_SIZE; + if (length <= 0) + break; + if (dma_len > 0) + continue; + if (unlikely(dma_len < 0)) + goto bad_sgl; + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } +done: + cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); + cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); + return BLK_STS_OK; +free_prps: + apple_nvme_free_prps(anv, req); + return BLK_STS_RESOURCE; +bad_sgl: + WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents), + "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req), + iod->nents); + return BLK_STS_IOERR; +} + +static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv, + struct request *req, + struct nvme_rw_command *cmnd, + struct bio_vec *bv) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); + unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; + + iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0); + if (dma_mapping_error(anv->dev, iod->first_dma)) + return BLK_STS_RESOURCE; + iod->dma_len = bv->bv_len; + + cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); + if (bv->bv_len > first_prp_len) + cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); + return BLK_STS_OK; +} + +static blk_status_t apple_nvme_map_data(struct apple_nvme *anv, + struct request *req, + struct nvme_command *cmnd) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + blk_status_t ret = BLK_STS_RESOURCE; + int nr_mapped; + + if (blk_rq_nr_phys_segments(req) == 1) { + struct bio_vec bv = req_bvec(req); + + if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) + return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw, + &bv); + } + + iod->dma_len = 0; + iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC); + if (!iod->sg) + return BLK_STS_RESOURCE; + sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); + iod->nents = blk_rq_map_sg(req->q, req, iod->sg); + if (!iod->nents) + goto out_free_sg; + + nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents, + rq_dma_dir(req), DMA_ATTR_NO_WARN); + if (!nr_mapped) + goto out_free_sg; + + ret = apple_nvme_setup_prps(anv, req, &cmnd->rw); + if (ret != BLK_STS_OK) + goto out_unmap_sg; + return BLK_STS_OK; + +out_unmap_sg: + dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); +out_free_sg: + mempool_free(iod->sg, anv->iod_mempool); + return ret; +} + +static __always_inline void apple_nvme_unmap_rq(struct request *req) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct apple_nvme *anv = queue_to_apple_nvme(iod->q); + + if (blk_rq_nr_phys_segments(req)) + 
apple_nvme_unmap_data(anv, req); +} + +static void apple_nvme_complete_rq(struct request *req) +{ + apple_nvme_unmap_rq(req); + nvme_complete_rq(req); +} + +static void apple_nvme_complete_batch(struct io_comp_batch *iob) +{ + nvme_complete_batch(iob, apple_nvme_unmap_rq); +} + +static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q) +{ + struct nvme_completion *hcqe = &q->cqes[q->cq_head]; + + return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase; +} + +static inline struct blk_mq_tags * +apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q) +{ + if (q->is_adminq) + return anv->admin_tagset.tags[0]; + else + return anv->tagset.tags[0]; +} + +static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q, + struct io_comp_batch *iob, u16 idx) +{ + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct nvme_completion *cqe = &q->cqes[idx]; + __u16 command_id = READ_ONCE(cqe->command_id); + struct request *req; + + apple_nvmmu_inval(q, command_id); + + req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id); + if (unlikely(!req)) { + dev_warn(anv->dev, "invalid id %d completed", command_id); + return; + } + + if (!nvme_try_complete_req(req, cqe->status, cqe->result) && + !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + apple_nvme_complete_batch)) + apple_nvme_complete_rq(req); +} + +static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q) +{ + u32 tmp = q->cq_head + 1; + + if (tmp == apple_nvme_queue_depth(q)) { + q->cq_head = 0; + q->cq_phase ^= 1; + } else { + q->cq_head = tmp; + } +} + +static bool apple_nvme_poll_cq(struct apple_nvme_queue *q, + struct io_comp_batch *iob) +{ + bool found = false; + + while (apple_nvme_cqe_pending(q)) { + found = true; + + /* + * load-load control dependency between phase and the rest of + * the cqe requires a full read memory barrier + */ + dma_rmb(); + apple_nvme_handle_cqe(q, iob, q->cq_head); + apple_nvme_update_cq_head(q); + } + + if (found) + writel(q->cq_head, q->cq_db); + + return found; +} + +static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force) +{ + bool found; + DEFINE_IO_COMP_BATCH(iob); + + if (!READ_ONCE(q->enabled) && !force) + return false; + + found = apple_nvme_poll_cq(q, &iob); + + if (!rq_list_empty(iob.req_list)) + apple_nvme_complete_batch(&iob); + + return found; +} + +static irqreturn_t apple_nvme_irq(int irq, void *data) +{ + struct apple_nvme *anv = data; + bool handled = false; + unsigned long flags; + + spin_lock_irqsave(&anv->lock, flags); + if (apple_nvme_handle_cq(&anv->ioq, false)) + handled = true; + if (apple_nvme_handle_cq(&anv->adminq, false)) + handled = true; + spin_unlock_irqrestore(&anv->lock, flags); + + if (handled) + return IRQ_HANDLED; + return IRQ_NONE; +} + +static int apple_nvme_create_cq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + /* + * Note: we (ab)use the fact that the prp fields survive if no data + * is attached to the request. 
+ */ + c.create_cq.opcode = nvme_admin_create_cq; + c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr); + c.create_cq.cqid = cpu_to_le16(1); + c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); + c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED); + c.create_cq.irq_vector = cpu_to_le16(0); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_remove_cq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + c.delete_queue.opcode = nvme_admin_delete_cq; + c.delete_queue.qid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_create_sq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + /* + * Note: we (ab)use the fact that the prp fields survive if no data + * is attached to the request. + */ + c.create_sq.opcode = nvme_admin_create_sq; + c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr); + c.create_sq.sqid = cpu_to_le16(1); + c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); + c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG); + c.create_sq.cqid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static int apple_nvme_remove_sq(struct apple_nvme *anv) +{ + struct nvme_command c = {}; + + c.delete_queue.opcode = nvme_admin_delete_sq; + c.delete_queue.qid = cpu_to_le16(1); + + return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); +} + +static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) +{ + struct nvme_ns *ns = hctx->queue->queuedata; + struct apple_nvme_queue *q = hctx->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct request *req = bd->rq; + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_command *cmnd = &iod->cmd; + blk_status_t ret; + + iod->npages = -1; + iod->nents = 0; + + /* + * We should not need to do this, but we're still using this to + * ensure we can drain requests on a dying queue. 
+ */ + if (unlikely(!READ_ONCE(q->enabled))) + return BLK_STS_IOERR; + + if (!nvme_check_ready(&anv->ctrl, req, true)) + return nvme_fail_nonready_command(&anv->ctrl, req); + + ret = nvme_setup_cmd(ns, req); + if (ret) + return ret; + + if (blk_rq_nr_phys_segments(req)) { + ret = apple_nvme_map_data(anv, req, cmnd); + if (ret) + goto out_free_cmd; + } + + blk_mq_start_request(req); + apple_nvme_submit_cmd(q, cmnd); + return BLK_STS_OK; + +out_free_cmd: + nvme_cleanup_cmd(req); + return ret; +} + +static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + hctx->driver_data = data; + return 0; +} + +static int apple_nvme_init_request(struct blk_mq_tag_set *set, + struct request *req, unsigned int hctx_idx, + unsigned int numa_node) +{ + struct apple_nvme_queue *q = set->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_request *nreq = nvme_req(req); + + iod->q = q; + nreq->ctrl = &anv->ctrl; + nreq->cmd = &iod->cmd; + + return 0; +} + +static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown) +{ + u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); + bool dead = false, freeze = false; + unsigned long flags; + + if (apple_rtkit_is_crashed(anv->rtk)) + dead = true; + if (!(csts & NVME_CSTS_RDY)) + dead = true; + if (csts & NVME_CSTS_CFS) + dead = true; + + if (anv->ctrl.state == NVME_CTRL_LIVE || + anv->ctrl.state == NVME_CTRL_RESETTING) { + freeze = true; + nvme_start_freeze(&anv->ctrl); + } + + /* + * Give the controller a chance to complete all entered requests if + * doing a safe shutdown. + */ + if (!dead && shutdown && freeze) + nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT); + + nvme_stop_queues(&anv->ctrl); + + if (!dead) { + if (READ_ONCE(anv->ioq.enabled)) { + apple_nvme_remove_sq(anv); + apple_nvme_remove_cq(anv); + } + + if (shutdown) + nvme_shutdown_ctrl(&anv->ctrl); + nvme_disable_ctrl(&anv->ctrl); + } + + WRITE_ONCE(anv->ioq.enabled, false); + WRITE_ONCE(anv->adminq.enabled, false); + mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */ + nvme_stop_admin_queue(&anv->ctrl); + + /* last chance to complete any requests before nvme_cancel_request */ + spin_lock_irqsave(&anv->lock, flags); + apple_nvme_handle_cq(&anv->ioq, true); + apple_nvme_handle_cq(&anv->adminq, true); + spin_unlock_irqrestore(&anv->lock, flags); + + blk_mq_tagset_busy_iter(&anv->tagset, nvme_cancel_request, &anv->ctrl); + blk_mq_tagset_busy_iter(&anv->admin_tagset, nvme_cancel_request, + &anv->ctrl); + blk_mq_tagset_wait_completed_request(&anv->tagset); + blk_mq_tagset_wait_completed_request(&anv->admin_tagset); + + /* + * The driver will not be starting up queues again if shutting down so + * must flush all entered requests to their failed completion to avoid + * deadlocking blk-mq hot-cpu notifier. 
+ */ + if (shutdown) { + nvme_start_queues(&anv->ctrl); + nvme_start_admin_queue(&anv->ctrl); + } +} + +static enum blk_eh_timer_return apple_nvme_timeout(struct request *req, + bool reserved) +{ + struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct apple_nvme_queue *q = iod->q; + struct apple_nvme *anv = queue_to_apple_nvme(q); + unsigned long flags; + u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS); + + if (anv->ctrl.state != NVME_CTRL_LIVE) { + /* + * From rdma.c: + * If we are resetting, connecting or deleting we should + * complete immediately because we may block controller + * teardown or setup sequence + * - ctrl disable/shutdown fabrics requests + * - connect requests + * - initialization admin requests + * - I/O requests that entered after unquiescing and + * the controller stopped responding + * + * All other requests should be cancelled by the error + * recovery work, so it's fine that we fail it here. + */ + dev_warn(anv->dev, + "I/O %d(aq:%d) timeout while not in live state\n", + req->tag, q->is_adminq); + if (blk_mq_request_started(req) && + !blk_mq_request_completed(req)) { + nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + blk_mq_complete_request(req); + } + return BLK_EH_DONE; + } + + /* check if we just missed an interrupt if we're still alive */ + if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) { + spin_lock_irqsave(&anv->lock, flags); + apple_nvme_handle_cq(q, false); + spin_unlock_irqrestore(&anv->lock, flags); + if (blk_mq_request_completed(req)) { + dev_warn(anv->dev, + "I/O %d(aq:%d) timeout: completion polled\n", + req->tag, q->is_adminq); + return BLK_EH_DONE; + } + } + + /* + * aborting commands isn't supported which leaves a full reset as our + * only option here + */ + dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n", + req->tag, q->is_adminq); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; + apple_nvme_disable(anv, false); + nvme_reset_ctrl(&anv->ctrl); + return BLK_EH_DONE; +} + +static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx, + struct io_comp_batch *iob) +{ + struct apple_nvme_queue *q = hctx->driver_data; + struct apple_nvme *anv = queue_to_apple_nvme(q); + bool found; + unsigned long flags; + + spin_lock_irqsave(&anv->lock, flags); + found = apple_nvme_poll_cq(q, iob); + spin_unlock_irqrestore(&anv->lock, flags); + + return found; +} + +static const struct blk_mq_ops apple_nvme_mq_admin_ops = { + .queue_rq = apple_nvme_queue_rq, + .complete = apple_nvme_complete_rq, + .init_hctx = apple_nvme_init_hctx, + .init_request = apple_nvme_init_request, + .timeout = apple_nvme_timeout, +}; + +static const struct blk_mq_ops apple_nvme_mq_ops = { + .queue_rq = apple_nvme_queue_rq, + .complete = apple_nvme_complete_rq, + .init_hctx = apple_nvme_init_hctx, + .init_request = apple_nvme_init_request, + .timeout = apple_nvme_timeout, + .poll = apple_nvme_poll, +}; + +static void apple_nvme_init_queue(struct apple_nvme_queue *q) +{ + unsigned int depth = apple_nvme_queue_depth(q); + + q->cq_head = 0; + q->cq_phase = 1; + memset(q->tcbs, 0, + APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb)); + memset(q->cqes, 0, depth * sizeof(struct nvme_completion)); + WRITE_ONCE(q->enabled, true); + wmb(); /* ensure the first interrupt sees the initialization */ +} + +static void apple_nvme_reset_work(struct work_struct *work) +{ + unsigned int nr_io_queues = 1; + int ret; + u32 boot_status, aqa; + struct apple_nvme *anv = + container_of(work, struct apple_nvme, 
ctrl.reset_work); + + if (anv->ctrl.state != NVME_CTRL_RESETTING) { + dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", + anv->ctrl.state); + ret = -ENODEV; + goto out; + } + + /* there's unfortunately no known way to recover if RTKit crashed :( */ + if (apple_rtkit_is_crashed(anv->rtk)) { + dev_err(anv->dev, + "RTKit has crashed without any way to recover."); + ret = -EIO; + goto out; + } + + if (anv->ctrl.ctrl_config & NVME_CC_ENABLE) + apple_nvme_disable(anv, false); + + /* RTKit must be shut down cleanly for the (soft)-reset to work */ + if (apple_rtkit_is_running(anv->rtk)) { + dev_dbg(anv->dev, "Trying to shut down RTKit before reset."); + ret = apple_rtkit_shutdown(anv->rtk); + if (ret) + goto out; + } + + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + + ret = reset_control_assert(anv->reset); + if (ret) + goto out; + + ret = apple_rtkit_reinit(anv->rtk); + if (ret) + goto out; + + ret = reset_control_deassert(anv->reset); + if (ret) + goto out; + + writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN, + anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + ret = apple_rtkit_boot(anv->rtk); + if (ret) { + dev_err(anv->dev, "ANS did not boot"); + goto out; + } + + ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS, + boot_status, + boot_status == APPLE_ANS_BOOT_STATUS_OK, + USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT); + if (ret) { + dev_err(anv->dev, "ANS did not initialize"); + goto out; + } + + dev_dbg(anv->dev, "ANS booted successfully."); + + /* + * Limit the max command size to prevent iod->sg allocations going + * over a single page. + */ + anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1, + dma_max_mapping_size(anv->dev) >> 9); + anv->ctrl.max_segments = NVME_MAX_SEGS; + + /* + * Enable NVMMU and linear submission queues. + * While we could keep those disabled and pretend this is slightly + * more common NVMe controller we'd still need some quirks (e.g. + * sq entries will be 128 bytes) and Apple might drop support for + * that mode in the future. 
+ */ + writel(APPLE_ANS_LINEAR_SQ_EN, + anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL); + + /* Allow as many pending command as possible for both queues */ + writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16), + anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL); + + /* Setup the NVMMU for the maximum admin and IO queue depth */ + writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1, + anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS); + + /* + * This is probably a chicken bit: without it all commands where any PRP + * is set to zero (including those that don't use that field) fail and + * the co-processor complains about "completed with err BAD_CMD-" or + * a "NULL_PRP_PTR_ERR" in the syslog + */ + writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) & + ~APPLE_ANS_PRP_NULL_CHECK, + anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL); + + /* Setup the admin queue */ + aqa = APPLE_NVME_AQ_DEPTH - 1; + aqa |= aqa << 16; + writel(aqa, anv->mmio_nvme + NVME_REG_AQA); + writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ); + writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ); + + /* Setup NVMMU for both queues */ + writeq(anv->adminq.tcb_dma_addr, + anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE); + writeq(anv->ioq.tcb_dma_addr, + anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE); + + anv->ctrl.sqsize = + APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */ + anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP); + + dev_dbg(anv->dev, "Enabling controller now"); + ret = nvme_enable_ctrl(&anv->ctrl); + if (ret) + goto out; + + dev_dbg(anv->dev, "Starting admin queue"); + apple_nvme_init_queue(&anv->adminq); + nvme_start_admin_queue(&anv->ctrl); + + if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) { + dev_warn(anv->ctrl.device, + "failed to mark controller CONNECTING\n"); + ret = -ENODEV; + goto out; + } + + ret = nvme_init_ctrl_finish(&anv->ctrl); + if (ret) + goto out; + + dev_dbg(anv->dev, "Creating IOCQ"); + ret = apple_nvme_create_cq(anv); + if (ret) + goto out; + dev_dbg(anv->dev, "Creating IOSQ"); + ret = apple_nvme_create_sq(anv); + if (ret) + goto out_remove_cq; + + apple_nvme_init_queue(&anv->ioq); + nr_io_queues = 1; + ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues); + if (ret) + goto out_remove_sq; + if (nr_io_queues != 1) { + ret = -ENXIO; + goto out_remove_sq; + } + + anv->ctrl.queue_count = nr_io_queues + 1; + + nvme_start_queues(&anv->ctrl); + nvme_wait_freeze(&anv->ctrl); + blk_mq_update_nr_hw_queues(&anv->tagset, 1); + nvme_unfreeze(&anv->ctrl); + + if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) { + dev_warn(anv->ctrl.device, + "failed to mark controller live state\n"); + ret = -ENODEV; + goto out_remove_sq; + } + + nvme_start_ctrl(&anv->ctrl); + + dev_dbg(anv->dev, "ANS boot and NVMe init completed."); + return; + +out_remove_sq: + apple_nvme_remove_sq(anv); +out_remove_cq: + apple_nvme_remove_cq(anv); +out: + dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret); + nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); + nvme_get_ctrl(&anv->ctrl); + apple_nvme_disable(anv, false); + nvme_kill_queues(&anv->ctrl); + if (!queue_work(nvme_wq, &anv->remove_work)) + nvme_put_ctrl(&anv->ctrl); +} + +static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work) +{ + struct apple_nvme *anv = + container_of(work, struct apple_nvme, remove_work); + + nvme_put_ctrl(&anv->ctrl); + device_release_driver(anv->dev); +} + +static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) +{ + *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + 
off); + return 0; +} + +static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) +{ + writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); + return 0; +} + +static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) +{ + *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off); + return 0; +} + +static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size) +{ + struct device *dev = ctrl_to_apple_nvme(ctrl)->dev; + + return snprintf(buf, size, "%s\n", dev_name(dev)); +} + +static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl) +{ + struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl); + + if (anv->ctrl.admin_q) + blk_put_queue(anv->ctrl.admin_q); + put_device(anv->dev); +} + +static const struct nvme_ctrl_ops nvme_ctrl_ops = { + .name = "apple-nvme", + .module = THIS_MODULE, + .flags = 0, + .reg_read32 = apple_nvme_reg_read32, + .reg_write32 = apple_nvme_reg_write32, + .reg_read64 = apple_nvme_reg_read64, + .free_ctrl = apple_nvme_free_ctrl, + .get_address = apple_nvme_get_address, +}; + +static void apple_nvme_async_probe(void *data, async_cookie_t cookie) +{ + struct apple_nvme *anv = data; + + flush_work(&anv->ctrl.reset_work); + flush_work(&anv->ctrl.scan_work); + nvme_put_ctrl(&anv->ctrl); +} + +static int apple_nvme_alloc_tagsets(struct apple_nvme *anv) +{ + int ret; + + anv->admin_tagset.ops = &apple_nvme_mq_admin_ops; + anv->admin_tagset.nr_hw_queues = 1; + anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH; + anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT; + anv->admin_tagset.numa_node = NUMA_NO_NODE; + anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod); + anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED; + anv->admin_tagset.driver_data = &anv->adminq; + + ret = blk_mq_alloc_tag_set(&anv->admin_tagset); + if (ret) + return ret; + ret = devm_add_action_or_reset(anv->dev, + (void (*)(void *))blk_mq_free_tag_set, + &anv->admin_tagset); + if (ret) + return ret; + + anv->tagset.ops = &apple_nvme_mq_ops; + anv->tagset.nr_hw_queues = 1; + anv->tagset.nr_maps = 1; + /* + * Tags are used as an index to the NVMMU and must be unique across + * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which + * must be marked as reserved in the IO queue. + */ + anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH; + anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1; + anv->tagset.timeout = NVME_IO_TIMEOUT; + anv->tagset.numa_node = NUMA_NO_NODE; + anv->tagset.cmd_size = sizeof(struct apple_nvme_iod); + anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE; + anv->tagset.driver_data = &anv->ioq; + + ret = blk_mq_alloc_tag_set(&anv->tagset); + if (ret) + return ret; + ret = devm_add_action_or_reset( + anv->dev, (void (*)(void *))blk_mq_free_tag_set, &anv->tagset); + if (ret) + return ret; + + anv->ctrl.admin_tagset = &anv->admin_tagset; + anv->ctrl.tagset = &anv->tagset; + + return 0; +} + +static int apple_nvme_queue_alloc(struct apple_nvme *anv, + struct apple_nvme_queue *q) +{ + unsigned int depth = apple_nvme_queue_depth(q); + + q->cqes = dmam_alloc_coherent(anv->dev, + depth * sizeof(struct nvme_completion), + &q->cq_dma_addr, GFP_KERNEL); + if (!q->cqes) + return -ENOMEM; + + q->sqes = dmam_alloc_coherent(anv->dev, + depth * sizeof(struct nvme_command), + &q->sq_dma_addr, GFP_KERNEL); + if (!q->sqes) + return -ENOMEM; + + /* + * We need the maximum queue depth here because the NVMMU only has a + * single depth configuration shared between both queues. 
+ */ + q->tcbs = dmam_alloc_coherent(anv->dev, + APPLE_ANS_MAX_QUEUE_DEPTH * + sizeof(struct apple_nvmmu_tcb), + &q->tcb_dma_addr, GFP_KERNEL); + if (!q->tcbs) + return -ENOMEM; + + /* + * initialize phase to make sure the allocated and empty memory + * doesn't look like a full cq already. + */ + q->cq_phase = 1; + return 0; +} + +static void apple_nvme_detach_genpd(struct apple_nvme *anv) +{ + int i; + + if (anv->pd_count <= 1) + return; + + for (i = anv->pd_count - 1; i >= 0; i--) { + if (anv->pd_link[i]) + device_link_del(anv->pd_link[i]); + if (!IS_ERR_OR_NULL(anv->pd_dev[i])) + dev_pm_domain_detach(anv->pd_dev[i], true); + } +} + +static int apple_nvme_attach_genpd(struct apple_nvme *anv) +{ + struct device *dev = anv->dev; + int i; + + anv->pd_count = of_count_phandle_with_args( + dev->of_node, "power-domains", "#power-domain-cells"); + if (anv->pd_count <= 1) + return 0; + + anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev), + GFP_KERNEL); + if (!anv->pd_dev) + return -ENOMEM; + + anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link), + GFP_KERNEL); + if (!anv->pd_link) + return -ENOMEM; + + for (i = 0; i < anv->pd_count; i++) { + anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i); + if (IS_ERR(anv->pd_dev[i])) { + apple_nvme_detach_genpd(anv); + return PTR_ERR(anv->pd_dev[i]); + } + + anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i], + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (!anv->pd_link[i]) { + apple_nvme_detach_genpd(anv); + return -EINVAL; + } + } + + return 0; +} + +static int apple_nvme_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct apple_nvme *anv; + int ret; + + anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL); + if (!anv) + return -ENOMEM; + + anv->dev = get_device(dev); + anv->adminq.is_adminq = true; + platform_set_drvdata(pdev, anv); + + ret = apple_nvme_attach_genpd(anv); + if (ret < 0) { + dev_err_probe(dev, ret, "Failed to attach power domains"); + goto put_dev; + } + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { + ret = -ENXIO; + goto put_dev; + } + + anv->irq = platform_get_irq(pdev, 0); + if (anv->irq < 0) { + ret = anv->irq; + goto put_dev; + } + if (!anv->irq) { + ret = -ENXIO; + goto put_dev; + } + + anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans"); + if (IS_ERR(anv->mmio_coproc)) { + ret = PTR_ERR(anv->mmio_coproc); + goto put_dev; + } + anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme"); + if (IS_ERR(anv->mmio_nvme)) { + ret = PTR_ERR(anv->mmio_nvme); + goto put_dev; + } + + anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB; + anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; + anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB; + anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; + + anv->sart = devm_apple_sart_get(dev); + if (IS_ERR(anv->sart)) { + ret = dev_err_probe(dev, PTR_ERR(anv->sart), + "Failed to initialize SART"); + goto put_dev; + } + + anv->reset = devm_reset_control_array_get_exclusive(anv->dev); + if (IS_ERR(anv->reset)) { + ret = dev_err_probe(dev, PTR_ERR(anv->reset), + "Failed to get reset control"); + goto put_dev; + } + + INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work); + INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work); + spin_lock_init(&anv->lock); + + ret = apple_nvme_queue_alloc(anv, &anv->adminq); + if (ret) + goto put_dev; + ret = apple_nvme_queue_alloc(anv, &anv->ioq); + if (ret) + goto put_dev; + + anv->prp_page_pool = 
dmam_pool_create("prp list page", anv->dev, + NVME_CTRL_PAGE_SIZE, + NVME_CTRL_PAGE_SIZE, 0); + if (!anv->prp_page_pool) { + ret = -ENOMEM; + goto put_dev; + } + + anv->prp_small_pool = + dmam_pool_create("prp list 256", anv->dev, 256, 256, 0); + if (!anv->prp_small_pool) { + ret = -ENOMEM; + goto put_dev; + } + + WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE); + anv->iod_mempool = + mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size()); + if (!anv->iod_mempool) { + ret = -ENOMEM; + goto put_dev; + } + ret = devm_add_action_or_reset( + anv->dev, (void (*)(void *))mempool_destroy, anv->iod_mempool); + if (ret) + goto put_dev; + + ret = apple_nvme_alloc_tagsets(anv); + if (ret) + goto put_dev; + + ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0, + "nvme-apple", anv); + if (ret) { + dev_err_probe(dev, ret, "Failed to request IRQ"); + goto put_dev; + } + + anv->rtk = + devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops); + if (IS_ERR(anv->rtk)) { + ret = dev_err_probe(dev, PTR_ERR(anv->rtk), + "Failed to initialize RTKit"); + goto put_dev; + } + + ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops, + NVME_QUIRK_SKIP_CID_GEN); + if (ret) { + dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl"); + goto put_dev; + } + + anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset); + if (IS_ERR(anv->ctrl.admin_q)) { + ret = -ENOMEM; + goto put_dev; + } + + if (!blk_get_queue(anv->ctrl.admin_q)) { + nvme_start_admin_queue(&anv->ctrl); + blk_cleanup_queue(anv->ctrl.admin_q); + anv->ctrl.admin_q = NULL; + ret = -ENODEV; + goto put_dev; + } + + nvme_reset_ctrl(&anv->ctrl); + async_schedule(apple_nvme_async_probe, anv); + + return 0; + +put_dev: + put_device(anv->dev); + return ret; +} + +static int apple_nvme_remove(struct platform_device *pdev) +{ + struct apple_nvme *anv = platform_get_drvdata(pdev); + + nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); + flush_work(&anv->ctrl.reset_work); + nvme_stop_ctrl(&anv->ctrl); + nvme_remove_namespaces(&anv->ctrl); + apple_nvme_disable(anv, true); + nvme_uninit_ctrl(&anv->ctrl); + + if (apple_rtkit_is_running(anv->rtk)) + apple_rtkit_shutdown(anv->rtk); + + apple_nvme_detach_genpd(anv); + + return 0; +} + +static void apple_nvme_shutdown(struct platform_device *pdev) +{ + struct apple_nvme *anv = platform_get_drvdata(pdev); + + apple_nvme_disable(anv, true); + if (apple_rtkit_is_running(anv->rtk)) + apple_rtkit_shutdown(anv->rtk); +} + +static int apple_nvme_resume(struct device *dev) +{ + struct apple_nvme *anv = dev_get_drvdata(dev); + + return nvme_reset_ctrl(&anv->ctrl); +} + +static int apple_nvme_suspend(struct device *dev) +{ + struct apple_nvme *anv = dev_get_drvdata(dev); + int ret = 0; + + apple_nvme_disable(anv, true); + + if (apple_rtkit_is_running(anv->rtk)) + ret = apple_rtkit_shutdown(anv->rtk); + + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend, + apple_nvme_resume); + +static const struct of_device_id apple_nvme_of_match[] = { + { .compatible = "apple,nvme-ans2" }, + {}, +}; +MODULE_DEVICE_TABLE(of, apple_nvme_of_match); + +static struct platform_driver apple_nvme_driver = { + .driver = { + .name = "nvme-apple", + .of_match_table = apple_nvme_of_match, + .pm = pm_sleep_ptr(&apple_nvme_pm_ops), + }, + .probe = apple_nvme_probe, + .remove = apple_nvme_remove, + .shutdown = apple_nvme_shutdown, +}; +module_platform_driver(apple_nvme_driver); + +MODULE_AUTHOR("Sven Peter 
<sven@svenpeter.dev>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index efb85c6d8e2d..e1846d04817f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -366,7 +366,7 @@ static inline void nvme_end_req(struct request *req) { blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS)) + if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) nvme_log_error(req); nvme_end_req_zoned(req); nvme_trace_bio_complete(req); @@ -1015,6 +1015,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, goto out; } + req->rq_flags |= RQF_QUIET; ret = nvme_execute_rq(req, at_head); if (result && ret >= 0) *result = nvme_req(req)->result; @@ -1287,6 +1288,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_EUI64_LEN; memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); return NVME_NIDT_EUI64_LEN; case NVME_NIDT_NGUID: @@ -1295,6 +1298,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_NGUID_LEN; memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); return NVME_NIDT_NGUID_LEN; case NVME_NIDT_UUID: @@ -1303,6 +1308,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids, warn_str, cur->nidl); return -1; } + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) + return NVME_NIDT_UUID_LEN; uuid_copy(&ids->uuid, data + sizeof(*cur)); return NVME_NIDT_UUID_LEN; case NVME_NIDT_CSI: @@ -1399,12 +1406,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, if ((*id)->ncap == 0) /* namespace not allocated or attached */ goto out_free_id; - if (ctrl->vs >= NVME_VS(1, 1, 0) && - !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) - memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64)); - if (ctrl->vs >= NVME_VS(1, 2, 0) && - !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) - memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid)); + + if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { + dev_info(ctrl->device, + "Ignoring bogus Namespace Identifiers\n"); + } else { + if (ctrl->vs >= NVME_VS(1, 1, 0) && + !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) + memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64)); + if (ctrl->vs >= NVME_VS(1, 2, 0) && + !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) + memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid)); + } return 0; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 1393bbf82d71..a2b53ca63335 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -144,6 +144,11 @@ enum nvme_quirks { * encoding the generation sequence number. */ NVME_QUIRK_SKIP_CID_GEN = (1 << 17), + + /* + * Reports garbage in the namespace identifiers (eui64, nguid, uuid). 
+ */ + NVME_QUIRK_BOGUS_NID = (1 << 18), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d817ca17463e..3aacf1c0d5a5 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3409,7 +3409,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | - NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + NVME_QUIRK_DISABLE_WRITE_ZEROES | + NVME_QUIRK_BOGUS_NID, }, + { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ @@ -3447,6 +3450,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 558b35aba610..d270a204324e 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -3407,6 +3407,15 @@ static int hv_pci_probe(struct hv_device *hdev, hbus->bridge->domain_nr = dom; #ifdef CONFIG_X86 hbus->sysdata.domain = dom; +#elif defined(CONFIG_ARM64) + /* + * Set the PCI bus parent to be the corresponding VMbus + * device. Then the VMbus device will be assigned as the + * ACPI companion in pcibios_root_bridge_prepare() and + * pci_dma_configure() will propagate device coherence + * information to devices created on the bus. + */ + hbus->sysdata.parent = hdev->device.parent; #endif hbus->hdev = hdev; diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index afdcb91601d2..1e2d69453771 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -187,7 +187,7 @@ source "drivers/perf/hisilicon/Kconfig" config MARVELL_CN10K_DDR_PMU tristate "Enable MARVELL CN10K DRAM Subsystem(DSS) PMU Support" - depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT) help Enable perf support for Marvell DDR Performance monitoring event on CN10K platform. 
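[Editor's note] The NVME_QUIRK_BOGUS_NID hunks above all follow one pattern: a per-device quirk bit is looked up once from the PCI ID table at probe time, and every later copy of a reported namespace identifier is gated on that bit. The following is a minimal, self-contained sketch of that pattern, written as plain userspace C so it compiles on its own; the struct names, table entries and helpers (fake_ctrl, apply_quirks, copy_eui64) are illustrative stand-ins, not the kernel's real nvme_ctrl, pci_device_id or nvme_process_ns_desc() definitions.

/*
 * Sketch of the quirk-bitmask pattern used by the NVME_QUIRK_BOGUS_NID
 * change above.  Illustrative only; not the kernel's actual structures.
 */
#include <stdio.h>
#include <string.h>

#define QUIRK_BOGUS_NID (1u << 0)	/* device reports garbage namespace IDs */

struct fake_ctrl {
	unsigned int vendor, device;
	unsigned long quirks;
};

/* per-device quirk table, consulted once at probe time */
static const struct {
	unsigned int vendor, device;
	unsigned long quirks;
} quirk_table[] = {
	{ 0x1e4b, 0x1002, QUIRK_BOGUS_NID },	/* e.g. MAXIO MAP1002, as in the hunk above */
	{ 0x1e4b, 0x1202, QUIRK_BOGUS_NID },	/* e.g. MAXIO MAP1202 */
};

static void apply_quirks(struct fake_ctrl *ctrl)
{
	for (size_t i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++)
		if (quirk_table[i].vendor == ctrl->vendor &&
		    quirk_table[i].device == ctrl->device)
			ctrl->quirks |= quirk_table[i].quirks;
}

/* only trust the reported EUI-64 when the device is not known to lie */
static void copy_eui64(struct fake_ctrl *ctrl, unsigned char dst[8],
		       const unsigned char src[8])
{
	if (ctrl->quirks & QUIRK_BOGUS_NID)
		return;		/* leave dst zeroed so higher layers ignore it */
	memcpy(dst, src, 8);
}

int main(void)
{
	struct fake_ctrl ctrl = { .vendor = 0x1e4b, .device = 0x1002 };
	unsigned char reported[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char eui64[8] = { 0 };

	apply_quirks(&ctrl);
	copy_eui64(&ctrl, eui64, reported);
	printf("quirks=%#lx eui64[0]=%u\n", ctrl.quirks, eui64[0]);
	return 0;
}

Leaving the identifiers zeroed when the quirk is set lets the core fall back to its own namespace matching instead of trusting identifiers that may collide across devices, which is consistent with the "Ignoring bogus Namespace Identifiers" message added in nvme_identify_ns() above.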
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 94ebc1ecace7..b1b2a55de77f 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -29,7 +29,7 @@ #define CNTL_OVER_MASK 0xFFFFFFFE #define CNTL_CSV_SHIFT 24 -#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT) +#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT) #define EVENT_CYCLES_ID 0 #define EVENT_CYCLES_COUNTER 0 diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 7640491aab12..30234c261b05 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -736,7 +736,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster( { u64 mpidr; int cpu_cluster_id; - struct cluster_pmu *cluster = NULL; + struct cluster_pmu *cluster; /* * This assumes that the cluster_id is in MPIDR[aff1] for @@ -758,10 +758,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster( cluster->cluster_id); cpumask_set_cpu(cpu, &cluster->cluster_cpus); *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; - break; + return cluster; } - return cluster; + return NULL; } static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 6b8b3ab8db48..3463629f8764 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -584,21 +584,6 @@ static struct platform_driver acerhdf_driver = { .remove = acerhdf_remove, }; -/* checks if str begins with start */ -static int str_starts_with(const char *str, const char *start) -{ - unsigned long str_len = 0, start_len = 0; - - str_len = strlen(str); - start_len = strlen(start); - - if (str_len >= start_len && - !strncmp(str, start, start_len)) - return 1; - - return 0; -} - /* check hardware */ static int __init acerhdf_check_hardware(void) { @@ -651,9 +636,9 @@ static int __init acerhdf_check_hardware(void) * check if actual hardware BIOS vendor, product and version * IDs start with the strings of BIOS table entry */ - if (str_starts_with(vendor, bt->vendor) && - str_starts_with(product, bt->product) && - str_starts_with(version, bt->version)) { + if (strstarts(vendor, bt->vendor) && + strstarts(product, bt->product) && + strstarts(version, bt->version)) { found = 1; break; } diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index e9d0dbbb2887..fa4123dbdf7f 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -160,8 +160,10 @@ MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism"); static struct amd_pmc_dev pmc; static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret); -static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data); static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf); +#ifdef CONFIG_SUSPEND +static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data); +#endif static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset) { @@ -325,6 +327,7 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table return 0; } +#ifdef CONFIG_SUSPEND static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev) { struct smu_metrics table; @@ -338,6 +341,7 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev) dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n", table.timein_s0i3_lastcapture); } +#endif #ifdef CONFIG_DEBUG_FS static int smu_fw_info_show(struct seq_file *s, void *unused) @@ -569,6 +573,7 @@ out_unlock: return 
rc; } +#ifdef CONFIG_SUSPEND static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) { switch (dev->cpu_id) { @@ -694,6 +699,7 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = { .prepare = amd_pmc_s2idle_prepare, .restore = amd_pmc_s2idle_restore, }; +#endif static const struct pci_device_id pmc_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) }, @@ -733,6 +739,7 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev) return 0; } +#ifdef CONFIG_SUSPEND static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data) { int err; @@ -753,6 +760,7 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data) return 0; } +#endif static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf) { @@ -859,9 +867,11 @@ static int amd_pmc_probe(struct platform_device *pdev) amd_pmc_get_smu_version(dev); platform_set_drvdata(pdev, dev); +#ifdef CONFIG_SUSPEND err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops); if (err) dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n"); +#endif amd_pmc_dbgfs_register(dev); return 0; @@ -875,7 +885,9 @@ static int amd_pmc_remove(struct platform_device *pdev) { struct amd_pmc_dev *dev = platform_get_drvdata(pdev); +#ifdef CONFIG_SUSPEND acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops); +#endif amd_pmc_dbgfs_unregister(dev); pci_dev_put(dev->rdev); mutex_destroy(&dev->lock); diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c index f5c72e33f9ae..05534287bc26 100644 --- a/drivers/platform/x86/barco-p50-gpio.c +++ b/drivers/platform/x86/barco-p50-gpio.c @@ -10,7 +10,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/io.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/err.h> diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index c1d9ed9b7b67..19f6b456234f 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev, if (value > samsung->kbd_led.max_brightness) value = samsung->kbd_led.max_brightness; - else if (value < 0) - value = 0; samsung->kbd_led_wk = value; queue_work(samsung->led_workqueue, &samsung->kbd_led_work); diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index bce17ca97947..a01a92769c1a 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -740,16 +740,8 @@ static ssize_t certificate_store(struct kobject *kobj, if (!tlmi_priv.certificate_support) return -EOPNOTSUPP; - new_cert = kstrdup(buf, GFP_KERNEL); - if (!new_cert) - return -ENOMEM; - /* Strip out CR if one is present */ - strip_cr(new_cert); - /* If empty then clear installed certificate */ - if (new_cert[0] == '\0') { /* Clear installed certificate */ - kfree(new_cert); - + if ((buf[0] == '\0') || (buf[0] == '\n')) { /* Clear installed certificate */ /* Check that signature is set */ if (!setting->signature || !setting->signature[0]) return -EACCES; @@ -763,14 +755,16 @@ static ssize_t certificate_store(struct kobject *kobj, ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str); kfree(auth_str); - if (ret) - return ret; - kfree(setting->certificate); - setting->certificate = NULL; - return count; + return ret ?: count; } + new_cert = kstrdup(buf, GFP_KERNEL); + if (!new_cert) + return -ENOMEM; + /* Strip out CR if one is present */ + strip_cr(new_cert); + if (setting->cert_installed) { /* Certificate is installed so this is an 
update */ if (!setting->signature || !setting->signature[0]) { @@ -792,21 +786,14 @@ static ssize_t certificate_store(struct kobject *kobj, auth_str = kasprintf(GFP_KERNEL, "%s,%s", new_cert, setting->password); } - if (!auth_str) { - kfree(new_cert); + kfree(new_cert); + if (!auth_str) return -ENOMEM; - } ret = tlmi_simple_call(guid, auth_str); kfree(auth_str); - if (ret) { - kfree(new_cert); - return ret; - } - kfree(setting->certificate); - setting->certificate = new_cert; - return count; + return ret ?: count; } static struct kobj_attribute auth_certificate = __ATTR_WO(certificate); @@ -1194,6 +1181,10 @@ static void tlmi_release_attr(void) kset_unregister(tlmi_priv.attribute_kset); + /* Free up any saved signatures */ + kfree(tlmi_priv.pwd_admin->signature); + kfree(tlmi_priv.pwd_admin->save_signature); + /* Authentication structures */ sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group); kobject_put(&tlmi_priv.pwd_admin->kobj); @@ -1210,11 +1201,6 @@ static void tlmi_release_attr(void) } kset_unregister(tlmi_priv.authentication_kset); - - /* Free up any saved certificates/signatures */ - kfree(tlmi_priv.pwd_admin->certificate); - kfree(tlmi_priv.pwd_admin->signature); - kfree(tlmi_priv.pwd_admin->save_signature); } static int tlmi_sysfs_init(void) diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h index 4f69df6eed07..4daba6151cd6 100644 --- a/drivers/platform/x86/think-lmi.h +++ b/drivers/platform/x86/think-lmi.h @@ -63,7 +63,6 @@ struct tlmi_pwd_setting { int index; /*Used for HDD and NVME auth */ enum level_option level; bool cert_installed; - char *certificate; char *signature; char *save_signature; }; diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index ea02c8dcd748..d925cb137e12 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -604,6 +604,12 @@ int power_supply_get_battery_info(struct power_supply *psy, err = samsung_sdi_battery_get_info(&psy->dev, value, &info); if (!err) goto out_ret_pointer; + else if (err == -ENODEV) + /* + * Device does not have a static battery. + * Proceed to look for a simple battery. 
+ */ + err = 0; if (strcmp("simple-battery", value)) { err = -ENODEV; diff --git a/drivers/power/supply/samsung-sdi-battery.c b/drivers/power/supply/samsung-sdi-battery.c index 9d59f277f519..b33daab798b9 100644 --- a/drivers/power/supply/samsung-sdi-battery.c +++ b/drivers/power/supply/samsung-sdi-battery.c @@ -824,6 +824,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = { .constant_charge_current_max_ua = 900000, .constant_charge_voltage_max_uv = 4200000, .charge_term_current_ua = 200000, + .charge_restart_voltage_uv = 4170000, .maintenance_charge = samsung_maint_charge_table, .maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table), .alert_low_temp_charge_current_ua = 300000, @@ -867,6 +868,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = { .constant_charge_current_max_ua = 1500000, .constant_charge_voltage_max_uv = 4350000, .charge_term_current_ua = 120000, + .charge_restart_voltage_uv = 4300000, .maintenance_charge = samsung_maint_charge_table, .maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table), .alert_low_temp_charge_current_ua = 300000, diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c index 05147d2c3842..485e58b264c0 100644 --- a/drivers/regulator/atc260x-regulator.c +++ b/drivers/regulator/atc260x-regulator.c @@ -292,6 +292,7 @@ enum atc2603c_reg_ids { .bypass_mask = BIT(5), \ .active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \ .active_discharge_mask = BIT(1), \ + .active_discharge_on = BIT(1), \ .owner = THIS_MODULE, \ } diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c index f21e3f8b21f2..8e13dea354a2 100644 --- a/drivers/regulator/rtq2134-regulator.c +++ b/drivers/regulator/rtq2134-regulator.c @@ -285,6 +285,7 @@ static const unsigned int rtq2134_buck_ramp_delay_table[] = { .enable_mask = RTQ2134_VOUTEN_MASK, \ .active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \ .active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \ + .active_discharge_on = RTQ2134_ACTDISCHG_MASK, \ .ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \ .ramp_mask = RTQ2134_RSPUP_MASK, \ .ramp_delay_table = rtq2134_buck_ramp_delay_table, \ diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index cadea0344486..40befdd9dfa9 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c @@ -82,6 +82,35 @@ static const struct regulator_desc wm8994_ldo_desc[] = { .min_uV = 2400000, .uV_step = 100000, .enable_time = 3000, + .off_on_delay = 36000, + .owner = THIS_MODULE, + }, + { + .name = "LDO2", + .id = 2, + .type = REGULATOR_VOLTAGE, + .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1, + .vsel_reg = WM8994_LDO_2, + .vsel_mask = WM8994_LDO2_VSEL_MASK, + .ops = &wm8994_ldo2_ops, + .enable_time = 3000, + .off_on_delay = 36000, + .owner = THIS_MODULE, + }, +}; + +static const struct regulator_desc wm8958_ldo_desc[] = { + { + .name = "LDO1", + .id = 1, + .type = REGULATOR_VOLTAGE, + .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1, + .vsel_reg = WM8994_LDO_1, + .vsel_mask = WM8994_LDO1_VSEL_MASK, + .ops = &wm8994_ldo1_ops, + .min_uV = 2400000, + .uV_step = 100000, + .enable_time = 3000, .owner = THIS_MODULE, }, { @@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev) * regulator core and we need not worry about it on the * error path. 
*/ - ldo->regulator = devm_regulator_register(&pdev->dev, - &wm8994_ldo_desc[id], - &config); + if (ldo->wm8994->type == WM8994) { + ldo->regulator = devm_regulator_register(&pdev->dev, + &wm8994_ldo_desc[id], + &config); + } else { + ldo->regulator = devm_regulator_register(&pdev->dev, + &wm8958_ldo_desc[id], + &config); + } + if (IS_ERR(ldo->regulator)) { ret = PTR_ERR(ldo->regulator); dev_err(wm8994->dev, "Failed to register LDO%d: %d\n", diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index b496028b6bfa..e0fc80e041ea 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -183,7 +183,7 @@ config RESET_RASPBERRYPI config RESET_RZG2L_USBPHY_CTRL tristate "Renesas RZ/G2L USBPHY control driver" - depends on ARCH_R9A07G044 || COMPILE_TEST + depends on ARCH_RZG2L || COMPILE_TEST help Support for USBPHY Control found on RZ/G2L family. It mainly controls reset and power down of the USB/PHY. diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 61e688882643..f0a076e94118 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -12,6 +12,7 @@ #include <linux/kref.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/acpi.h> #include <linux/reset.h> #include <linux/reset-controller.h> #include <linux/slab.h> @@ -1100,13 +1101,25 @@ EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get); * * Convenience wrapper for __reset_control_get() and reset_control_reset(). * This is useful for the common case of devices with single, dedicated reset - * lines. + * lines. _RST firmware method will be called for devices with ACPI. */ int __device_reset(struct device *dev, bool optional) { struct reset_control *rstc; int ret; +#ifdef CONFIG_ACPI + acpi_handle handle = ACPI_HANDLE(dev); + + if (handle) { + if (!acpi_has_method(handle, "_RST")) + return optional ? 
0 : -ENOENT; + if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, + NULL))) + return -EIO; + } +#endif + rstc = __reset_control_get(dev, NULL, 0, 0, optional, true); if (IS_ERR(rstc)) return PTR_ERR(rstc); diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c index c9bc325ad65a..26dc54778615 100644 --- a/drivers/reset/reset-meson.c +++ b/drivers/reset/reset-meson.c @@ -98,11 +98,17 @@ static const struct meson_reset_param meson_a1_param = { .level_offset = 0x40, }; +static const struct meson_reset_param meson_s4_param = { + .reg_count = 6, + .level_offset = 0x40, +}; + static const struct of_device_id meson_reset_dt_ids[] = { { .compatible = "amlogic,meson8b-reset", .data = &meson8b_param}, { .compatible = "amlogic,meson-gxbb-reset", .data = &meson8b_param}, { .compatible = "amlogic,meson-axg-reset", .data = &meson8b_param}, { .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param}, + { .compatible = "amlogic,meson-s4-reset", .data = &meson_s4_param}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, meson_reset_dt_ids); diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c index 1e8315038850..a8dde4606360 100644 --- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c +++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c @@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(priv->rstc), "failed to get reset\n"); - reset_control_deassert(priv->rstc); + error = reset_control_deassert(priv->rstc); + if (error) + return error; priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops; priv->rcdev.of_reset_n_cells = 1; diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c index 4dda0daf2c6f..361a68314265 100644 --- a/drivers/reset/reset-simple.c +++ b/drivers/reset/reset-simple.c @@ -144,6 +144,7 @@ static const struct of_device_id reset_simple_dt_ids[] = { .data = &reset_simple_active_low }, { .compatible = "aspeed,ast2400-lpc-reset" }, { .compatible = "aspeed,ast2500-lpc-reset" }, + { .compatible = "aspeed,ast2600-lpc-reset" }, { .compatible = "bitmain,bm1880-reset", .data = &reset_simple_active_low }, { .compatible = "brcm,bcm4908-misc-pcie-reset", diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c index 908c1d5bc41e..146fd5d45e99 100644 --- a/drivers/reset/reset-uniphier-glue.c +++ b/drivers/reset/reset-uniphier-glue.c @@ -23,19 +23,32 @@ struct uniphier_glue_reset_soc_data { struct uniphier_glue_reset_priv { struct clk_bulk_data clk[MAX_CLKS]; - struct reset_control *rst[MAX_RSTS]; + struct reset_control_bulk_data rst[MAX_RSTS]; struct reset_simple_data rdata; const struct uniphier_glue_reset_soc_data *data; }; +static void uniphier_clk_disable(void *_priv) +{ + struct uniphier_glue_reset_priv *priv = _priv; + + clk_bulk_disable_unprepare(priv->data->nclks, priv->clk); +} + +static void uniphier_rst_assert(void *_priv) +{ + struct uniphier_glue_reset_priv *priv = _priv; + + reset_control_bulk_assert(priv->data->nrsts, priv->rst); +} + static int uniphier_glue_reset_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct uniphier_glue_reset_priv *priv; struct resource *res; resource_size_t size; - const char *name; - int i, ret, nr; + int i, ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -58,22 +71,28 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev) if (ret) return ret; - for (i = 0; i < priv->data->nrsts; i++) { - name = priv->data->reset_names[i]; - 
priv->rst[i] = devm_reset_control_get_shared(dev, name); - if (IS_ERR(priv->rst[i])) - return PTR_ERR(priv->rst[i]); - } + for (i = 0; i < priv->data->nrsts; i++) + priv->rst[i].id = priv->data->reset_names[i]; + ret = devm_reset_control_bulk_get_shared(dev, priv->data->nrsts, + priv->rst); + if (ret) + return ret; ret = clk_bulk_prepare_enable(priv->data->nclks, priv->clk); if (ret) return ret; - for (nr = 0; nr < priv->data->nrsts; nr++) { - ret = reset_control_deassert(priv->rst[nr]); - if (ret) - goto out_rst_assert; - } + ret = devm_add_action_or_reset(dev, uniphier_clk_disable, priv); + if (ret) + return ret; + + ret = reset_control_bulk_deassert(priv->data->nrsts, priv->rst); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, uniphier_rst_assert, priv); + if (ret) + return ret; spin_lock_init(&priv->rdata.lock); priv->rdata.rcdev.owner = THIS_MODULE; @@ -84,32 +103,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - ret = devm_reset_controller_register(dev, &priv->rdata.rcdev); - if (ret) - goto out_rst_assert; - - return 0; - -out_rst_assert: - while (nr--) - reset_control_assert(priv->rst[nr]); - - clk_bulk_disable_unprepare(priv->data->nclks, priv->clk); - - return ret; -} - -static int uniphier_glue_reset_remove(struct platform_device *pdev) -{ - struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < priv->data->nrsts; i++) - reset_control_assert(priv->rst[i]); - - clk_bulk_disable_unprepare(priv->data->nclks, priv->clk); - - return 0; + return devm_reset_controller_register(dev, &priv->rdata.rcdev); } static const char * const uniphier_pro4_clock_reset_names[] = { @@ -177,7 +171,6 @@ MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match); static struct platform_driver uniphier_glue_reset_driver = { .probe = uniphier_glue_reset_probe, - .remove = uniphier_glue_reset_remove, .driver = { .name = "uniphier-glue-reset", .of_match_table = uniphier_glue_reset_match, diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c index 24d3395964cc..4c5bba52b105 100644 --- a/drivers/reset/tegra/reset-bpmp.c +++ b/drivers/reset/tegra/reset-bpmp.c @@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc, struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc); struct mrq_reset_request request; struct tegra_bpmp_message msg; + int err; memset(&request, 0, sizeof(request)); request.cmd = command; @@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc, msg.tx.data = &request; msg.tx.size = sizeof(request); - return tegra_bpmp_transfer(bpmp, &msg); + err = tegra_bpmp_transfer(bpmp, &msg); + if (err) + return err; + if (msg.rx.ret) + return -EINVAL; + + return 0; } static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc, diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 5f554a3a0f62..caeebfb67149 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -317,14 +317,18 @@ enum { }; struct aha152x_cmd_priv { - struct scsi_pointer scsi_pointer; + char *ptr; + int this_residual; + struct scatterlist *buffer; + int status; + int message; + int sent_command; + int phase; }; -static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd) +static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd) { - struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd); - - return &acmd->scsi_pointer; + return scsi_cmd_priv(cmd); } MODULE_AUTHOR("Jürgen Fischer"); @@ -890,17 +894,16 @@ 
void aha152x_release(struct Scsi_Host *shpnt) static int setup_expected_interrupts(struct Scsi_Host *shpnt) { if(CURRENT_SC) { - struct scsi_pointer *scsi_pointer = - aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); - scsi_pointer->phase |= 1 << 16; + acp->phase |= 1 << 16; - if (scsi_pointer->phase & selecting) { + if (acp->phase & selecting) { SETPORT(SSTAT1, SELTO); SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0)); SETPORT(SIMODE1, ENSELTIMO); } else { - SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0); + SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0); SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); } } else if(STATE==seldi) { @@ -924,17 +927,16 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt) static int aha152x_internal_queue(struct scsi_cmnd *SCpnt, struct completion *complete, int phase) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt); + struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt); struct Scsi_Host *shpnt = SCpnt->device->host; unsigned long flags; - scsi_pointer->phase = not_issued | phase; - scsi_pointer->Status = 0x1; /* Ilegal status by SCSI standard */ - scsi_pointer->Message = 0; - scsi_pointer->have_data_in = 0; - scsi_pointer->sent_command = 0; + acp->phase = not_issued | phase; + acp->status = 0x1; /* Illegal status by SCSI standard */ + acp->message = 0; + acp->sent_command = 0; - if (scsi_pointer->phase & (resetting | check_condition)) { + if (acp->phase & (resetting | check_condition)) { if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) { scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n"); return FAILED; @@ -957,15 +959,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt, SCp.phase : current state of the command */ if ((phase & resetting) || !scsi_sglist(SCpnt)) { - scsi_pointer->ptr = NULL; - scsi_pointer->this_residual = 0; + acp->ptr = NULL; + acp->this_residual = 0; scsi_set_resid(SCpnt, 0); - scsi_pointer->buffer = NULL; + acp->buffer = NULL; } else { scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); - scsi_pointer->buffer = scsi_sglist(SCpnt); - scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer); - scsi_pointer->this_residual = scsi_pointer->buffer->length; + acp->buffer = scsi_sglist(SCpnt); + acp->ptr = SG_ADDRESS(acp->buffer); + acp->this_residual = acp->buffer->length; } DO_LOCK(flags); @@ -1015,7 +1017,7 @@ static void reset_done(struct scsi_cmnd *SCpnt) static void aha152x_scsi_done(struct scsi_cmnd *SCpnt) { - if (aha152x_scsi_pointer(SCpnt)->phase & resetting) + if (aha152x_priv(SCpnt)->phase & resetting) reset_done(SCpnt); else scsi_done(SCpnt); @@ -1101,7 +1103,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt) DO_LOCK(flags); - if (aha152x_scsi_pointer(SCpnt)->phase & resetted) { + if (aha152x_priv(SCpnt)->phase & resetted) { HOSTDATA(shpnt)->commands--; if (!HOSTDATA(shpnt)->commands) SETPORT(PORTA, 0); @@ -1395,31 +1397,30 @@ static void busfree_run(struct Scsi_Host *shpnt) SETPORT(SSTAT1, CLRBUSFREE); if(CURRENT_SC) { - struct scsi_pointer *scsi_pointer = - aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); #if defined(AHA152X_STAT) action++; #endif - scsi_pointer->phase &= ~syncneg; + acp->phase &= ~syncneg; - if (scsi_pointer->phase & completed) { + if (acp->phase & completed) { /* target sent COMMAND COMPLETE */ - done(shpnt, scsi_pointer->Status, DID_OK); + done(shpnt, acp->status, DID_OK); - } else if (scsi_pointer->phase & 
aborted) { - done(shpnt, scsi_pointer->Status, DID_ABORT); + } else if (acp->phase & aborted) { + done(shpnt, acp->status, DID_ABORT); - } else if (scsi_pointer->phase & resetted) { - done(shpnt, scsi_pointer->Status, DID_RESET); + } else if (acp->phase & resetted) { + done(shpnt, acp->status, DID_RESET); - } else if (scsi_pointer->phase & disconnected) { + } else if (acp->phase & disconnected) { /* target sent DISCONNECT */ #if defined(AHA152X_STAT) HOSTDATA(shpnt)->disconnections++; #endif append_SC(&DISCONNECTED_SC, CURRENT_SC); - scsi_pointer->phase |= 1 << 16; + acp->phase |= 1 << 16; CURRENT_SC = NULL; } else { @@ -1438,24 +1439,23 @@ static void busfree_run(struct Scsi_Host *shpnt) action++; #endif - if (aha152x_scsi_pointer(DONE_SC)->phase & check_condition) { + if (aha152x_priv(DONE_SC)->phase & check_condition) { struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; struct aha152x_scdata *sc = SCDATA(cmd); scsi_eh_restore_cmnd(cmd, &sc->ses); - aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION; + aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION; HOSTDATA(shpnt)->commands--; if (!HOSTDATA(shpnt)->commands) SETPORT(PORTA, 0); /* turn led off */ - } else if (aha152x_scsi_pointer(DONE_SC)->Status == - SAM_STAT_CHECK_CONDITION) { + } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) { #if defined(AHA152X_STAT) HOSTDATA(shpnt)->busfree_with_check_condition++; #endif - if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) { + if (!(aha152x_priv(DONE_SC)->phase & not_issued)) { struct aha152x_scdata *sc; struct scsi_cmnd *ptr = DONE_SC; DONE_SC=NULL; @@ -1480,7 +1480,7 @@ static void busfree_run(struct Scsi_Host *shpnt) if (!HOSTDATA(shpnt)->commands) SETPORT(PORTA, 0); /* turn led off */ - if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) { + if (!(aha152x_priv(ptr)->phase & resetting)) { kfree(ptr->host_scribble); ptr->host_scribble=NULL; } @@ -1503,13 +1503,12 @@ static void busfree_run(struct Scsi_Host *shpnt) DO_UNLOCK(flags); if(CURRENT_SC) { - struct scsi_pointer *scsi_pointer = - aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); #if defined(AHA152X_STAT) action++; #endif - scsi_pointer->phase |= selecting; + acp->phase |= selecting; /* clear selection timeout */ SETPORT(SSTAT1, SELTO); @@ -1537,13 +1536,13 @@ static void busfree_run(struct Scsi_Host *shpnt) */ static void seldo_run(struct Scsi_Host *shpnt) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); SETPORT(SCSISIG, 0); SETPORT(SSTAT1, CLRBUSFREE); SETPORT(SSTAT1, CLRPHASECHG); - scsi_pointer->phase &= ~(selecting | not_issued); + acp->phase &= ~(selecting | not_issued); SETPORT(SCSISEQ, 0); @@ -1558,12 +1557,12 @@ static void seldo_run(struct Scsi_Host *shpnt) ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); - if (scsi_pointer->phase & aborting) { + if (acp->phase & aborting) { ADDMSGO(ABORT); - } else if (scsi_pointer->phase & resetting) { + } else if (acp->phase & resetting) { ADDMSGO(BUS_DEVICE_RESET); } else if (SYNCNEG==0 && SYNCHRONOUS) { - scsi_pointer->phase |= syncneg; + acp->phase |= syncneg; MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8); SYNCNEG=1; /* negotiation in progress */ } @@ -1578,7 +1577,7 @@ static void seldo_run(struct Scsi_Host *shpnt) */ static void selto_run(struct Scsi_Host *shpnt) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp; SETPORT(SCSISEQ, 0); SETPORT(SSTAT1, 
CLRSELTIMO); @@ -1586,9 +1585,10 @@ static void selto_run(struct Scsi_Host *shpnt) if (!CURRENT_SC) return; - scsi_pointer->phase &= ~selecting; + acp = aha152x_priv(CURRENT_SC); + acp->phase &= ~selecting; - if (scsi_pointer->phase & aborted) + if (acp->phase & aborted) done(shpnt, SAM_STAT_GOOD, DID_ABORT); else if (TESTLO(SSTAT0, SELINGO)) done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY); @@ -1616,10 +1616,9 @@ static void seldi_run(struct Scsi_Host *shpnt) SETPORT(SSTAT1, CLRPHASECHG); if(CURRENT_SC) { - struct scsi_pointer *scsi_pointer = - aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); - if (!(scsi_pointer->phase & not_issued)) + if (!(acp->phase & not_issued)) scmd_printk(KERN_ERR, CURRENT_SC, "command should not have been issued yet\n"); @@ -1676,7 +1675,7 @@ static void seldi_run(struct Scsi_Host *shpnt) static void msgi_run(struct Scsi_Host *shpnt) { for(;;) { - struct scsi_pointer *scsi_pointer; + struct aha152x_cmd_priv *acp; int sstat1 = GETPORT(SSTAT1); if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT)) @@ -1714,9 +1713,9 @@ static void msgi_run(struct Scsi_Host *shpnt) continue; } - scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); - scsi_pointer->Message = MSGI(0); - scsi_pointer->phase &= ~disconnected; + acp = aha152x_priv(CURRENT_SC); + acp->message = MSGI(0); + acp->phase &= ~disconnected; MSGILEN=0; @@ -1724,8 +1723,8 @@ static void msgi_run(struct Scsi_Host *shpnt) continue; } - scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); - scsi_pointer->Message = MSGI(0); + acp = aha152x_priv(CURRENT_SC); + acp->message = MSGI(0); switch (MSGI(0)) { case DISCONNECT: @@ -1733,11 +1732,11 @@ static void msgi_run(struct Scsi_Host *shpnt) scmd_printk(KERN_WARNING, CURRENT_SC, "target was not allowed to disconnect\n"); - scsi_pointer->phase |= disconnected; + acp->phase |= disconnected; break; case COMMAND_COMPLETE: - scsi_pointer->phase |= completed; + acp->phase |= completed; break; case MESSAGE_REJECT: @@ -1867,11 +1866,9 @@ static void msgi_end(struct Scsi_Host *shpnt) */ static void msgo_init(struct Scsi_Host *shpnt) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); - if(MSGOLEN==0) { - if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 && - SYNCRATE==0) { + if ((aha152x_priv(CURRENT_SC)->phase & syncneg) && + SYNCNEG == 2 && SYNCRATE == 0) { ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); } else { scmd_printk(KERN_INFO, CURRENT_SC, @@ -1888,7 +1885,7 @@ static void msgo_init(struct Scsi_Host *shpnt) */ static void msgo_run(struct Scsi_Host *shpnt) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); while(MSGO_I<MSGOLEN) { if (TESTLO(SSTAT0, SPIORDY)) @@ -1901,13 +1898,13 @@ static void msgo_run(struct Scsi_Host *shpnt) if (MSGO(MSGO_I) & IDENTIFY_BASE) - scsi_pointer->phase |= identified; + acp->phase |= identified; if (MSGO(MSGO_I)==ABORT) - scsi_pointer->phase |= aborted; + acp->phase |= aborted; if (MSGO(MSGO_I)==BUS_DEVICE_RESET) - scsi_pointer->phase |= resetted; + acp->phase |= resetted; SETPORT(SCSIDAT, MSGO(MSGO_I++)); } @@ -1936,7 +1933,7 @@ static void msgo_end(struct Scsi_Host *shpnt) */ static void cmd_init(struct Scsi_Host *shpnt) { - if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) { + if (aha152x_priv(CURRENT_SC)->sent_command) { scmd_printk(KERN_ERR, CURRENT_SC, "command already sent\n"); done(shpnt, SAM_STAT_GOOD, DID_ERROR); @@ -1967,7 +1964,7 @@ static void cmd_end(struct Scsi_Host *shpnt) "command 
sent incompletely (%d/%d)\n", CMD_I, CURRENT_SC->cmd_len); else - aha152x_scsi_pointer(CURRENT_SC)->sent_command++; + aha152x_priv(CURRENT_SC)->sent_command++; } /* @@ -1979,7 +1976,7 @@ static void status_run(struct Scsi_Host *shpnt) if (TESTLO(SSTAT0, SPIORDY)) return; - aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT); + aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT); } @@ -2003,7 +2000,7 @@ static void datai_init(struct Scsi_Host *shpnt) static void datai_run(struct Scsi_Host *shpnt) { - struct scsi_pointer *scsi_pointer; + struct aha152x_cmd_priv *acp; unsigned long the_time; int fifodata, data_count; @@ -2041,36 +2038,35 @@ static void datai_run(struct Scsi_Host *shpnt) fifodata = GETPORT(FIFOSTAT); } - scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); - if (scsi_pointer->this_residual > 0) { - while (fifodata > 0 && scsi_pointer->this_residual > 0) { - data_count = fifodata > scsi_pointer->this_residual ? - scsi_pointer->this_residual : - fifodata; + acp = aha152x_priv(CURRENT_SC); + if (acp->this_residual > 0) { + while (fifodata > 0 && acp->this_residual > 0) { + data_count = fifodata > acp->this_residual ? + acp->this_residual : fifodata; fifodata -= data_count; if (data_count & 1) { SETPORT(DMACNTRL0, ENDMA|_8BIT); - *scsi_pointer->ptr++ = GETPORT(DATAPORT); - scsi_pointer->this_residual--; + *acp->ptr++ = GETPORT(DATAPORT); + acp->this_residual--; DATA_LEN++; SETPORT(DMACNTRL0, ENDMA); } if (data_count > 1) { data_count >>= 1; - insw(DATAPORT, scsi_pointer->ptr, data_count); - scsi_pointer->ptr += 2 * data_count; - scsi_pointer->this_residual -= 2 * data_count; + insw(DATAPORT, acp->ptr, data_count); + acp->ptr += 2 * data_count; + acp->this_residual -= 2 * data_count; DATA_LEN += 2 * data_count; } - if (scsi_pointer->this_residual == 0 && - !sg_is_last(scsi_pointer->buffer)) { + if (acp->this_residual == 0 && + !sg_is_last(acp->buffer)) { /* advance to next buffer */ - scsi_pointer->buffer = sg_next(scsi_pointer->buffer); - scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer); - scsi_pointer->this_residual = scsi_pointer->buffer->length; + acp->buffer = sg_next(acp->buffer); + acp->ptr = SG_ADDRESS(acp->buffer); + acp->this_residual = acp->buffer->length; } } } else if (fifodata > 0) { @@ -2138,15 +2134,15 @@ static void datao_init(struct Scsi_Host *shpnt) static void datao_run(struct Scsi_Host *shpnt) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); unsigned long the_time; int data_count; /* until phase changes or all data sent */ - while (TESTLO(DMASTAT, INTSTAT) && scsi_pointer->this_residual > 0) { + while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) { data_count = 128; - if (data_count > scsi_pointer->this_residual) - data_count = scsi_pointer->this_residual; + if (data_count > acp->this_residual) + data_count = acp->this_residual; if(TESTLO(DMASTAT, DFIFOEMP)) { scmd_printk(KERN_ERR, CURRENT_SC, @@ -2157,26 +2153,25 @@ static void datao_run(struct Scsi_Host *shpnt) if(data_count & 1) { SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT); - SETPORT(DATAPORT, *scsi_pointer->ptr++); - scsi_pointer->this_residual--; + SETPORT(DATAPORT, *acp->ptr++); + acp->this_residual--; CMD_INC_RESID(CURRENT_SC, -1); SETPORT(DMACNTRL0,WRITE_READ|ENDMA); } if(data_count > 1) { data_count >>= 1; - outsw(DATAPORT, scsi_pointer->ptr, data_count); - scsi_pointer->ptr += 2 * data_count; - scsi_pointer->this_residual -= 2 * data_count; + outsw(DATAPORT, acp->ptr, data_count); + acp->ptr += 2 * 
data_count; + acp->this_residual -= 2 * data_count; CMD_INC_RESID(CURRENT_SC, -2 * data_count); } - if (scsi_pointer->this_residual == 0 && - !sg_is_last(scsi_pointer->buffer)) { + if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) { /* advance to next buffer */ - scsi_pointer->buffer = sg_next(scsi_pointer->buffer); - scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer); - scsi_pointer->this_residual = scsi_pointer->buffer->length; + acp->buffer = sg_next(acp->buffer); + acp->ptr = SG_ADDRESS(acp->buffer); + acp->this_residual = acp->buffer->length; } the_time=jiffies + 100*HZ; @@ -2192,7 +2187,7 @@ static void datao_run(struct Scsi_Host *shpnt) static void datao_end(struct Scsi_Host *shpnt) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); + struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC); if(TESTLO(DMASTAT, DFIFOEMP)) { u32 datao_cnt = GETSTCNT(); @@ -2211,10 +2206,9 @@ static void datao_end(struct Scsi_Host *shpnt) sg = sg_next(sg); } - scsi_pointer->buffer = sg; - scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done; - scsi_pointer->this_residual = scsi_pointer->buffer->length - - done; + acp->buffer = sg; + acp->ptr = SG_ADDRESS(acp->buffer) + done; + acp->this_residual = acp->buffer->length - done; } SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); @@ -2229,7 +2223,6 @@ static void datao_end(struct Scsi_Host *shpnt) */ static int update_state(struct Scsi_Host *shpnt) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC); int dataphase=0; unsigned int stat0 = GETPORT(SSTAT0); unsigned int stat1 = GETPORT(SSTAT1); @@ -2244,7 +2237,7 @@ static int update_state(struct Scsi_Host *shpnt) } else if (stat0 & SELDI && PREVSTATE == busfree) { STATE=seldi; } else if (stat0 & SELDO && CURRENT_SC && - (scsi_pointer->phase & selecting)) { + (aha152x_priv(CURRENT_SC)->phase & selecting)) { STATE=seldo; } else if(stat1 & SELTO) { STATE=selto; @@ -2376,8 +2369,7 @@ static void is_complete(struct Scsi_Host *shpnt) SETPORT(SXFRCTL0, CH1); SETPORT(DMACNTRL0, 0); if(CURRENT_SC) - aha152x_scsi_pointer(CURRENT_SC)->phase &= - ~spiordy; + aha152x_priv(CURRENT_SC)->phase &= ~spiordy; } /* @@ -2399,8 +2391,7 @@ static void is_complete(struct Scsi_Host *shpnt) SETPORT(DMACNTRL0, 0); SETPORT(SXFRCTL0, CH1|SPIOEN); if(CURRENT_SC) - aha152x_scsi_pointer(CURRENT_SC)->phase |= - spiordy; + aha152x_priv(CURRENT_SC)->phase |= spiordy; } /* @@ -2490,7 +2481,7 @@ static void disp_enintr(struct Scsi_Host *shpnt) */ static void show_command(struct scsi_cmnd *ptr) { - const int phase = aha152x_scsi_pointer(ptr)->phase; + const int phase = aha152x_priv(ptr)->phase; scsi_print_command(ptr); scmd_printk(KERN_DEBUG, ptr, @@ -2538,8 +2529,8 @@ static void show_queues(struct Scsi_Host *shpnt) static void get_command(struct seq_file *m, struct scsi_cmnd * ptr) { - struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr); - const int phase = scsi_pointer->phase; + struct aha152x_cmd_priv *acp = aha152x_priv(ptr); + const int phase = acp->phase; int i; seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ", @@ -2549,8 +2540,8 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr) seq_printf(m, "0x%02x ", ptr->cmnd[i]); seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |", - scsi_get_resid(ptr), scsi_pointer->this_residual, - sg_nents(scsi_pointer->buffer) - 1); + scsi_get_resid(ptr), acp->this_residual, + sg_nents(acp->buffer) - 1); if (phase & not_issued) seq_puts(m, "not issued|"); diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h 
b/drivers/scsi/aic7xxx/aic79xx_osm.h index 679a4fd13874..793fe19993a9 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.h +++ b/drivers/scsi/aic7xxx/aic79xx_osm.h @@ -420,8 +420,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags) /* config registers for header type 0 devices */ #define PCIR_MAPS 0x10 -#define PCIR_SUBVEND_0 0x2c -#define PCIR_SUBDEV_0 0x2e /****************************** PCI-X definitions *****************************/ #define PCIXR_COMMAND 0x96 diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c index 2f0bdb9225a4..5fad41b1ab58 100644 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c @@ -260,8 +260,8 @@ ahd_find_pci_device(ahd_dev_softc_t pci) vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); - subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); - subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); + subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); + subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2); full_id = ahd_compose_id(device, vendor, subdevice, @@ -298,7 +298,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry) * Record if this is an HP board. */ subvendor = ahd_pci_read_config(ahd->dev_softc, - PCIR_SUBVEND_0, /*bytes*/2); + PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); if (subvendor == SUBID_HP) ahd->flags |= AHD_HP_BOARD; diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h index 4782a304e93c..51d9f4de0734 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.h +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h @@ -433,8 +433,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags) /* config registers for header type 0 devices */ #define PCIR_MAPS 0x10 -#define PCIR_SUBVEND_0 0x2c -#define PCIR_SUBDEV_0 0x2e typedef enum { diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c index dab3a6d12c4d..2d4c85426dc3 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c @@ -673,8 +673,8 @@ ahc_find_pci_device(ahc_dev_softc_t pci) vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2); device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2); - subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2); - subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2); + subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2); + subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2); full_id = ahc_compose_id(device, vendor, subdevice, subvendor); /* diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 0103f811cc25..776544385598 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1169,7 +1169,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, ofld_kcqe->fcoe_conn_context_id); interface = tgt->port->priv; if (hba != interface->hba) { - printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n"); + printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n"); goto ofld_cmpl_err; } /* @@ -1226,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, * and enable */ if (tgt->context_id != context_id) { - printk(KERN_ERR PFX "context id mis-match\n"); + printk(KERN_ERR PFX "context id mismatch\n"); return; } interface = tgt->port->priv; if (hba != interface->hba) { - printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA 
mis-match\n"); + printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n"); goto enbl_cmpl_err; } if (!ofld_kcqe->completion_status) diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 5521469ce678..6c864b093ac9 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) break; - if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) { + if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { if (nopin->op_code == ISCSI_OP_NOOP_IN && nopin->itt == (u16) RESERVED_ITT) { printk(KERN_ALERT "bnx2i: Unsolicited " @@ -2398,7 +2398,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba, } if (hba != ep->hba) { - printk(KERN_ALERT "conn destroy- error hba mis-match\n"); + printk(KERN_ALERT "conn destroy- error hba mismatch\n"); return; } @@ -2432,7 +2432,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba, } if (hba != ep->hba) { - printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n"); + printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n"); return; } diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index fe86fd61a995..15fbd09baa94 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, struct iscsi_conn *conn = ep->conn->cls_conn->dd_data; /* Must suspend all rx queue activity for this ep */ - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); } /* CONN_DISCONNECT timeout may or may not be an issue depending * on what transcribed in TCP layer, different targets behave diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 8c7d4dda4cf2..4365d52c6430 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, conn 0x%p.\n", csk, conn); - if (unlikely(!conn || conn->suspend_rx)) { + if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { log_debug(1 << CXGBI_DBG_PDU_RX, - "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", + "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n", csk, conn, conn ? conn->id : 0xFF, - conn ? conn->suspend_rx : 0xFF); + conn ? 
conn->flags : 0xFF); return; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 461ef8a76c4c..4bda2f6cb352 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -442,7 +442,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, case SAS_PROTOCOL_INTERNAL_ABORT: hisi_sas_task_prep_abort(hisi_hba, slot); break; - fallthrough; default: return; } diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 80238e6a3c98..eee1a24f7e15 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -36,7 +36,7 @@ #define IBMVSCSIS_VERSION "v0.2" -#define INITIAL_SRP_LIMIT 800 +#define INITIAL_SRP_LIMIT 1024 #define DEFAULT_MAX_SECTORS 256 #define MAX_TXU 1024 * 1024 diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index d690d9cf7eb1..35589b6af90d 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -413,7 +413,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for io request object " - "that doesnt exist.\n", + "that doesn't exist.\n", __func__, ihost, ent); @@ -428,7 +428,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for remote device object " - "that doesnt exist.\n", + "that doesn't exist.\n", __func__, ihost, ent); @@ -462,7 +462,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) } else dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received event 0x%x " - "for remote device object 0x%0x that doesnt " + "for remote device object 0x%0x that doesn't " "exist.\n", __func__, ihost, diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index d09926e6c8a8..797abf4f5399 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, struct iscsi_task *task; itt_t itt; - if (session->state == ISCSI_STATE_TERMINATE) + if (session->state == ISCSI_STATE_TERMINATE || + !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags)) return NULL; if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) { @@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn) if (conn->stop_stage == 0) session->state = ISCSI_STATE_FAILED; - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); + set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); return true; } @@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task, * Do this after dropping the extra ref because if this was a requeue * it's removed from that list and cleanup_queued_task would miss it. */ - if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { /* * Save the task and ref in case we weren't cleaning up this * task and get woken up again. 
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) int rc = 0; spin_lock_bh(&conn->session->frwd_lock); - if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); spin_unlock_bh(&conn->session->frwd_lock); return -ENODATA; @@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) goto fault; } - if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { + if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) { reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_REQUEUE << 16; goto fault; @@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error) void iscsi_suspend_queue(struct iscsi_conn *conn) { spin_lock_bh(&conn->session->frwd_lock); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); spin_unlock_bh(&conn->session->frwd_lock); } EXPORT_SYMBOL_GPL(iscsi_suspend_queue); @@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn) struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); if (ihost->workq) flush_workqueue(ihost->workq); } @@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx); static void iscsi_start_tx(struct iscsi_conn *conn) { - clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); iscsi_conn_queue_work(conn); } @@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active) iscsi_suspend_tx(conn); spin_lock_bh(&session->frwd_lock); + clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); + if (!is_active) { /* * if logout timed out before userspace could even send a PDU @@ -3045,7 +3048,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, if (!cls_conn) return NULL; conn = cls_conn->dd_data; - memset(conn, 0, sizeof(*conn) + dd_size); conn->dd_data = cls_conn->dd_data + sizeof(*conn); conn->session = session; @@ -3318,6 +3320,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, spin_lock_bh(&session->frwd_lock); if (is_leading) session->leadconn = conn; + + set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); spin_unlock_bh(&session->frwd_lock); /* @@ -3330,8 +3334,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, /* * Unblock xmitworker(), Login Phase will pass through. 
*/ - clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); + clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); return 0; } EXPORT_SYMBOL_GPL(iscsi_conn_bind); diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 2e9ffe3d1a55..883005757ddb 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, */ conn->last_recv = jiffies; - if (unlikely(conn->suspend_rx)) { + if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) { ISCSI_DBG_TCP(conn, "Rx suspended!\n"); *status = ISCSI_TCP_SUSPENDED; return 0; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index f0cf8ffdc5f3..0025760230e5 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -897,6 +897,11 @@ enum lpfc_irq_chann_mode { NHT_MODE, }; +enum lpfc_hba_bit_flags { + FABRIC_COMANDS_BLOCKED, + HBA_PCI_ERR, +}; + struct lpfc_hba { /* SCSI interface function jump table entries */ struct lpfc_io_buf * (*lpfc_get_scsi_buf) @@ -1043,7 +1048,6 @@ struct lpfc_hba { * Firmware supports Forced Link Speed * capability */ -#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */ #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ #define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */ #define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */ @@ -1350,7 +1354,6 @@ struct lpfc_hba { atomic_t fabric_iocb_count; struct timer_list fabric_block_timer; unsigned long bit_flags; -#define FABRIC_COMANDS_BLOCKED 0 atomic_t num_rsrc_err; atomic_t num_cmd_success; unsigned long last_rsrc_error_time; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 96408cd6c4c8..9897a1aa387b 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -670,3 +670,6 @@ struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, uint32_t hash, uint8_t *buf); void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport); int lpfc_issue_els_qfpa(struct lpfc_vport *vport); + +void lpfc_sli_rpi_release(struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 0144da30e3db..2b877dff5ed4 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport) ndlp = rdata->pnode; if (!rdata->pnode) { - pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n", - __func__, rport, rport->scsi_target_id); + pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n", + __func__, rport, rport->scsi_target_id); return -EINVAL; } @@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3181 dev_loss_callbk x%06x, rport x%px flg x%x " - "load_flag x%x refcnt %d\n", + "load_flag x%x refcnt %d state %d xpt x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, - vport->load_flag, kref_read(&ndlp->kref)); + vport->load_flag, kref_read(&ndlp->kref), + ndlp->nlp_state, ndlp->fc4_xpt_flags); /* Don't schedule a worker thread event if the vport is going down. * The teardown process cleans up the node via lpfc_drop_node. 
@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) ndlp->rport = NULL; ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD; + /* clear the NLP_XPT_REGD if the node is not registered + * with nvme-fc + */ + if (ndlp->fc4_xpt_flags == NLP_XPT_REGD) + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; /* Remove the node reference from remote_port_add now. * The driver will not call remote_port_delete. @@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) ndlp->rport = NULL; spin_unlock_irqrestore(&ndlp->lock, iflags); - /* We need to hold the node by incrementing the reference - * count until this queued work is done - */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); + if (phba->worker_thread) { + /* We need to hold the node by incrementing the reference + * count until this queued work is done + */ + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + + spin_lock_irqsave(&phba->hbalock, iflags); + if (evtp->evt_arg1) { + evtp->evt = LPFC_EVT_DEV_LOSS; + list_add_tail(&evtp->evt_listp, &phba->work_list); + lpfc_worker_wake_up(phba); + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + } else { + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "3188 worker thread is stopped %s x%06x, " + " rport x%px flg x%x load_flag x%x refcnt " + "%d\n", __func__, ndlp->nlp_DID, + ndlp->rport, ndlp->nlp_flag, + vport->load_flag, kref_read(&ndlp->kref)); + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { + spin_lock_irqsave(&ndlp->lock, iflags); + /* Node is in dev loss. No further transaction. */ + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); + } - spin_lock_irqsave(&phba->hbalock, iflags); - if (evtp->evt_arg1) { - evtp->evt = LPFC_EVT_DEV_LOSS; - list_add_tail(&evtp->evt_listp, &phba->work_list); - lpfc_worker_wake_up(phba); } - spin_unlock_irqrestore(&phba->hbalock, iflags); return; } @@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x\n", + "NPort x%06x Data: x%x x%x x%x refcnt %d\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, - ndlp->nlp_state, ndlp->nlp_rpi); + ndlp->nlp_state, ndlp->nlp_rpi, + kref_read(&ndlp->kref)); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, "0204 Devloss timeout on " @@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba) int free_evt; int fcf_inuse; uint32_t nlp_did; + bool hba_pci_err; spin_lock_irq(&phba->hbalock); while (!list_empty(&phba->work_list)) { list_remove_head((&phba->work_list), evtp, typeof(*evtp), evt_listp); spin_unlock_irq(&phba->hbalock); + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); free_evt = 1; switch (evtp->evt) { case LPFC_EVT_ELS_RETRY: ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); - lpfc_els_retry_delay_handler(ndlp); - free_evt = 0; /* evt is part of ndlp */ + if (!hba_pci_err) { + lpfc_els_retry_delay_handler(ndlp); + free_evt = 0; /* evt is part of ndlp */ + } /* decrement the node reference count held * for this queued work */ @@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba) break; case LPFC_EVT_RECOVER_PORT: ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); - lpfc_sli_abts_recover_port(ndlp->vport, ndlp); - free_evt = 0; + if (!hba_pci_err) { + lpfc_sli_abts_recover_port(ndlp->vport, ndlp); + free_evt = 0; + } /* decrement the node reference count 
held for * this queued work */ @@ -859,14 +890,18 @@ lpfc_work_done(struct lpfc_hba *phba) struct lpfc_vport **vports; struct lpfc_vport *vport; int i; + bool hba_pci_err; + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); spin_lock_irq(&phba->hbalock); ha_copy = phba->work_ha; phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); + if (hba_pci_err) + ha_copy = 0; /* First, try to post the next mailbox command to SLI4 device */ - if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err) lpfc_sli4_post_async_mbox(phba); if (ha_copy & HA_ERATT) { @@ -886,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_handle_latt(phba); /* Handle VMID Events */ - if (lpfc_is_vmid_enabled(phba)) { + if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) { if (phba->pport->work_port_events & WORKER_CHECK_VMID_ISSUE_QFPA) { lpfc_check_vmid_qfpa_issue(phba); @@ -936,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba) work_port_events = vport->work_port_events; vport->work_port_events &= ~work_port_events; spin_unlock_irq(&vport->work_port_lock); + if (hba_pci_err) + continue; if (work_port_events & WORKER_DISC_TMO) lpfc_disc_timeout_handler(vport); if (work_port_events & WORKER_ELS_TMO) @@ -1173,12 +1210,14 @@ lpfc_linkdown(struct lpfc_hba *phba) struct lpfc_vport **vports; LPFC_MBOXQ_t *mb; int i; + int offline; if (phba->link_state == LPFC_LINK_DOWN) return 0; /* Block all SCSI stack I/Os */ lpfc_scsi_dev_block(phba); + offline = pci_channel_offline(phba->pcidev); phba->defer_flogi_acc_flag = false; @@ -1219,7 +1258,7 @@ lpfc_linkdown(struct lpfc_hba *phba) lpfc_destroy_vport_work_array(phba, vports); /* Clean up any SLI3 firmware default rpi's */ - if (phba->sli_rev > LPFC_SLI_REV3) + if (phba->sli_rev > LPFC_SLI_REV3 || offline) goto skip_unreg_did; mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -4712,6 +4751,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_lock_irqsave(&ndlp->lock, iflags); if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { spin_unlock_irqrestore(&ndlp->lock, iflags); + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "0999 %s Not regd: ndlp x%px rport x%px DID " + "x%x FLG x%x XPT x%x\n", + __func__, ndlp, ndlp->rport, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp->fc4_xpt_flags); return; } @@ -4722,6 +4766,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { vport->phba->nport_event_cnt++; lpfc_unregister_remote_port(ndlp); + } else if (!ndlp->rport) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "1999 %s NDLP in devloss x%px DID x%x FLG x%x" + " XPT x%x refcnt %d\n", + __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->fc4_xpt_flags, + kref_read(&ndlp->kref)); } if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { @@ -5371,6 +5422,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ndlp->nlp_flag &= ~NLP_UNREG_INP; mempool_free(mbox, phba->mbox_mem_pool); acc_plogi = 1; + lpfc_nlp_put(ndlp); } } else { lpfc_printf_vlog(vport, KERN_INFO, @@ -6097,12 +6149,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport) } } +/* + * lpfc_notify_xport_npr - notifies xport of node disappearance + * @vport: Pointer to Virtual Port object. + * + * Transitions all ndlps to NPR state. When lpfc_nlp_set_state + * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered + * and transport notified that the node is gone. 
+ * Return Code: + * none + */ +static void +lpfc_notify_xport_npr(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp, *next_ndlp; + + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, + nlp_listp) { + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + } +} void lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) { lpfc_els_flush_rscn(vport); lpfc_els_flush_cmd(vport); lpfc_disc_flush_list(vport); + if (pci_channel_offline(vport->phba->pcidev)) + lpfc_notify_xport_npr(vport); } /*****************************************************************************/ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index eed6464bd880..461d333b1b3a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *); static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *); +static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -1642,7 +1643,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { spin_lock_irq(&phba->hbalock); if (phba->link_state == LPFC_HBA_ERROR && - phba->hba_flag & HBA_PCI_ERR) { + test_bit(HBA_PCI_ERR, &phba->bit_flags)) { spin_unlock_irq(&phba->hbalock); return; } @@ -1985,6 +1986,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) if (pci_channel_offline(phba->pcidev)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3166 pci channel is offline\n"); + lpfc_sli_flush_io_rings(phba); return; } @@ -2973,6 +2975,22 @@ lpfc_cleanup(struct lpfc_vport *vport) NLP_EVT_DEVICE_RM); } + /* This is a special case flush to return all + * IOs before entering this loop. There are + * two points in the code where a flush is + * avoided if the FC_UNLOADING flag is set. + * one is in the multipool destroy, + * (this prevents a crash) and the other is + * in the nvme abort handler, ( also prevents + * a crash). Both of these exceptions are + * cases where the slot is still accessible. + * The flush here is only when the pci slot + * is offline. + */ + if (vport->load_flag & FC_UNLOADING && + pci_channel_offline(phba->pcidev)) + lpfc_sli_flush_io_rings(vport->phba); + /* At this point, ALL ndlp's should be gone * because of the previous NLP_EVT_DEVICE_RM. * Lets wait for this to happen, if needed. 
@@ -2985,7 +3003,7 @@ lpfc_cleanup(struct lpfc_vport *vport) list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { lpfc_printf_vlog(ndlp->vport, KERN_ERR, - LOG_TRACE_EVENT, + LOG_DISCOVERY, "0282 did:x%x ndlp:x%px " "refcnt:%d xflags x%x nflag x%x\n", ndlp->nlp_DID, (void *)ndlp, @@ -3682,7 +3700,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) struct lpfc_vport **vports; struct Scsi_Host *shost; int i; - int offline = 0; + int offline; + bool hba_pci_err; if (vport->fc_flag & FC_OFFLINE_MODE) return; @@ -3692,6 +3711,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) lpfc_linkdown(phba); offline = pci_channel_offline(phba->pcidev); + hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags); /* Issue an unreg_login to all nodes on all vports */ vports = lpfc_create_vport_work_array(phba); @@ -3715,11 +3735,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); - if (offline) { + if (offline || hba_pci_err) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_UNREG_INP | NLP_RPI_REGISTERED); spin_unlock_irq(&ndlp->lock); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli_rpi_release(vports[i], + ndlp); } else { lpfc_unreg_rpi(vports[i], ndlp); } @@ -13354,8 +13377,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) /* Abort all iocbs associated with the hba */ lpfc_sli_hba_iocb_abort(phba); - /* Wait for completion of device XRI exchange busy */ - lpfc_sli4_xri_exchange_busy_wait(phba); + if (!pci_channel_offline(phba->pcidev)) + /* Wait for completion of device XRI exchange busy */ + lpfc_sli4_xri_exchange_busy_wait(phba); /* per-phba callback de-registration for hotplug event */ if (phba->pport) @@ -13374,15 +13398,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) /* Disable FW logging to host memory */ lpfc_ras_stop_fwlog(phba); - /* Unset the queues shared with the hardware then release all - * allocated resources. - */ - lpfc_sli4_queue_unset(phba); - lpfc_sli4_queue_destroy(phba); - /* Reset SLI4 HBA FCoE function */ lpfc_pci_function_reset(phba); + /* release all queue allocated resources. 
*/ + lpfc_sli4_queue_destroy(phba); + /* Free RAS DMA memory */ if (phba->ras_fwlog.ras_enabled) lpfc_sli4_ras_dma_free(phba); @@ -14262,6 +14283,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) "2711 PCI channel permanent disable for failure\n"); /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + lpfc_sli4_prep_dev_for_reset(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -15057,24 +15079,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "2826 PCI channel disable preparing for reset\n"); + int offline = pci_channel_offline(phba->pcidev); + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2826 PCI channel disable preparing for reset offline" + " %d\n", offline); /* Block any management I/Os to the device */ lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); - /* Block all SCSI devices' I/Os on the host */ - lpfc_scsi_dev_block(phba); + /* HBA_PCI_ERR was set in io_error_detect */ + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); /* Flush all driver's outstanding I/Os as we are to reset */ lpfc_sli_flush_io_rings(phba); + lpfc_offline(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); + lpfc_sli4_queue_destroy(phba); /* Disable interrupt and pci device */ lpfc_sli4_disable_intr(phba); - lpfc_sli4_queue_destroy(phba); pci_disable_device(phba->pcidev); } @@ -15123,6 +15149,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + bool hba_pci_err; switch (state) { case pci_channel_io_normal: @@ -15130,17 +15157,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) lpfc_sli4_prep_dev_for_recover(phba); return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: - phba->hba_flag |= HBA_PCI_ERR; + hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); /* Fatal error, prepare for slot reset */ - lpfc_sli4_prep_dev_for_reset(phba); + if (!hba_pci_err) + lpfc_sli4_prep_dev_for_reset(phba); + else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2832 Already handling PCI error " + "state: x%x\n", state); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: - phba->hba_flag |= HBA_PCI_ERR; + set_bit(HBA_PCI_ERR, &phba->bit_flags); /* Permanent failure, prepare for device down */ lpfc_sli4_prep_dev_for_perm_failure(phba); return PCI_ERS_RESULT_DISCONNECT; default: - phba->hba_flag |= HBA_PCI_ERR; + hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags); + if (!hba_pci_err) + lpfc_sli4_prep_dev_for_reset(phba); /* Unknown state, prepare and request slot reset */ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2825 Unknown PCI error state: x%x\n", state); @@ -15174,17 +15208,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev) struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; struct lpfc_sli *psli = &phba->sli; uint32_t intr_mode; + bool hba_pci_err; dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); if (pci_enable_device_mem(pdev)) { printk(KERN_ERR "lpfc: Cannot re-enable " - "PCI device after reset.\n"); + "PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_restore_state(pdev); - phba->hba_flag &= ~HBA_PCI_ERR; + hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags); + if (!hba_pci_err) + dev_info(&pdev->dev, + "hba_pci_err was not set, recovering slot reset.\n"); /* 
* As the new kernel behavior of pci_restore_state() API call clears * device saved_state flag, need to save the restored state again. @@ -15198,6 +15236,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev) psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { @@ -15239,8 +15279,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev) */ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { /* Perform device reset */ - lpfc_offline_prep(phba, LPFC_MBX_WAIT); - lpfc_offline(phba); lpfc_sli_brdrestart(phba); /* Bring the device back online */ lpfc_online(phba); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 1213a299f9aa..8d26f207ebd2 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, lport = (struct lpfc_nvme_lport *)pnvme_lport->private; vport = lport->vport; + + if (!vport || vport->load_flag & FC_UNLOADING || + vport->phba->hba_flag & HBA_IOQ_FLUSH) + return -ENODEV; + qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); if (qhandle == NULL) return -ENOMEM; @@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, return -EINVAL; remoteport = lpfc_rport->remoteport; - if (!vport->localport) + if (!vport->localport || + vport->phba->hba_flag & HBA_IOQ_FLUSH) return -EINVAL; lport = vport->localport->private; @@ -559,6 +565,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_DID, ntype, nstate); return -ENODEV; } + if (vport->phba->hba_flag & HBA_IOQ_FLUSH) + return -ENODEV; if (!vport->phba->sli4_hba.nvmels_wq) return -ENOMEM; @@ -662,7 +670,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, return -EINVAL; vport = lport->vport; - if (vport->load_flag & FC_UNLOADING) + if (vport->load_flag & FC_UNLOADING || + vport->phba->hba_flag & HBA_IOQ_FLUSH) return -ENODEV; atomic_inc(&lport->fc4NvmeLsRequests); @@ -1516,7 +1525,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, phba = vport->phba; - if (unlikely(vport->load_flag & FC_UNLOADING)) { + if ((unlikely(vport->load_flag & FC_UNLOADING)) || + phba->hba_flag & HBA_IOQ_FLUSH) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6124 Fail IO, Driver unload\n"); atomic_inc(&lport->xmt_fcp_err); @@ -2169,8 +2179,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, abts_nvme = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { qp = &phba->sli4_hba.hdwq[i]; - if (!vport || !vport->localport || - !qp || !qp->io_wq) + if (!vport->localport || !qp || !qp->io_wq) return; pring = qp->io_wq->pring; @@ -2180,8 +2189,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, abts_scsi += qp->abts_scsi_io_bufs; abts_nvme += qp->abts_nvme_io_bufs; } - if (!vport || !vport->localport || - vport->phba->hba_flag & HBA_PCI_ERR) + if (!vport->localport || + test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || + vport->load_flag & FC_UNLOADING) return; lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -2541,8 +2551,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * return values is ignored. The upcall is a courtesy to the * transport. 
*/ - if (vport->load_flag & FC_UNLOADING || - unlikely(vport->phba->hba_flag & HBA_PCI_ERR)) + if (vport->load_flag & FC_UNLOADING) (void)nvme_fc_set_remoteport_devloss(remoteport, 0); ret = nvme_fc_unregister_remoteport(remoteport); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 3c132604fd91..ba9dbb51b75f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -5929,13 +5929,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) } lpfc_cmd->waitq = &waitq; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { spin_unlock(&pring_s4->ring_lock); - else + ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, + lpfc_sli_abort_fcp_cmpl); + } else { pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; - - ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, - lpfc_sli_abort_fcp_cmpl); + ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, + lpfc_sli_abort_fcp_cmpl); + } /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 20d40957a385..bda2a7ba4e77 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2828,6 +2828,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ndlp->nlp_flag &= ~NLP_UNREG_INP; } +void +lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + __lpfc_sli_rpi_release(vport, ndlp); +} + /** * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler * @phba: Pointer to HBA context object. @@ -3715,7 +3721,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, unsigned long iflag; u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag; + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock_irqsave(&pring->ring_lock, iflag); + else + spin_lock_irqsave(&phba->hbalock, iflag); cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock_irqrestore(&pring->ring_lock, iflag); + else + spin_unlock_irqrestore(&phba->hbalock, iflag); ulp_command = get_job_cmnd(phba, saveq); ulp_status = get_job_ulpstatus(phba, saveq); @@ -4052,10 +4066,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, break; } - spin_unlock_irqrestore(&phba->hbalock, iflag); cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq); - spin_lock_irqsave(&phba->hbalock, iflag); if (unlikely(!cmdiocbq)) break; if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) @@ -4536,42 +4548,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, void lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { - LIST_HEAD(completions); + LIST_HEAD(tx_completions); + LIST_HEAD(txcmplq_completions); struct lpfc_iocbq *iocb, *next_iocb; + int offline; if (pring->ringno == LPFC_ELS_RING) { lpfc_fabric_abort_hba(phba); } + offline = pci_channel_offline(phba->pcidev); /* Error everything on txq and txcmplq * First do the txq. 
*/ if (phba->sli_rev >= LPFC_SLI_REV4) { spin_lock_irq(&pring->ring_lock); - list_splice_init(&pring->txq, &completions); + list_splice_init(&pring->txq, &tx_completions); pring->txq_cnt = 0; - spin_unlock_irq(&pring->ring_lock); - spin_lock_irq(&phba->hbalock); - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) - lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); - spin_unlock_irq(&phba->hbalock); + if (offline) { + list_splice_init(&pring->txcmplq, + &txcmplq_completions); + } else { + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, + iocb, NULL); + } + spin_unlock_irq(&pring->ring_lock); } else { spin_lock_irq(&phba->hbalock); - list_splice_init(&pring->txq, &completions); + list_splice_init(&pring->txq, &tx_completions); pring->txq_cnt = 0; - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) - lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); + if (offline) { + list_splice_init(&pring->txcmplq, &txcmplq_completions); + } else { + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, + &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, + iocb, NULL); + } spin_unlock_irq(&phba->hbalock); } - /* Make sure HBA is alive */ - lpfc_issue_hb_tmo(phba); + if (offline) { + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &txcmplq_completions, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); + } else { + /* Make sure HBA is alive */ + lpfc_issue_hb_tmo(phba); + } /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } @@ -4624,11 +4656,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba) struct lpfc_iocbq *piocb, *next_iocb; spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & HBA_IOQ_FLUSH || - !phba->sli4_hba.hdwq) { - spin_unlock_irq(&phba->hbalock); - return; - } /* Indicate the I/O queues are flushed */ phba->hba_flag |= HBA_IOQ_FLUSH; spin_unlock_irq(&phba->hbalock); @@ -10997,6 +11024,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, unsigned long iflags; int rc; + /* If the PCI channel is in offline state, do not post iocbs. */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return IOCB_ERROR; + if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_sli_prep_wqe(phba, piocb); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index e52f37e5d896..a4d3259b8c52 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.0" +#define LPFC_DRIVER_VERSION "14.2.0.1" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 611871ef15b5..4919ea54b827 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2560,6 +2560,9 @@ struct megasas_instance_template { #define MEGASAS_IS_LOGICAL(sdev) \ ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) +#define MEGASAS_IS_LUN_VALID(sdev) \ + (((sdev)->lun == 0) ? 
1 : 0) + #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ scp->device->id) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 8bf72dbc33b7..db6793608447 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev) goto scan_target; } return -ENXIO; + } else if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return -ENXIO; } scan_target: @@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev) instance = megasas_lookup_instance(sdev->host->host_no); if (MEGASAS_IS_LOGICAL(sdev)) { + if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return; + } ld_tgt_id = MEGASAS_TARGET_ID(sdev); instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; if (megasas_dbg_lvl & LD_PD_DEBUG) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index b57f1803371e..538d2c0cd971 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5716,13 +5716,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) /** * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are * having same upper 32bits in their base memory address. - * @reply_pool_start_address: Base address of a reply queue set + * @start_address: Base address of a reply queue set * @pool_sz: Size of single Reply Descriptor Post Queues pool size * * Return: 1 if reply queues in a set have a same upper 32bits in their base * memory address, else 0. */ - static int mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 0563078227de..a8dd14c91efd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t retry_count++; if (ioc->config_cmds.smid == smid) mpt3sas_base_free_smid(ioc, smid); - if ((ioc->shost_recovery) || (ioc->config_cmds.status & - MPT3_CMD_RESET) || ioc->pci_error_recovery) + if (ioc->config_cmds.status & MPT3_CMD_RESET) goto retry_config; - issue_host_reset = 1; + if (ioc->shost_recovery || ioc->pci_error_recovery) { + issue_host_reset = 0; + r = -EFAULT; + } else + issue_host_reset = 1; goto free_mem; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 00792767c620..7e476f50935b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -11035,6 +11035,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, { struct _sas_port *mpt3sas_port, *next; unsigned long flags; + int port_id; /* remove sibling ports attached to this expander */ list_for_each_entry_safe(mpt3sas_port, next, @@ -11055,6 +11056,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, mpt3sas_port->hba_port); } + port_id = sas_expander->port->port_id; + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, sas_expander->sas_address_parent, sas_expander->port); @@ -11062,7 +11065,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", sas_expander->handle, (unsigned long long) sas_expander->sas_address, - 
sas_expander->port->port_id); + port_id); spin_lock_irqsave(&ioc->sas_node_lock, flags); list_del(&sas_expander->list); diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 7ac63eb5ccd3..2fde496fff5f 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -647,6 +647,7 @@ static struct pci_device_id mvs_pci_table[] = { { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, + { PCI_VDEVICE(TTI, 0x2640), chip_6440 }, { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c index c4a838635893..5d7dfefd6f6c 100644 --- a/drivers/scsi/pcmcia/sym53c500_cs.c +++ b/drivers/scsi/pcmcia/sym53c500_cs.c @@ -192,10 +192,11 @@ struct sym53c500_data { int fast_pio; }; -static struct scsi_pointer *sym53c500_scsi_pointer(struct scsi_cmnd *cmd) -{ - return scsi_cmd_priv(cmd); -} +struct sym53c500_cmd_priv { + int status; + int message; + int phase; +}; enum Phase { idle, @@ -356,7 +357,7 @@ SYM53C500_intr(int irq, void *dev_id) struct sym53c500_data *data = (struct sym53c500_data *)dev->hostdata; struct scsi_cmnd *curSC = data->current_SC; - struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(curSC); + struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC); int fast_pio = data->fast_pio; spin_lock_irqsave(dev->host_lock, flags); @@ -403,12 +404,11 @@ SYM53C500_intr(int irq, void *dev_id) if (int_reg & 0x20) { /* Disconnect */ DEB(printk("SYM53C500: disconnect intr received\n")); - if (scsi_pointer->phase != message_in) { /* Unexpected disconnect */ + if (scp->phase != message_in) { /* Unexpected disconnect */ curSC->result = DID_NO_CONNECT << 16; } else { /* Command complete, return status and message */ - curSC->result = (scsi_pointer->Status & 0xff) | - ((scsi_pointer->Message & 0xff) << 8) | - (DID_OK << 16); + curSC->result = (scp->status & 0xff) | + ((scp->message & 0xff) << 8) | (DID_OK << 16); } goto idle_out; } @@ -419,7 +419,7 @@ SYM53C500_intr(int irq, void *dev_id) struct scatterlist *sg; int i; - scsi_pointer->phase = data_out; + scp->phase = data_out; VDEB(printk("SYM53C500: Data-Out phase\n")); outb(FLUSH_FIFO, port_base + CMD_REG); LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */ @@ -438,7 +438,7 @@ SYM53C500_intr(int irq, void *dev_id) struct scatterlist *sg; int i; - scsi_pointer->phase = data_in; + scp->phase = data_in; VDEB(printk("SYM53C500: Data-In phase\n")); outb(FLUSH_FIFO, port_base + CMD_REG); LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */ @@ -453,12 +453,12 @@ SYM53C500_intr(int irq, void *dev_id) break; case 0x02: /* COMMAND */ - scsi_pointer->phase = command_ph; + scp->phase = command_ph; printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n"); break; case 0x03: /* STATUS */ - scsi_pointer->phase = status_ph; + scp->phase = status_ph; VDEB(printk("SYM53C500: Status phase\n")); outb(FLUSH_FIFO, port_base + CMD_REG); outb(INIT_CMD_COMPLETE, port_base + CMD_REG); @@ -471,24 +471,22 @@ SYM53C500_intr(int irq, void *dev_id) case 0x06: /* MESSAGE-OUT */ DEB(printk("SYM53C500: Message-Out phase\n")); - scsi_pointer->phase = message_out; + scp->phase = message_out; outb(SET_ATN, port_base + CMD_REG); /* Reject the message */ outb(MSG_ACCEPT, port_base + CMD_REG); break; case 0x07: /* MESSAGE-IN */ 
VDEB(printk("SYM53C500: Message-In phase\n")); - scsi_pointer->phase = message_in; + scp->phase = message_in; - scsi_pointer->Status = inb(port_base + SCSI_FIFO); - scsi_pointer->Message = inb(port_base + SCSI_FIFO); + scp->status = inb(port_base + SCSI_FIFO); + scp->message = inb(port_base + SCSI_FIFO); VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f)); - DEB(printk("Status = %02x Message = %02x\n", - scsi_pointer->Status, scsi_pointer->Message)); + DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message)); - if (scsi_pointer->Message == SAVE_POINTERS || - scsi_pointer->Message == DISCONNECT) { + if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) { outb(SET_ATN, port_base + CMD_REG); /* Reject message */ DEB(printk("Discarding SAVE_POINTERS message\n")); } @@ -500,7 +498,7 @@ out: return IRQ_HANDLED; idle_out: - scsi_pointer->phase = idle; + scp->phase = idle; scsi_done(curSC); goto out; } @@ -548,7 +546,7 @@ SYM53C500_info(struct Scsi_Host *SChost) static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt) { - struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(SCpnt); + struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt); int i; int port_base = SCpnt->device->host->io_port; struct sym53c500_data *data = @@ -565,9 +563,9 @@ static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt) VDEB(printk("\n")); data->current_SC = SCpnt; - scsi_pointer->phase = command_ph; - scsi_pointer->Status = 0; - scsi_pointer->Message = 0; + scp->phase = command_ph; + scp->status = 0; + scp->message = 0; /* We are locked here already by the mid layer */ REG0(port_base); @@ -682,7 +680,7 @@ static struct scsi_host_template sym53c500_driver_template = { .this_id = 7, .sg_tablesize = 32, .shost_groups = SYM53C500_shost_groups, - .cmd_size = sizeof(struct scsi_pointer), + .cmd_size = sizeof(struct sym53c500_cmd_priv), }; static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data) diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index f90b707c190b..01c5e8ff4cc5 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + /* Enable higher IQs and OQs, 32 to 63, bit 16 */ + if (pm8001_ha->max_q_num > 32) + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + 1 << 16; /* Disable end to end CRC checking */ pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); @@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) if (0x0000 != gst_len_mpistate) return -EBUSY; + /* + * As per controller datasheet, after successful MPI + * initialization minimum 500ms delay is required before + * issuing commands. 
+ */ + msleep(500); + return 0; } @@ -1727,10 +1738,11 @@ static void pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX - u32 mask; - mask = (u32)(1 << vec); - - pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); + if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec); + else + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U, + 1U << (vec - 32)); return; #endif pm80xx_chip_intx_interrupt_enable(pm8001_ha); @@ -1746,12 +1758,15 @@ static void pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX - u32 mask; - if (vec == 0xFF) - mask = 0xFFFFFFFF; + if (vec == 0xFF) { + /* disable all vectors 0-31, 32-63 */ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF); + } else if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec); else - mask = (u32)(1 << vec); - pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, + 1U << (vec - 32)); return; #endif pm80xx_chip_intx_interrupt_disable(pm8001_ha); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 928532180d32..fd674ed1febe 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -3182,124 +3182,6 @@ static int pmcraid_build_ioadl( } /** - * pmcraid_free_sglist - Frees an allocated SG buffer list - * @sglist: scatter/gather list pointer - * - * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist - * - * Return value: - * none - */ -static void pmcraid_free_sglist(struct pmcraid_sglist *sglist) -{ - sgl_free_order(sglist->scatterlist, sglist->order); - kfree(sglist); -} - -/** - * pmcraid_alloc_sglist - Allocates memory for a SG list - * @buflen: buffer length - * - * Allocates a DMA'able buffer in chunks and assembles a scatter/gather - * list. - * - * Return value - * pointer to sglist / NULL on failure - */ -static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen) -{ - struct pmcraid_sglist *sglist; - int sg_size; - int order; - - sg_size = buflen / (PMCRAID_MAX_IOADLS - 1); - order = (sg_size > 0) ? 
get_order(sg_size) : 0; - - /* Allocate a scatter/gather list for the DMA */ - sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL); - if (sglist == NULL) - return NULL; - - sglist->order = order; - sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO, - &sglist->num_sg); - - return sglist; -} - -/** - * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list - * @sglist: scatter/gather list pointer - * @buffer: buffer pointer - * @len: buffer length - * @direction: data transfer direction - * - * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist - * - * Return value: - * 0 on success / other on failure - */ -static int pmcraid_copy_sglist( - struct pmcraid_sglist *sglist, - void __user *buffer, - u32 len, - int direction -) -{ - struct scatterlist *sg; - void *kaddr; - int bsize_elem; - int i; - int rc = 0; - - /* Determine the actual number of bytes per element */ - bsize_elem = PAGE_SIZE * (1 << sglist->order); - - sg = sglist->scatterlist; - - for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) { - struct page *page = sg_page(sg); - - kaddr = kmap(page); - if (direction == DMA_TO_DEVICE) - rc = copy_from_user(kaddr, buffer, bsize_elem); - else - rc = copy_to_user(buffer, kaddr, bsize_elem); - - kunmap(page); - - if (rc) { - pmcraid_err("failed to copy user data into sg list\n"); - return -EFAULT; - } - - sg->length = bsize_elem; - } - - if (len % bsize_elem) { - struct page *page = sg_page(sg); - - kaddr = kmap(page); - - if (direction == DMA_TO_DEVICE) - rc = copy_from_user(kaddr, buffer, len % bsize_elem); - else - rc = copy_to_user(buffer, kaddr, len % bsize_elem); - - kunmap(page); - - sg->length = len % bsize_elem; - } - - if (rc) { - pmcraid_err("failed to copy user data into sg list\n"); - rc = -EFAULT; - } - - return rc; -} - -/** * pmcraid_queuecommand_lck - Queue a mid-layer request * @scsi_cmd: scsi command struct * @@ -3454,365 +3336,6 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode) return rc; } - -/** - * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough - * commands sent over IOCTL interface - * - * @cmd : pointer to struct pmcraid_cmd - * @buflen : length of the request buffer - * @direction : data transfer direction - * - * Return value - * 0 on success, non-zero error code on failure - */ -static int pmcraid_build_passthrough_ioadls( - struct pmcraid_cmd *cmd, - int buflen, - int direction -) -{ - struct pmcraid_sglist *sglist = NULL; - struct scatterlist *sg = NULL; - struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; - struct pmcraid_ioadl_desc *ioadl; - int i; - - sglist = pmcraid_alloc_sglist(buflen); - - if (!sglist) { - pmcraid_err("can't allocate memory for passthrough SGls\n"); - return -ENOMEM; - } - - sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev, - sglist->scatterlist, - sglist->num_sg, direction); - - if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) { - dev_err(&cmd->drv_inst->pdev->dev, - "Failed to map passthrough buffer!\n"); - pmcraid_free_sglist(sglist); - return -EIO; - } - - cmd->sglist = sglist; - ioarcb->request_flags0 |= NO_LINK_DESCS; - - ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg); - - /* Initialize IOADL descriptor addresses */ - for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) { - ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg)); - ioadl[i].address = cpu_to_le64(sg_dma_address(sg)); - ioadl[i].flags = 0; - } - - /* setup the last descriptor */ - ioadl[i - 1].flags = 
IOADL_FLAGS_LAST_DESC; - - return 0; -} - - -/** - * pmcraid_release_passthrough_ioadls - release passthrough ioadls - * - * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated - * @buflen: size of the request buffer - * @direction: data transfer direction - * - * Return value - * 0 on success, non-zero error code on failure - */ -static void pmcraid_release_passthrough_ioadls( - struct pmcraid_cmd *cmd, - int buflen, - int direction -) -{ - struct pmcraid_sglist *sglist = cmd->sglist; - - if (buflen > 0) { - dma_unmap_sg(&cmd->drv_inst->pdev->dev, - sglist->scatterlist, - sglist->num_sg, - direction); - pmcraid_free_sglist(sglist); - cmd->sglist = NULL; - } -} - -/** - * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands - * - * @pinstance: pointer to adapter instance structure - * @ioctl_cmd: ioctl code - * @buflen: unused - * @arg: pointer to pmcraid_passthrough_buffer user buffer - * - * Return value - * 0 on success, non-zero error code on failure - */ -static long pmcraid_ioctl_passthrough( - struct pmcraid_instance *pinstance, - unsigned int ioctl_cmd, - unsigned int buflen, - void __user *arg -) -{ - struct pmcraid_passthrough_ioctl_buffer *buffer; - struct pmcraid_ioarcb *ioarcb; - struct pmcraid_cmd *cmd; - struct pmcraid_cmd *cancel_cmd; - void __user *request_buffer; - unsigned long request_offset; - unsigned long lock_flags; - void __user *ioasa; - u32 ioasc; - int request_size; - int buffer_size; - u8 direction; - int rc = 0; - - /* If IOA reset is in progress, wait 10 secs for reset to complete */ - if (pinstance->ioa_reset_in_progress) { - rc = wait_event_interruptible_timeout( - pinstance->reset_wait_q, - !pinstance->ioa_reset_in_progress, - msecs_to_jiffies(10000)); - - if (!rc) - return -ETIMEDOUT; - else if (rc < 0) - return -ERESTARTSYS; - } - - /* If adapter is not in operational state, return error */ - if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) { - pmcraid_err("IOA is not operational\n"); - return -ENOTTY; - } - - buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer); - buffer = kmalloc(buffer_size, GFP_KERNEL); - - if (!buffer) { - pmcraid_err("no memory for passthrough buffer\n"); - return -ENOMEM; - } - - request_offset = - offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer); - - request_buffer = arg + request_offset; - - rc = copy_from_user(buffer, arg, - sizeof(struct pmcraid_passthrough_ioctl_buffer)); - - ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa); - - if (rc) { - pmcraid_err("ioctl: can't copy passthrough buffer\n"); - rc = -EFAULT; - goto out_free_buffer; - } - - request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length); - - if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) { - direction = DMA_TO_DEVICE; - } else { - direction = DMA_FROM_DEVICE; - } - - if (request_size < 0) { - rc = -EINVAL; - goto out_free_buffer; - } - - /* check if we have any additional command parameters */ - if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length) - > PMCRAID_ADD_CMD_PARAM_LEN) { - rc = -EINVAL; - goto out_free_buffer; - } - - cmd = pmcraid_get_free_cmd(pinstance); - - if (!cmd) { - pmcraid_err("free command block is not available\n"); - rc = -ENOMEM; - goto out_free_buffer; - } - - cmd->scsi_cmd = NULL; - ioarcb = &(cmd->ioa_cb->ioarcb); - - /* Copy the user-provided IOARCB stuff field by field */ - ioarcb->resource_handle = buffer->ioarcb.resource_handle; - ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length; - ioarcb->cmd_timeout = 
buffer->ioarcb.cmd_timeout; - ioarcb->request_type = buffer->ioarcb.request_type; - ioarcb->request_flags0 = buffer->ioarcb.request_flags0; - ioarcb->request_flags1 = buffer->ioarcb.request_flags1; - memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN); - - if (buffer->ioarcb.add_cmd_param_length) { - ioarcb->add_cmd_param_length = - buffer->ioarcb.add_cmd_param_length; - ioarcb->add_cmd_param_offset = - buffer->ioarcb.add_cmd_param_offset; - memcpy(ioarcb->add_data.u.add_cmd_params, - buffer->ioarcb.add_data.u.add_cmd_params, - le16_to_cpu(buffer->ioarcb.add_cmd_param_length)); - } - - /* set hrrq number where the IOA should respond to. Note that all cmds - * generated internally uses hrrq_id 0, exception to this is the cmd - * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses - * hrrq_id assigned here in queuecommand - */ - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % - pinstance->num_hrrq; - - if (request_size) { - rc = pmcraid_build_passthrough_ioadls(cmd, - request_size, - direction); - if (rc) { - pmcraid_err("couldn't build passthrough ioadls\n"); - goto out_free_cmd; - } - } - - /* If data is being written into the device, copy the data from user - * buffers - */ - if (direction == DMA_TO_DEVICE && request_size > 0) { - rc = pmcraid_copy_sglist(cmd->sglist, - request_buffer, - request_size, - direction); - if (rc) { - pmcraid_err("failed to copy user buffer\n"); - goto out_free_sglist; - } - } - - /* passthrough ioctl is a blocking command so, put the user to sleep - * until timeout. Note that a timeout value of 0 means, do timeout. - */ - cmd->cmd_done = pmcraid_internal_done; - init_completion(&cmd->wait_for_completion); - cmd->completion_req = 1; - - pmcraid_info("command(%d) (CDB[0] = %x) for %x\n", - le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, - cmd->ioa_cb->ioarcb.cdb[0], - le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle)); - - spin_lock_irqsave(pinstance->host->host_lock, lock_flags); - _pmcraid_fire_command(cmd); - spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); - - /* NOTE ! Remove the below line once abort_task is implemented - * in firmware. This line disables ioctl command timeout handling logic - * similar to IO command timeout handling, making ioctl commands to wait - * until the command completion regardless of timeout value specified in - * ioarcb - */ - buffer->ioarcb.cmd_timeout = 0; - - /* If command timeout is specified put caller to wait till that time, - * otherwise it would be blocking wait. If command gets timed out, it - * will be aborted. - */ - if (buffer->ioarcb.cmd_timeout == 0) { - wait_for_completion(&cmd->wait_for_completion); - } else if (!wait_for_completion_timeout( - &cmd->wait_for_completion, - msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) { - - pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n", - le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, - cmd->ioa_cb->ioarcb.cdb[0]); - - spin_lock_irqsave(pinstance->host->host_lock, lock_flags); - cancel_cmd = pmcraid_abort_cmd(cmd); - spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); - - if (cancel_cmd) { - wait_for_completion(&cancel_cmd->wait_for_completion); - ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc); - pmcraid_return_cmd(cancel_cmd); - - /* if abort task couldn't find the command i.e it got - * completed prior to aborting, return good completion. 
- * if command got aborted successfully or there was IOA - * reset due to abort task itself getting timedout then - * return -ETIMEDOUT - */ - if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || - PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) { - if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND) - rc = -ETIMEDOUT; - goto out_handle_response; - } - } - - /* no command block for abort task or abort task failed to abort - * the IOARCB, then wait for 150 more seconds and initiate reset - * sequence after timeout - */ - if (!wait_for_completion_timeout( - &cmd->wait_for_completion, - msecs_to_jiffies(150 * 1000))) { - pmcraid_reset_bringup(cmd->drv_inst); - rc = -ETIMEDOUT; - } - } - -out_handle_response: - /* copy entire IOASA buffer and return IOCTL success. - * If copying IOASA to user-buffer fails, return - * EFAULT - */ - if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa, - sizeof(struct pmcraid_ioasa))) { - pmcraid_err("failed to copy ioasa buffer to user\n"); - rc = -EFAULT; - } - - /* If the data transfer was from device, copy the data onto user - * buffers - */ - else if (direction == DMA_FROM_DEVICE && request_size > 0) { - rc = pmcraid_copy_sglist(cmd->sglist, - request_buffer, - request_size, - direction); - if (rc) { - pmcraid_err("failed to copy user buffer\n"); - rc = -EFAULT; - } - } - -out_free_sglist: - pmcraid_release_passthrough_ioadls(cmd, request_size, direction); - -out_free_cmd: - pmcraid_return_cmd(cmd); - -out_free_buffer: - kfree(buffer); - - return rc; -} - - - - /** * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself * @@ -3922,20 +3445,6 @@ static long pmcraid_chr_ioctl( switch (_IOC_TYPE(cmd)) { - case PMCRAID_PASSTHROUGH_IOCTL: - /* If ioctl code is to download microcode, we need to block - * mid-layer requests. - */ - if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) - scsi_block_requests(pinstance->host); - - retval = pmcraid_ioctl_passthrough(pinstance, cmd, - hdr->buffer_length, argp); - - if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) - scsi_unblock_requests(pinstance->host); - break; - case PMCRAID_DRIVER_IOCTL: arg += sizeof(struct pmcraid_ioctl_header); retval = pmcraid_ioctl_driver(pinstance, cmd, diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index bbb75318f1e7..9f59930e8b4f 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -1023,40 +1023,15 @@ struct pmcraid_ioctl_header { #define PMCRAID_IOCTL_SIGNATURE "PMCRAID" /* - * pmcraid_passthrough_ioctl_buffer - structure given as argument to - * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires - * 32-byte alignment so, it is necessary to pack this structure to avoid any - * holes between ioctl_header and passthrough buffer - * - * .ioactl_header : ioctl header - * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer - * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware - * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on - * the transfer directions passed in ioarcb.flags0. Contents - * of this buffer are valid only when ioarcb.data_transfer_len - * is not zero. - */ -struct pmcraid_passthrough_ioctl_buffer { - struct pmcraid_ioctl_header ioctl_header; - struct pmcraid_ioarcb ioarcb; - struct pmcraid_ioasa ioasa; - u8 request_buffer[]; -} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT))); - -/* * keys to differentiate between driver handled IOCTLs and passthrough * IOCTLs passed to IOA. 
driver determines the ioctl type using macro * _IOC_TYPE */ #define PMCRAID_DRIVER_IOCTL 'D' -#define PMCRAID_PASSTHROUGH_IOCTL 'F' #define DRV_IOCTL(n, size) \ _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) -#define FMW_IOCTL(n, size) \ - _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) - /* * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd. * This is to facilitate applications avoiding un-necessary memory allocations. @@ -1069,12 +1044,4 @@ struct pmcraid_passthrough_ioctl_buffer { #define PMCRAID_IOCTL_RESET_ADAPTER \ DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header)) -/* passthrough/firmware handled commands */ -#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \ - FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer)) - -#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \ - FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer)) - - #endif /* _PMCRAID_H */ diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 8196f89f404e..31ec429104e2 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -860,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task) return qedi_iscsi_send_ioreq(task); } +static void qedi_offload_work(struct work_struct *work) +{ + struct qedi_endpoint *qedi_ep = + container_of(work, struct qedi_endpoint, offload_work); + struct qedi_ctx *qedi; + int wait_delay = 5 * HZ; + int ret; + + qedi = qedi_ep->qedi; + + ret = qedi_iscsi_offload_conn(qedi_ep); + if (ret) { + QEDI_ERR(&qedi->dbg_ctx, + "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", + qedi_ep->iscsi_cid, qedi_ep, ret); + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + return; + } + + ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, + (qedi_ep->state == + EP_STATE_OFLDCONN_COMPL), + wait_delay); + if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) { + qedi_ep->state = EP_STATE_OFLDCONN_FAILED; + QEDI_ERR(&qedi->dbg_ctx, + "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", + qedi_ep->iscsi_cid, qedi_ep); + } +} + static struct iscsi_endpoint * qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) @@ -908,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, } qedi_ep = ep->dd_data; memset(qedi_ep, 0, sizeof(struct qedi_endpoint)); + INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); qedi_ep->state = EP_STATE_IDLE; qedi_ep->iscsi_cid = (u32)-1; qedi_ep->qedi = qedi; @@ -1056,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) qedi_ep = ep->dd_data; qedi = qedi_ep->qedi; + flush_work(&qedi_ep->offload_work); + if (qedi_ep->state == EP_STATE_OFLDCONN_START) goto ep_exit_recover; - if (qedi_ep->state != EP_STATE_OFLDCONN_NONE) - flush_work(&qedi_ep->offload_work); - if (qedi_ep->conn) { qedi_conn = qedi_ep->conn; abrt_conn = qedi_conn->abrt_conn; @@ -1235,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) return rc; } -static void qedi_offload_work(struct work_struct *work) -{ - struct qedi_endpoint *qedi_ep = - container_of(work, struct qedi_endpoint, offload_work); - struct qedi_ctx *qedi; - int wait_delay = 5 * HZ; - int ret; - - qedi = qedi_ep->qedi; - - ret = qedi_iscsi_offload_conn(qedi_ep); - if (ret) { - QEDI_ERR(&qedi->dbg_ctx, - "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n", - qedi_ep->iscsi_cid, qedi_ep, ret); - qedi_ep->state = EP_STATE_OFLDCONN_FAILED; - return; - } - - ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait, - (qedi_ep->state == 
- EP_STATE_OFLDCONN_COMPL), - wait_delay); - if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) { - qedi_ep->state = EP_STATE_OFLDCONN_FAILED; - QEDI_ERR(&qedi->dbg_ctx, - "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n", - qedi_ep->iscsi_cid, qedi_ep); - } -} - static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) { struct qedi_ctx *qedi; @@ -1381,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) qedi_ep->dst_addr, qedi_ep->dst_port); } - INIT_WORK(&qedi_ep->offload_work, qedi_offload_work); queue_work(qedi->offload_thread, &qedi_ep->offload_work); ret = 0; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index c607755cce00..592a290e6cfa 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -32,7 +32,6 @@ #include <linux/blkdev.h> #include <linux/crc-t10dif.h> #include <linux/spinlock.h> -#include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/atomic.h> #include <linux/hrtimer.h> @@ -732,9 +731,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = { {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; -static atomic_t sdebug_num_hosts; -static DEFINE_MUTEX(add_host_mutex); - +static int sdebug_num_hosts; static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */ static int sdebug_ato = DEF_ATO; static int sdebug_cdb_len = DEF_CDB_LEN; @@ -781,7 +778,6 @@ static int sdebug_uuid_ctl = DEF_UUID_CTL; static bool sdebug_random = DEF_RANDOM; static bool sdebug_per_host_store = DEF_PER_HOST_STORE; static bool sdebug_removable = DEF_REMOVABLE; -static bool sdebug_deflect_incoming; static bool sdebug_clustering; static bool sdebug_host_lock = DEF_HOST_LOCK; static bool sdebug_strict = DEF_STRICT; @@ -5122,10 +5118,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp) sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; - if (smp_load_acquire(&sdebug_deflect_incoming)) { - pr_info("Exit early due to deflect_incoming\n"); - return 1; - } if (devip == NULL) { devip = find_build_dev_info(sdp); if (devip == NULL) @@ -5211,7 +5203,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd) } /* Deletes (stops) timers or work queues of all queued commands */ -static void stop_all_queued(bool done_with_no_conn) +static void stop_all_queued(void) { unsigned long iflags; int j, k; @@ -5220,15 +5212,13 @@ static void stop_all_queued(bool done_with_no_conn) struct sdebug_queued_cmd *sqcp; struct sdebug_dev_info *devip; struct sdebug_defer *sd_dp; - struct scsi_cmnd *scp; for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { spin_lock_irqsave(&sqp->qc_lock, iflags); for (k = 0; k < SDEBUG_CANQUEUE; ++k) { if (test_bit(k, sqp->in_use_bm)) { sqcp = &sqp->qc_arr[k]; - scp = sqcp->a_cmnd; - if (!scp) + if (sqcp->a_cmnd == NULL) continue; devip = (struct sdebug_dev_info *) sqcp->a_cmnd->device->hostdata; @@ -5243,10 +5233,6 @@ static void stop_all_queued(bool done_with_no_conn) l_defer_t = SDEB_DEFER_NONE; spin_unlock_irqrestore(&sqp->qc_lock, iflags); stop_qc_helper(sd_dp, l_defer_t); - if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) { - scp->result = DID_NO_CONNECT << 16; - scsi_done(scp); - } clear_bit(k, sqp->in_use_bm); spin_lock_irqsave(&sqp->qc_lock, iflags); } @@ -5389,7 +5375,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt) } } spin_unlock(&sdebug_host_list_lock); - stop_all_queued(false); + 
stop_all_queued(); if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) sdev_printk(KERN_INFO, SCpnt->device, "%s: %d device(s) found\n", __func__, k); @@ -5449,50 +5435,13 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size) } } -static void sdeb_block_all_queues(void) -{ - int j; - struct sdebug_queue *sqp; - - for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) - atomic_set(&sqp->blocked, (int)true); -} - -static void sdeb_unblock_all_queues(void) +static void block_unblock_all_queues(bool block) { int j; struct sdebug_queue *sqp; for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) - atomic_set(&sqp->blocked, (int)false); -} - -static void -sdeb_add_n_hosts(int num_hosts) -{ - if (num_hosts < 1) - return; - do { - bool found; - unsigned long idx; - struct sdeb_store_info *sip; - bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store; - - found = false; - if (want_phs) { - xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) { - sdeb_most_recent_idx = (int)idx; - found = true; - break; - } - if (found) /* re-use case */ - sdebug_add_host_helper((int)idx); - else - sdebug_do_add_host(true /* make new store */); - } else { - sdebug_do_add_host(false); - } - } while (--num_hosts); + atomic_set(&sqp->blocked, (int)block); } /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1 @@ -5505,10 +5454,10 @@ static void tweak_cmnd_count(void) modulo = abs(sdebug_every_nth); if (modulo < 2) return; - sdeb_block_all_queues(); + block_unblock_all_queues(true); count = atomic_read(&sdebug_cmnd_count); atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo); - sdeb_unblock_all_queues(); + block_unblock_all_queues(false); } static void clear_queue_stats(void) @@ -5526,15 +5475,6 @@ static bool inject_on_this_cmd(void) return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0; } -static int process_deflect_incoming(struct scsi_cmnd *scp) -{ - u8 opcode = scp->cmnd[0]; - - if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16) - return 0; - return DID_NO_CONNECT << 16; -} - #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */ /* Complete the processing of the thread that queued a SCSI command to this @@ -5544,7 +5484,8 @@ static int process_deflect_incoming(struct scsi_cmnd *scp) */ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, int scsi_result, - int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *), + int (*pfp)(struct scsi_cmnd *, + struct sdebug_dev_info *), int delta_jiff, int ndelay) { bool new_sd_dp; @@ -5565,27 +5506,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, } sdp = cmnd->device; - if (delta_jiff == 0) { - sqp = get_queue(cmnd); - if (atomic_read(&sqp->blocked)) { - if (smp_load_acquire(&sdebug_deflect_incoming)) - return process_deflect_incoming(cmnd); - else - return SCSI_MLQUEUE_HOST_BUSY; - } + if (delta_jiff == 0) goto respond_in_thread; - } sqp = get_queue(cmnd); spin_lock_irqsave(&sqp->qc_lock, iflags); if (unlikely(atomic_read(&sqp->blocked))) { spin_unlock_irqrestore(&sqp->qc_lock, iflags); - if (smp_load_acquire(&sdebug_deflect_incoming)) { - scsi_result = process_deflect_incoming(cmnd); - goto respond_in_thread; - } - if (sdebug_verbose) - pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n"); return SCSI_MLQUEUE_HOST_BUSY; } num_in_q = atomic_read(&devip->num_in_q); @@ -5774,12 +5701,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, respond_in_thread: /* call back to 
mid-layer using invocation thread */ cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0; cmnd->result &= ~SDEG_RES_IMMED_MASK; - if (cmnd->result == 0 && scsi_result != 0) { + if (cmnd->result == 0 && scsi_result != 0) cmnd->result = scsi_result; - if (sdebug_verbose) - pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n", - blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result); - } scsi_done(cmnd); return 0; } @@ -6064,7 +5987,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf, int j, k; struct sdebug_queue *sqp; - sdeb_block_all_queues(); + block_unblock_all_queues(true); for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { k = find_first_bit(sqp->in_use_bm, @@ -6078,7 +6001,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf, sdebug_jdelay = jdelay; sdebug_ndelay = 0; } - sdeb_unblock_all_queues(); + block_unblock_all_queues(false); } return res; } @@ -6104,7 +6027,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, int j, k; struct sdebug_queue *sqp; - sdeb_block_all_queues(); + block_unblock_all_queues(true); for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { k = find_first_bit(sqp->in_use_bm, @@ -6119,7 +6042,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN : DEF_JDELAY; } - sdeb_unblock_all_queues(); + block_unblock_all_queues(false); } return res; } @@ -6433,7 +6356,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && (n <= SDEBUG_CANQUEUE) && (sdebug_host_max_queue == 0)) { - sdeb_block_all_queues(); + block_unblock_all_queues(true); k = 0; for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { @@ -6448,7 +6371,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, atomic_set(&retired_max_queue, k + 1); else atomic_set(&retired_max_queue, 0); - sdeb_unblock_all_queues(); + block_unblock_all_queues(false); return count; } return -EINVAL; @@ -6537,48 +6460,43 @@ static DRIVER_ATTR_RW(virtual_gb); static ssize_t add_host_show(struct device_driver *ddp, char *buf) { /* absolute number of hosts currently active is what is shown */ - return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts)); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts); } -/* - * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'. - * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing. - * Returns -EBUSY if another add_host sysfs invocation is active. 
- */ static ssize_t add_host_store(struct device_driver *ddp, const char *buf, size_t count) { + bool found; + unsigned long idx; + struct sdeb_store_info *sip; + bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store; int delta_hosts; - if (count == 0 || kstrtoint(buf, 0, &delta_hosts)) + if (sscanf(buf, "%d", &delta_hosts) != 1) return -EINVAL; - if (sdebug_verbose) - pr_info("prior num_hosts=%d, num_to_add=%d\n", - atomic_read(&sdebug_num_hosts), delta_hosts); - if (delta_hosts == 0) - return count; - if (mutex_trylock(&add_host_mutex) == 0) - return -EBUSY; if (delta_hosts > 0) { - sdeb_add_n_hosts(delta_hosts); - } else if (delta_hosts < 0) { - smp_store_release(&sdebug_deflect_incoming, true); - sdeb_block_all_queues(); - if (delta_hosts >= atomic_read(&sdebug_num_hosts)) - stop_all_queued(true); do { - if (atomic_read(&sdebug_num_hosts) < 1) { - free_all_queued(); - break; + found = false; + if (want_phs) { + xa_for_each_marked(per_store_ap, idx, sip, + SDEB_XA_NOT_IN_USE) { + sdeb_most_recent_idx = (int)idx; + found = true; + break; + } + if (found) /* re-use case */ + sdebug_add_host_helper((int)idx); + else + sdebug_do_add_host(true); + } else { + sdebug_do_add_host(false); } + } while (--delta_hosts); + } else if (delta_hosts < 0) { + do { sdebug_do_remove_host(false); } while (++delta_hosts); - sdeb_unblock_all_queues(); - smp_store_release(&sdebug_deflect_incoming, false); } - mutex_unlock(&add_host_mutex); - if (sdebug_verbose) - pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts)); return count; } static DRIVER_ATTR_RW(add_host); @@ -7089,10 +7007,6 @@ static int __init scsi_debug_init(void) sdebug_add_host = 0; for (k = 0; k < hosts_to_add; k++) { - if (smp_load_acquire(&sdebug_deflect_incoming)) { - pr_info("exit early as sdebug_deflect_incoming is set\n"); - return 0; - } if (want_store && k == 0) { ret = sdebug_add_host_helper(idx); if (ret < 0) { @@ -7110,12 +7024,8 @@ static int __init scsi_debug_init(void) } } if (sdebug_verbose) - pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts)); + pr_info("built %d host(s)\n", sdebug_num_hosts); - /* - * Even though all the hosts have been established, due to async device (LU) scanning - * by the scsi mid-level, there may still be devices (LUs) being set up. 
- */ return 0; bus_unreg: @@ -7131,17 +7041,12 @@ free_q_arr: static void __exit scsi_debug_exit(void) { - int k; + int k = sdebug_num_hosts; - /* Possible race with LUs still being set up; stop them asap */ - sdeb_block_all_queues(); - smp_store_release(&sdebug_deflect_incoming, true); - stop_all_queued(false); - for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++) + stop_all_queued(); + for (; k; k--) sdebug_do_remove_host(true); free_all_queued(); - if (sdebug_verbose) - pr_info("removed %d hosts\n", k); driver_unregister(&sdebug_driverfs_driver); bus_unregister(&pseudo_lld_bus); root_device_unregister(pseudo_primary); @@ -7311,13 +7216,13 @@ static int sdebug_add_host_helper(int per_host_idx) sdbg_host->dev.bus = &pseudo_lld_bus; sdbg_host->dev.parent = pseudo_primary; sdbg_host->dev.release = &sdebug_release_adapter; - dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts)); + dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts); error = device_register(&sdbg_host->dev); if (error) goto clean; - atomic_inc(&sdebug_num_hosts); + ++sdebug_num_hosts; return 0; clean: @@ -7381,7 +7286,7 @@ static void sdebug_do_remove_host(bool the_end) return; device_unregister(&sdbg_host->dev); - atomic_dec(&sdebug_num_hosts); + --sdebug_num_hosts; } static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) @@ -7389,10 +7294,10 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) int num_in_q = 0; struct sdebug_dev_info *devip; - sdeb_block_all_queues(); + block_unblock_all_queues(true); devip = (struct sdebug_dev_info *)sdev->hostdata; if (NULL == devip) { - sdeb_unblock_all_queues(); + block_unblock_all_queues(false); return -ENODEV; } num_in_q = atomic_read(&devip->num_in_q); @@ -7411,7 +7316,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n", __func__, qdepth, num_in_q); } - sdeb_unblock_all_queues(); + block_unblock_all_queues(false); return sdev->queue_depth; } @@ -7519,12 +7424,13 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) struct sdebug_defer *sd_dp; sqp = sdebug_q_arr + queue_num; - qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue); - if (qc_idx >= sdebug_max_queue) - return 0; spin_lock_irqsave(&sqp->qc_lock, iflags); + qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue); + if (qc_idx >= sdebug_max_queue) + goto unlock; + for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) { if (first) { first = false; @@ -7589,6 +7495,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) break; } +unlock: spin_unlock_irqrestore(&sqp->qc_lock, iflags); if (num_entries > 0) diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c index ff89de86545d..b02af340c2d3 100644 --- a/drivers/scsi/scsi_logging.c +++ b/drivers/scsi/scsi_logging.c @@ -30,7 +30,7 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd) { struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd); - if (!rq->q->disk) + if (!rq->q || !rq->q->disk) return NULL; return rq->q->disk->disk_name; } diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index f4e6c68ac99e..2ef78083f1ef 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -223,6 +223,8 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, int ret; struct sbitmap sb_backup; + depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev)); + /* * realloc if new shift is 
calculated, which is caused by setting * up one new default queue depth after calling ->slave_configure @@ -245,6 +247,9 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, scsi_device_max_queue_depth(sdev), new_shift, GFP_KERNEL, sdev->request_queue->node, false, true); + if (!ret) + sbitmap_resize(&sdev->budget_map, depth); + if (need_free) { if (ret) sdev->budget_map = sb_backup; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 226a50944c00..dc6872e352bd 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1384,10 +1384,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) { sdev->bsg_dev = scsi_bsg_register_queue(sdev); if (IS_ERR(sdev->bsg_dev)) { - /* - * We're treating error on bsg register as non-fatal, so - * pretend nothing went wrong. - */ error = PTR_ERR(sdev->bsg_dev); sdev_printk(KERN_INFO, sdev, "Failed to register bsg queue, errno=%d\n", diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 27951ea05dd4..2c0dd64159b0 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -86,6 +86,9 @@ struct iscsi_internal { struct transport_container session_cont; }; +static DEFINE_IDR(iscsi_ep_idr); +static DEFINE_MUTEX(iscsi_ep_idr_mutex); + static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_conn_cleanup_workq; @@ -168,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \ static void iscsi_endpoint_release(struct device *dev) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); + + mutex_lock(&iscsi_ep_idr_mutex); + idr_remove(&iscsi_ep_idr, ep->id); + mutex_unlock(&iscsi_ep_idr_mutex); + kfree(ep); } @@ -180,7 +188,7 @@ static ssize_t show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); - return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id); + return sysfs_emit(buf, "%d\n", ep->id); } static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); @@ -193,48 +201,32 @@ static struct attribute_group iscsi_endpoint_group = { .attrs = iscsi_endpoint_attrs, }; -#define ISCSI_MAX_EPID -1 - -static int iscsi_match_epid(struct device *dev, const void *data) -{ - struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); - const uint64_t *epid = data; - - return *epid == ep->id; -} - struct iscsi_endpoint * iscsi_create_endpoint(int dd_size) { - struct device *dev; struct iscsi_endpoint *ep; - uint64_t id; - int err; - - for (id = 1; id < ISCSI_MAX_EPID; id++) { - dev = class_find_device(&iscsi_endpoint_class, NULL, &id, - iscsi_match_epid); - if (!dev) - break; - else - put_device(dev); - } - if (id == ISCSI_MAX_EPID) { - printk(KERN_ERR "Too many connections. Max supported %u\n", - ISCSI_MAX_EPID - 1); - return NULL; - } + int err, id; ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL); if (!ep) return NULL; + mutex_lock(&iscsi_ep_idr_mutex); + id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO); + if (id < 0) { + mutex_unlock(&iscsi_ep_idr_mutex); + printk(KERN_ERR "Could not allocate endpoint ID. 
Error %d.\n", + id); + goto free_ep; + } + mutex_unlock(&iscsi_ep_idr_mutex); + ep->id = id; ep->dev.class = &iscsi_endpoint_class; - dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id); + dev_set_name(&ep->dev, "ep-%d", id); err = device_register(&ep->dev); if (err) - goto free_ep; + goto free_id; err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); if (err) @@ -248,6 +240,10 @@ unregister_dev: device_unregister(&ep->dev); return NULL; +free_id: + mutex_lock(&iscsi_ep_idr_mutex); + idr_remove(&iscsi_ep_idr, id); + mutex_unlock(&iscsi_ep_idr_mutex); free_ep: kfree(ep); return NULL; @@ -275,14 +271,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint); */ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) { - struct device *dev; + struct iscsi_endpoint *ep; - dev = class_find_device(&iscsi_endpoint_class, NULL, &handle, - iscsi_match_epid); - if (!dev) - return NULL; + mutex_lock(&iscsi_ep_idr_mutex); + ep = idr_find(&iscsi_ep_idr, handle); + if (!ep) + goto unlock; - return iscsi_dev_to_endpoint(dev); + get_device(&ep->dev); +unlock: + mutex_unlock(&iscsi_ep_idr_mutex); + return ep; } EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); @@ -2202,10 +2201,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) switch (flag) { case STOP_CONN_RECOVER: - conn->state = ISCSI_CONN_FAILED; + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); break; case STOP_CONN_TERM: - conn->state = ISCSI_CONN_DOWN; + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); break; default: iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", @@ -2217,6 +2216,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n"); } +static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) +{ + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); + struct iscsi_endpoint *ep; + + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); + + if (!conn->ep || !session->transport->ep_disconnect) + return; + + ep = conn->ep; + conn->ep = NULL; + + session->transport->unbind_conn(conn, is_active); + session->transport->ep_disconnect(ep); + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); +} + +static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn, + struct iscsi_endpoint *ep, + bool is_active) +{ + /* Check if this was a conn error and the kernel took ownership */ + spin_lock_irq(&conn->lock); + if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); + iscsi_ep_disconnect(conn, is_active); + } else { + spin_unlock_irq(&conn->lock); + ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); + mutex_unlock(&conn->ep_mutex); + + flush_work(&conn->cleanup_work); + /* + * Userspace is now done with the EP so we can release the ref + * iscsi_cleanup_conn_work_fn took. + */ + iscsi_put_endpoint(ep); + mutex_lock(&conn->ep_mutex); + } +} + static int iscsi_if_stop_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { @@ -2238,11 +2280,24 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport, iscsi_stop_conn(conn, flag); } else { /* + * For offload, when iscsid is restarted it won't know about + * existing endpoints so it can't do a ep_disconnect. We clean + * it up here for userspace. + */ + mutex_lock(&conn->ep_mutex); + if (conn->ep) + iscsi_if_disconnect_bound_ep(conn, conn->ep, true); + mutex_unlock(&conn->ep_mutex); + + /* * Figure out if it was the kernel or userspace initiating this. 
*/ + spin_lock_irq(&conn->lock); if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); iscsi_stop_conn(conn, flag); } else { + spin_unlock_irq(&conn->lock); ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); flush_work(&conn->cleanup_work); @@ -2251,31 +2306,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport, * Only clear for recovery to avoid extra cleanup runs during * termination. */ + spin_lock_irq(&conn->lock); clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); + spin_unlock_irq(&conn->lock); } ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); return 0; } -static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) -{ - struct iscsi_cls_session *session = iscsi_conn_to_session(conn); - struct iscsi_endpoint *ep; - - ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); - conn->state = ISCSI_CONN_FAILED; - - if (!conn->ep || !session->transport->ep_disconnect) - return; - - ep = conn->ep; - conn->ep = NULL; - - session->transport->unbind_conn(conn, is_active); - session->transport->ep_disconnect(ep); - ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); -} - static void iscsi_cleanup_conn_work_fn(struct work_struct *work) { struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn, @@ -2284,18 +2322,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work) mutex_lock(&conn->ep_mutex); /* - * If we are not at least bound there is nothing for us to do. Userspace - * will do a ep_disconnect call if offload is used, but will not be - * doing a stop since there is nothing to clean up, so we have to clear - * the cleanup bit here. + * Get a ref to the ep, so we don't release its ID until after + * userspace is done referencing it in iscsi_if_disconnect_bound_ep. */ - if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) { - ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n"); - clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); - mutex_unlock(&conn->ep_mutex); - return; - } - + if (conn->ep) + get_device(&conn->ep->dev); iscsi_ep_disconnect(conn, false); if (system_state != SYSTEM_RUNNING) { @@ -2340,11 +2371,12 @@ iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) conn->dd_data = &conn[1]; mutex_init(&conn->ep_mutex); + spin_lock_init(&conn->lock); INIT_LIST_HEAD(&conn->conn_list); INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn); conn->transport = transport; conn->cid = cid; - conn->state = ISCSI_CONN_DOWN; + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); /* this is released in the dev's release function */ if (!get_device(&session->dev)) @@ -2542,9 +2574,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); + unsigned long flags; + int state; - if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) - queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work); + spin_lock_irqsave(&conn->lock, flags); + /* + * Userspace will only do a stop call if we are at least bound. And, we + * only need to do the in kernel cleanup if in the UP state so cmds can + * be released to upper layers. If in other states just wait for + * userspace to avoid races that can leave the cleanup_work queued. 
+ */ + state = READ_ONCE(conn->state); + switch (state) { + case ISCSI_CONN_BOUND: + case ISCSI_CONN_UP: + if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, + &conn->flags)) { + queue_work(iscsi_conn_cleanup_workq, + &conn->cleanup_work); + } + break; + default: + ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", + state); + break; + } + spin_unlock_irqrestore(&conn->lock, flags); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@ -2894,7 +2949,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; - int err = 0, value = 0; + int err = 0, value = 0, state; if (ev->u.set_param.len > PAGE_SIZE) return -EINVAL; @@ -2911,8 +2966,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) session->recovery_tmo = value; break; default: - if ((conn->state == ISCSI_CONN_BOUND) || - (conn->state == ISCSI_CONN_UP)) { + state = READ_ONCE(conn->state); + if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); } else { @@ -2984,16 +3039,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, } mutex_lock(&conn->ep_mutex); - /* Check if this was a conn error and the kernel took ownership */ - if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { - ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); - mutex_unlock(&conn->ep_mutex); - - flush_work(&conn->cleanup_work); - goto put_ep; - } - - iscsi_ep_disconnect(conn, false); + iscsi_if_disconnect_bound_ep(conn, ep, false); mutex_unlock(&conn->ep_mutex); put_ep: iscsi_put_endpoint(ep); @@ -3696,24 +3742,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport, return -EINVAL; mutex_lock(&conn->ep_mutex); + spin_lock_irq(&conn->lock); if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { + spin_unlock_irq(&conn->lock); mutex_unlock(&conn->ep_mutex); ev->r.retcode = -ENOTCONN; return 0; } + spin_unlock_irq(&conn->lock); switch (nlh->nlmsg_type) { case ISCSI_UEVENT_BIND_CONN: - if (conn->ep) { - /* - * For offload boot support where iscsid is restarted - * during the pivot root stage, the ep will be intact - * here when the new iscsid instance starts up and - * reconnects. 
- */ - iscsi_ep_disconnect(conn, true); - } - session = iscsi_session_lookup(ev->u.b_conn.sid); if (!session) { err = -EINVAL; @@ -3724,7 +3763,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport, ev->u.b_conn.transport_eph, ev->u.b_conn.is_leading); if (!ev->r.retcode) - conn->state = ISCSI_CONN_BOUND; + WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); if (ev->r.retcode || !transport->ep_connect) break; @@ -3743,7 +3782,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport, case ISCSI_UEVENT_START_CONN: ev->r.retcode = transport->start_conn(conn); if (!ev->r.retcode) - conn->state = ISCSI_CONN_UP; + WRITE_ONCE(conn->state, ISCSI_CONN_UP); + break; case ISCSI_UEVENT_SEND_PDU: pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); @@ -4050,10 +4090,11 @@ static ssize_t show_conn_state(struct device *dev, { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); const char *state = "unknown"; + int conn_state = READ_ONCE(conn->state); - if (conn->state >= 0 && - conn->state < ARRAY_SIZE(connection_state_names)) - state = connection_state_names[conn->state]; + if (conn_state >= 0 && + conn_state < ARRAY_SIZE(connection_state_names)) + state = connection_state_names[conn_state]; return sysfs_emit(buf, "%s\n", state); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a390679cf458..dc6e55761fd1 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3216,6 +3216,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_block_limits(sdkp); sd_read_block_characteristics(sdkp); sd_zbc_read_zones(sdkp, buffer); + sd_read_cpr(sdkp); } sd_print_capacity(sdkp, old_capacity); @@ -3225,7 +3226,6 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_app_tag_own(sdkp, buffer); sd_read_write_same(sdkp, buffer); sd_read_security(sdkp, buffer); - sd_read_cpr(sdkp); } /* @@ -3475,6 +3475,7 @@ static int sd_probe(struct device *dev) error = device_add_disk(dev, gd, NULL); if (error) { put_device(&sdkp->disk_dev); + blk_cleanup_disk(gd); goto out; } diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 5ba9df334968..cbd92891a762 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -535,7 +535,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, scsi_autopm_get_device(sdev); - if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) { + if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) { ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); if (ret != -ENOSYS) goto put; diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 0d2e950d0865..586c0e567ff9 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -957,18 +957,6 @@ static const struct reset_control_ops ufs_qcom_reset_ops = { .deassert = ufs_qcom_reset_deassert, }; -#define ANDROID_BOOT_DEV_MAX 30 -static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; - -#ifndef MODULE -static int __init get_android_boot_dev(char *str) -{ - strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX); - return 1; -} -__setup("androidboot.bootdevice=", get_android_boot_dev); -#endif - /** * ufs_qcom_init - bind phy with controller * @hba: host controller instance @@ -988,9 +976,6 @@ static int ufs_qcom_init(struct ufs_hba *hba) struct resource *res; struct ufs_clk_info *clki; - if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev))) - return -ENODEV; - host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) { err = -ENOMEM; diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 
f76692053ca1..e892b9feffb1 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -428,6 +428,12 @@ static int ufs_intel_adl_init(struct ufs_hba *hba) return ufs_intel_common_init(hba); } +static int ufs_intel_mtl_init(struct ufs_hba *hba) +{ + hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN; + return ufs_intel_common_init(hba); +} + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { .name = "intel-pci", .init = ufs_intel_common_init, @@ -465,6 +471,16 @@ static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = { .device_reset = ufs_intel_device_reset, }; +static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_mtl_init, + .exit = ufs_intel_common_exit, + .hce_enable_notify = ufs_intel_hce_enable_notify, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, + .device_reset = ufs_intel_device_reset, +}; + #ifdef CONFIG_PM_SLEEP static int ufshcd_pci_restore(struct device *dev) { @@ -579,6 +595,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops }, { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 88c20f3608c2..94f545be183a 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -820,8 +820,6 @@ struct ufs_hba { enum ufs_pm_level rpm_lvl; /* Desired UFS power management level during system PM */ enum ufs_pm_level spm_lvl; - struct device_attribute rpm_lvl_attr; - struct device_attribute spm_lvl_attr; int pm_op_in_progress; /* Auto-Hibernate Idle Timer register value */ diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index b2bec19022cd..81099b68bbfb 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -867,12 +867,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) struct ufshpb_region *rgn, *victim_rgn = NULL; list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { - if (!rgn) { - dev_err(&hpb->sdev_ufs_lu->sdev_dev, - "%s: no region allocated\n", - __func__); - return NULL; - } if (ufshpb_check_srgns_issue_state(hpb, rgn)) continue; @@ -888,6 +882,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) break; } + if (!victim_rgn) + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: no region allocated\n", + __func__); + return victim_rgn; } diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 0e6110da69e7..578c4b6d0f7d 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -988,7 +988,7 @@ static struct virtio_driver virtio_scsi_driver = { .remove = virtscsi_remove, }; -static int __init init(void) +static int __init virtio_scsi_init(void) { int ret = -ENOMEM; @@ -1020,14 +1020,14 @@ error: return ret; } -static void __exit fini(void) +static void __exit virtio_scsi_fini(void) { unregister_virtio_driver(&virtio_scsi_driver); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); } -module_init(init); -module_exit(fini); +module_init(virtio_scsi_init); +module_exit(virtio_scsi_fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio SCSI HBA driver"); diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c index 
27b9e2baab1a..7acf9193a9e8 100644 --- a/drivers/scsi/zorro7xx.c +++ b/drivers/scsi/zorro7xx.c @@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z) scsi_remove_host(host); NCR_700_release(host); + if (host->base > 0x01000000) + iounmap(hostdata->base); kfree(hostdata); free_irq(host->irq, host); zorro_release_device(z); diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 904eec2a7871..6ad4177595f3 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -4,7 +4,7 @@ # obj-$(CONFIG_ARCH_ACTIONS) += actions/ -obj-$(CONFIG_ARCH_APPLE) += apple/ +obj-y += apple/ obj-y += aspeed/ obj-$(CONFIG_ARCH_AT91) += atmel/ obj-y += bcm/ diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig index 9b8de31d6a8f..a1596fefacff 100644 --- a/drivers/soc/apple/Kconfig +++ b/drivers/soc/apple/Kconfig @@ -17,6 +17,30 @@ config APPLE_PMGR_PWRSTATE controls for SoC devices. This driver manages them through the generic power domain framework, and also provides reset support. +config APPLE_RTKIT + tristate "Apple RTKit co-processor IPC protocol" + depends on MAILBOX + depends on ARCH_APPLE || COMPILE_TEST + default ARCH_APPLE + help + Apple SoCs such as the M1 come with various co-processors running + their proprietary RTKit operating system. This option enables support + for the protocol library used to communicate with those. It is used + by various client drivers. + + Say 'y' here if you have an Apple SoC. + +config APPLE_SART + tristate "Apple SART DMA address filter" + depends on ARCH_APPLE || COMPILE_TEST + default ARCH_APPLE + help + Apple SART is a simple DMA address filter used on Apple SoCs such + as the M1. It is usually required for the NVMe coprocessor which does + not use a proper IOMMU. + + Say 'y' here if you have an Apple SoC. 
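RTKit is consumed as a library: a client driver registers a set of callbacks, boots the co-processor and then exchanges 64-bit messages on numbered endpoints. Purely as an illustration of the interface added later in this patch (devm_apple_rtkit_init(), apple_rtkit_boot(), apple_rtkit_start_ep() and apple_rtkit_send_message()), a minimal, hypothetical client sketch follows; the device, endpoint number and message value are invented for the example, and the callback prototypes are inferred from how rtkit.c invokes rtk->ops, so this code is not part of the patch itself.

/*
 * Hypothetical RTKit client, for illustration only (not part of this patch).
 * The endpoint number and message payload are made up; callback prototypes
 * follow the way rtkit.c calls rtk->ops.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/soc/apple/rtkit.h>

#define MY_APP_EP	0x20	/* application endpoints start at 0x20 */

static void my_recv_message(void *cookie, u8 ep, u64 msg)
{
	struct device *dev = cookie;

	/* runs from the RTKit workqueue for every application message */
	dev_info(dev, "EP 0x%02x sent 0x%016llx\n", ep, msg);
}

static void my_crashed(void *cookie)
{
	struct device *dev = cookie;

	/* the co-processor crashed; a real driver would recover here */
	dev_err(dev, "RTKit co-processor crashed\n");
}

static const struct apple_rtkit_ops my_rtkit_ops = {
	.crashed	= my_crashed,
	.recv_message	= my_recv_message,
};

static int my_coproc_start(struct device *dev)
{
	struct apple_rtkit *rtk;
	int ret;

	/* use mailbox index 0 from the device tree; dev doubles as cookie */
	rtk = devm_apple_rtkit_init(dev, dev, NULL, 0, &my_rtkit_ops);
	if (IS_ERR(rtk))
		return PTR_ERR(rtk);

	/* waits for the endpoint-map handshake and powers up IOP and AP */
	ret = apple_rtkit_boot(rtk);
	if (ret)
		return ret;

	/* application endpoints advertised by the firmware must be started */
	ret = apple_rtkit_start_ep(rtk, MY_APP_EP);
	if (ret)
		return ret;

	/* fire-and-forget message; no completion, not in atomic context */
	return apple_rtkit_send_message(rtk, MY_APP_EP, 0x42, NULL, false);
}

Passing a NULL completion makes the send fire-and-forget; a caller that needs to block until the mailbox transmit completes can use apple_rtkit_send_message_wait() instead.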
+ endmenu endif diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile index c114e84667e4..e293770cf66d 100644 --- a/drivers/soc/apple/Makefile +++ b/drivers/soc/apple/Makefile @@ -1,2 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o + +obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o +apple-rtkit-y = rtkit.o rtkit-crashlog.o + +obj-$(CONFIG_APPLE_SART) += apple-sart.o +apple-sart-y = sart.o diff --git a/drivers/soc/apple/rtkit-crashlog.c b/drivers/soc/apple/rtkit-crashlog.c new file mode 100644 index 000000000000..732deed64660 --- /dev/null +++ b/drivers/soc/apple/rtkit-crashlog.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple RTKit IPC library + * Copyright (C) The Asahi Linux Contributors + */ +#include "rtkit-internal.h" + +#define FOURCC(a, b, c, d) \ + (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d))) + +#define APPLE_RTKIT_CRASHLOG_HEADER FOURCC('C', 'L', 'H', 'E') +#define APPLE_RTKIT_CRASHLOG_STR FOURCC('C', 's', 't', 'r') +#define APPLE_RTKIT_CRASHLOG_VERSION FOURCC('C', 'v', 'e', 'r') +#define APPLE_RTKIT_CRASHLOG_MBOX FOURCC('C', 'm', 'b', 'x') +#define APPLE_RTKIT_CRASHLOG_TIME FOURCC('C', 't', 'i', 'm') + +struct apple_rtkit_crashlog_header { + u32 fourcc; + u32 version; + u32 size; + u32 flags; + u8 _unk[16]; +}; +static_assert(sizeof(struct apple_rtkit_crashlog_header) == 0x20); + +struct apple_rtkit_crashlog_mbox_entry { + u64 msg0; + u64 msg1; + u32 timestamp; + u8 _unk[4]; +}; +static_assert(sizeof(struct apple_rtkit_crashlog_mbox_entry) == 0x18); + +static void apple_rtkit_crashlog_dump_str(struct apple_rtkit *rtk, u8 *bfr, + size_t size) +{ + u32 idx; + u8 *ptr, *end; + + memcpy(&idx, bfr, 4); + + ptr = bfr + 4; + end = bfr + size; + while (ptr < end) { + u8 *newline = memchr(ptr, '\n', end - ptr); + + if (newline) { + u8 tmp = *newline; + *newline = '\0'; + dev_warn(rtk->dev, "RTKit: Message (id=%x): %s\n", idx, + ptr); + *newline = tmp; + ptr = newline + 1; + } else { + dev_warn(rtk->dev, "RTKit: Message (id=%x): %s", idx, + ptr); + break; + } + } +} + +static void apple_rtkit_crashlog_dump_version(struct apple_rtkit *rtk, u8 *bfr, + size_t size) +{ + dev_warn(rtk->dev, "RTKit: Version: %s", bfr + 16); +} + +static void apple_rtkit_crashlog_dump_time(struct apple_rtkit *rtk, u8 *bfr, + size_t size) +{ + u64 crash_time; + + memcpy(&crash_time, bfr, 8); + dev_warn(rtk->dev, "RTKit: Crash time: %lld", crash_time); +} + +static void apple_rtkit_crashlog_dump_mailbox(struct apple_rtkit *rtk, u8 *bfr, + size_t size) +{ + u32 type, index, i; + size_t n_messages; + struct apple_rtkit_crashlog_mbox_entry entry; + + memcpy(&type, bfr + 16, 4); + memcpy(&index, bfr + 24, 4); + n_messages = (size - 28) / sizeof(entry); + + dev_warn(rtk->dev, "RTKit: Mailbox history (type = %d, index = %d)", + type, index); + for (i = 0; i < n_messages; ++i) { + memcpy(&entry, bfr + 28 + i * sizeof(entry), sizeof(entry)); + dev_warn(rtk->dev, "RTKit: #%03d@%08x: %016llx %016llx", i, + entry.timestamp, entry.msg0, entry.msg1); + } +} + +void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size) +{ + size_t offset; + u32 section_fourcc, section_size; + struct apple_rtkit_crashlog_header header; + + memcpy(&header, bfr, sizeof(header)); + if (header.fourcc != APPLE_RTKIT_CRASHLOG_HEADER) { + dev_warn(rtk->dev, "RTKit: Expected crashlog header but got %x", + header.fourcc); + return; + } + + if (header.size > size) { + dev_warn(rtk->dev, "RTKit: Crashlog size (%x) 
is too large", + header.size); + return; + } + + size = header.size; + offset = sizeof(header); + + while (offset < size) { + memcpy(&section_fourcc, bfr + offset, 4); + memcpy(&section_size, bfr + offset + 12, 4); + + switch (section_fourcc) { + case APPLE_RTKIT_CRASHLOG_HEADER: + dev_dbg(rtk->dev, "RTKit: End of crashlog reached"); + return; + case APPLE_RTKIT_CRASHLOG_STR: + apple_rtkit_crashlog_dump_str(rtk, bfr + offset + 16, + section_size); + break; + case APPLE_RTKIT_CRASHLOG_VERSION: + apple_rtkit_crashlog_dump_version( + rtk, bfr + offset + 16, section_size); + break; + case APPLE_RTKIT_CRASHLOG_MBOX: + apple_rtkit_crashlog_dump_mailbox( + rtk, bfr + offset + 16, section_size); + break; + case APPLE_RTKIT_CRASHLOG_TIME: + apple_rtkit_crashlog_dump_time(rtk, bfr + offset + 16, + section_size); + break; + default: + dev_warn(rtk->dev, + "RTKit: Unknown crashlog section: %x", + section_fourcc); + } + + offset += section_size; + } + + dev_warn(rtk->dev, + "RTKit: End of crashlog reached but no footer present"); +} diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h new file mode 100644 index 000000000000..24bd619ec5e4 --- /dev/null +++ b/drivers/soc/apple/rtkit-internal.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* + * Apple RTKit IPC library + * Copyright (C) The Asahi Linux Contributors + */ + +#ifndef _APPLE_RTKIT_INTERAL_H +#define _APPLE_RTKIT_INTERAL_H + +#include <linux/apple-mailbox.h> +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/completion.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/soc/apple/rtkit.h> +#include <linux/workqueue.h> + +#define APPLE_RTKIT_APP_ENDPOINT_START 0x20 +#define APPLE_RTKIT_MAX_ENDPOINTS 0x100 + +struct apple_rtkit { + void *cookie; + const struct apple_rtkit_ops *ops; + struct device *dev; + + const char *mbox_name; + int mbox_idx; + struct mbox_client mbox_cl; + struct mbox_chan *mbox_chan; + + struct completion epmap_completion; + struct completion iop_pwr_ack_completion; + struct completion ap_pwr_ack_completion; + + int boot_result; + int version; + + unsigned int iop_power_state; + unsigned int ap_power_state; + bool crashed; + + DECLARE_BITMAP(endpoints, APPLE_RTKIT_MAX_ENDPOINTS); + + struct apple_rtkit_shmem ioreport_buffer; + struct apple_rtkit_shmem crashlog_buffer; + + struct apple_rtkit_shmem syslog_buffer; + char *syslog_msg_buffer; + size_t syslog_n_entries; + size_t syslog_msg_size; + + struct workqueue_struct *wq; +}; + +void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size); + +#endif diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c new file mode 100644 index 000000000000..cf1129e9f76b --- /dev/null +++ b/drivers/soc/apple/rtkit.c @@ -0,0 +1,958 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple RTKit IPC library + * Copyright (C) The Asahi Linux Contributors + */ + +#include "rtkit-internal.h" + +enum { + APPLE_RTKIT_PWR_STATE_OFF = 0x00, /* power off, cannot be restarted */ + APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */ + APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */ + APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */ +}; + +enum { + APPLE_RTKIT_EP_MGMT = 0, + APPLE_RTKIT_EP_CRASHLOG = 1, + APPLE_RTKIT_EP_SYSLOG = 2, + APPLE_RTKIT_EP_DEBUG = 3, + APPLE_RTKIT_EP_IOREPORT = 4, + 
APPLE_RTKIT_EP_OSLOG = 8, +}; + +#define APPLE_RTKIT_MGMT_TYPE GENMASK_ULL(59, 52) + +enum { + APPLE_RTKIT_MGMT_HELLO = 1, + APPLE_RTKIT_MGMT_HELLO_REPLY = 2, + APPLE_RTKIT_MGMT_STARTEP = 5, + APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE = 6, + APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK = 7, + APPLE_RTKIT_MGMT_EPMAP = 8, + APPLE_RTKIT_MGMT_EPMAP_REPLY = 8, + APPLE_RTKIT_MGMT_SET_AP_PWR_STATE = 0xb, + APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK = 0xb, +}; + +#define APPLE_RTKIT_MGMT_HELLO_MINVER GENMASK_ULL(15, 0) +#define APPLE_RTKIT_MGMT_HELLO_MAXVER GENMASK_ULL(31, 16) + +#define APPLE_RTKIT_MGMT_EPMAP_LAST BIT_ULL(51) +#define APPLE_RTKIT_MGMT_EPMAP_BASE GENMASK_ULL(34, 32) +#define APPLE_RTKIT_MGMT_EPMAP_BITMAP GENMASK_ULL(31, 0) + +#define APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE BIT_ULL(0) + +#define APPLE_RTKIT_MGMT_STARTEP_EP GENMASK_ULL(39, 32) +#define APPLE_RTKIT_MGMT_STARTEP_FLAG BIT_ULL(1) + +#define APPLE_RTKIT_MGMT_PWR_STATE GENMASK_ULL(15, 0) + +#define APPLE_RTKIT_CRASHLOG_CRASH 1 + +#define APPLE_RTKIT_BUFFER_REQUEST 1 +#define APPLE_RTKIT_BUFFER_REQUEST_SIZE GENMASK_ULL(51, 44) +#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(41, 0) + +#define APPLE_RTKIT_SYSLOG_TYPE GENMASK_ULL(59, 52) + +#define APPLE_RTKIT_SYSLOG_LOG 5 + +#define APPLE_RTKIT_SYSLOG_INIT 8 +#define APPLE_RTKIT_SYSLOG_N_ENTRIES GENMASK_ULL(7, 0) +#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24) + +#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56) +#define APPLE_RTKIT_OSLOG_INIT 1 +#define APPLE_RTKIT_OSLOG_ACK 3 + +#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11 +#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12 + +struct apple_rtkit_msg { + struct completion *completion; + struct apple_mbox_msg mbox_msg; +}; + +struct apple_rtkit_rx_work { + struct apple_rtkit *rtk; + u8 ep; + u64 msg; + struct work_struct work; +}; + +bool apple_rtkit_is_running(struct apple_rtkit *rtk) +{ + if (rtk->crashed) + return false; + if ((rtk->iop_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON) + return false; + if ((rtk->ap_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON) + return false; + return true; +} +EXPORT_SYMBOL_GPL(apple_rtkit_is_running); + +bool apple_rtkit_is_crashed(struct apple_rtkit *rtk) +{ + return rtk->crashed; +} +EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed); + +static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type, + u64 msg) +{ + msg &= ~APPLE_RTKIT_MGMT_TYPE; + msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type); + apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false); +} + +static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg) +{ + u64 reply; + + int min_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MINVER, msg); + int max_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MAXVER, msg); + int want_ver = min(APPLE_RTKIT_MAX_SUPPORTED_VERSION, max_ver); + + dev_dbg(rtk->dev, "RTKit: Min ver %d, max ver %d\n", min_ver, max_ver); + + if (min_ver > APPLE_RTKIT_MAX_SUPPORTED_VERSION) { + dev_err(rtk->dev, "RTKit: Firmware min version %d is too new\n", + min_ver); + goto abort_boot; + } + + if (max_ver < APPLE_RTKIT_MIN_SUPPORTED_VERSION) { + dev_err(rtk->dev, "RTKit: Firmware max version %d is too old\n", + max_ver); + goto abort_boot; + } + + dev_info(rtk->dev, "RTKit: Initializing (protocol version %d)\n", + want_ver); + rtk->version = want_ver; + + reply = FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MINVER, want_ver); + reply |= FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MAXVER, want_ver); + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_HELLO_REPLY, reply); + + return; + +abort_boot: + rtk->boot_result = 
-EINVAL; + complete_all(&rtk->epmap_completion); +} + +static void apple_rtkit_management_rx_epmap(struct apple_rtkit *rtk, u64 msg) +{ + int i, ep; + u64 reply; + unsigned long bitmap = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BITMAP, msg); + u32 base = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BASE, msg); + + dev_dbg(rtk->dev, + "RTKit: received endpoint bitmap 0x%lx with base 0x%x\n", + bitmap, base); + + for_each_set_bit(i, &bitmap, 32) { + ep = 32 * base + i; + dev_dbg(rtk->dev, "RTKit: Discovered endpoint 0x%02x\n", ep); + set_bit(ep, rtk->endpoints); + } + + reply = FIELD_PREP(APPLE_RTKIT_MGMT_EPMAP_BASE, base); + if (msg & APPLE_RTKIT_MGMT_EPMAP_LAST) + reply |= APPLE_RTKIT_MGMT_EPMAP_LAST; + else + reply |= APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE; + + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_EPMAP_REPLY, reply); + + if (!(msg & APPLE_RTKIT_MGMT_EPMAP_LAST)) + return; + + for_each_set_bit(ep, rtk->endpoints, APPLE_RTKIT_APP_ENDPOINT_START) { + switch (ep) { + /* the management endpoint is started by default */ + case APPLE_RTKIT_EP_MGMT: + break; + + /* without starting these RTKit refuses to boot */ + case APPLE_RTKIT_EP_SYSLOG: + case APPLE_RTKIT_EP_CRASHLOG: + case APPLE_RTKIT_EP_DEBUG: + case APPLE_RTKIT_EP_IOREPORT: + case APPLE_RTKIT_EP_OSLOG: + dev_dbg(rtk->dev, + "RTKit: Starting system endpoint 0x%02x\n", ep); + apple_rtkit_start_ep(rtk, ep); + break; + + default: + dev_warn(rtk->dev, + "RTKit: Unknown system endpoint: 0x%02x\n", + ep); + } + } + + rtk->boot_result = 0; + complete_all(&rtk->epmap_completion); +} + +static void apple_rtkit_management_rx_iop_pwr_ack(struct apple_rtkit *rtk, + u64 msg) +{ + unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg); + + dev_dbg(rtk->dev, "RTKit: IOP power state transition: 0x%x -> 0x%x\n", + rtk->iop_power_state, new_state); + rtk->iop_power_state = new_state; + + complete_all(&rtk->iop_pwr_ack_completion); +} + +static void apple_rtkit_management_rx_ap_pwr_ack(struct apple_rtkit *rtk, + u64 msg) +{ + unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg); + + dev_dbg(rtk->dev, "RTKit: AP power state transition: 0x%x -> 0x%x\n", + rtk->ap_power_state, new_state); + rtk->ap_power_state = new_state; + + complete_all(&rtk->ap_pwr_ack_completion); +} + +static void apple_rtkit_management_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_MGMT_TYPE, msg); + + switch (type) { + case APPLE_RTKIT_MGMT_HELLO: + apple_rtkit_management_rx_hello(rtk, msg); + break; + case APPLE_RTKIT_MGMT_EPMAP: + apple_rtkit_management_rx_epmap(rtk, msg); + break; + case APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK: + apple_rtkit_management_rx_iop_pwr_ack(rtk, msg); + break; + case APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK: + apple_rtkit_management_rx_ap_pwr_ack(rtk, msg); + break; + default: + dev_warn( + rtk->dev, + "RTKit: unknown management message: 0x%llx (type: 0x%02x)\n", + msg, type); + } +} + +static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, + struct apple_rtkit_shmem *buffer, + u8 ep, u64 msg) +{ + size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg); + u64 reply; + int err; + + buffer->buffer = NULL; + buffer->iomem = NULL; + buffer->is_mapped = false; + buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); + buffer->size = n_4kpages << 12; + + dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n", + buffer->size, &buffer->iova); + + if (buffer->iova && + (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) { + err = -EINVAL; + goto error; + } + + if 
(rtk->ops->shmem_setup) { + err = rtk->ops->shmem_setup(rtk->cookie, buffer); + if (err) + goto error; + } else { + buffer->buffer = dma_alloc_coherent(rtk->dev, buffer->size, + &buffer->iova, GFP_KERNEL); + if (!buffer->buffer) { + err = -ENOMEM; + goto error; + } + } + + if (!buffer->is_mapped) { + reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE, + APPLE_RTKIT_BUFFER_REQUEST); + reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages); + reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA, + buffer->iova); + apple_rtkit_send_message(rtk, ep, reply, NULL, false); + } + + return 0; + +error: + buffer->buffer = NULL; + buffer->iomem = NULL; + buffer->iova = 0; + buffer->size = 0; + buffer->is_mapped = false; + return err; +} + +static void apple_rtkit_free_buffer(struct apple_rtkit *rtk, + struct apple_rtkit_shmem *bfr) +{ + if (bfr->size == 0) + return; + + if (rtk->ops->shmem_destroy) + rtk->ops->shmem_destroy(rtk->cookie, bfr); + else if (bfr->buffer) + dma_free_coherent(rtk->dev, bfr->size, bfr->buffer, bfr->iova); + + bfr->buffer = NULL; + bfr->iomem = NULL; + bfr->iova = 0; + bfr->size = 0; + bfr->is_mapped = false; +} + +static void apple_rtkit_memcpy(struct apple_rtkit *rtk, void *dst, + struct apple_rtkit_shmem *bfr, size_t offset, + size_t len) +{ + if (bfr->iomem) + memcpy_fromio(dst, bfr->iomem + offset, len); + else + memcpy(dst, bfr->buffer + offset, len); +} + +static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg); + u8 *bfr; + + if (type != APPLE_RTKIT_CRASHLOG_CRASH) { + dev_warn(rtk->dev, "RTKit: Unknown crashlog message: %llx\n", + msg); + return; + } + + if (!rtk->crashlog_buffer.size) { + apple_rtkit_common_rx_get_buffer(rtk, &rtk->crashlog_buffer, + APPLE_RTKIT_EP_CRASHLOG, msg); + return; + } + + dev_err(rtk->dev, "RTKit: co-processor has crashed\n"); + + /* + * create a shadow copy here to make sure the co-processor isn't able + * to change the log while we're dumping it. this also ensures + * the buffer is in normal memory and not iomem for e.g. 
the SMC + */ + bfr = kzalloc(rtk->crashlog_buffer.size, GFP_KERNEL); + if (bfr) { + apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0, + rtk->crashlog_buffer.size); + apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size); + kfree(bfr); + } else { + dev_err(rtk->dev, + "RTKit: Couldn't allocate crashlog shadow buffer\n"); + } + + rtk->crashed = true; + if (rtk->ops->crashed) + rtk->ops->crashed(rtk->cookie); +} + +static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg); + + switch (type) { + case APPLE_RTKIT_BUFFER_REQUEST: + apple_rtkit_common_rx_get_buffer(rtk, &rtk->ioreport_buffer, + APPLE_RTKIT_EP_IOREPORT, msg); + break; + /* unknown, must be ACKed or the co-processor will hang */ + case 0x8: + case 0xc: + apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_IOREPORT, msg, + NULL, false); + break; + default: + dev_warn(rtk->dev, "RTKit: Unknown ioreport message: %llx\n", + msg); + } +} + +static void apple_rtkit_syslog_rx_init(struct apple_rtkit *rtk, u64 msg) +{ + rtk->syslog_n_entries = FIELD_GET(APPLE_RTKIT_SYSLOG_N_ENTRIES, msg); + rtk->syslog_msg_size = FIELD_GET(APPLE_RTKIT_SYSLOG_MSG_SIZE, msg); + + rtk->syslog_msg_buffer = kzalloc(rtk->syslog_msg_size, GFP_KERNEL); + + dev_dbg(rtk->dev, + "RTKit: syslog initialized: entries: %zd, msg_size: %zd\n", + rtk->syslog_n_entries, rtk->syslog_msg_size); +} + +static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg) +{ + u8 idx = msg & 0xff; + char log_context[24]; + size_t entry_size = 0x20 + rtk->syslog_msg_size; + + if (!rtk->syslog_msg_buffer) { + dev_warn( + rtk->dev, + "RTKit: received syslog message but no syslog_msg_buffer\n"); + goto done; + } + if (!rtk->syslog_buffer.size) { + dev_warn( + rtk->dev, + "RTKit: received syslog message but syslog_buffer.size is zero\n"); + goto done; + } + if (!rtk->syslog_buffer.buffer && !rtk->syslog_buffer.iomem) { + dev_warn( + rtk->dev, + "RTKit: received syslog message but no syslog_buffer.buffer or syslog_buffer.iomem\n"); + goto done; + } + if (idx > rtk->syslog_n_entries) { + dev_warn(rtk->dev, "RTKit: syslog index %d out of range\n", + idx); + goto done; + } + + apple_rtkit_memcpy(rtk, log_context, &rtk->syslog_buffer, + idx * entry_size + 8, sizeof(log_context)); + apple_rtkit_memcpy(rtk, rtk->syslog_msg_buffer, &rtk->syslog_buffer, + idx * entry_size + 8 + sizeof(log_context), + rtk->syslog_msg_size); + + log_context[sizeof(log_context) - 1] = 0; + rtk->syslog_msg_buffer[rtk->syslog_msg_size - 1] = 0; + dev_info(rtk->dev, "RTKit: syslog message: %s: %s\n", log_context, + rtk->syslog_msg_buffer); + +done: + apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_SYSLOG, msg, NULL, false); +} + +static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg); + + switch (type) { + case APPLE_RTKIT_BUFFER_REQUEST: + apple_rtkit_common_rx_get_buffer(rtk, &rtk->syslog_buffer, + APPLE_RTKIT_EP_SYSLOG, msg); + break; + case APPLE_RTKIT_SYSLOG_INIT: + apple_rtkit_syslog_rx_init(rtk, msg); + break; + case APPLE_RTKIT_SYSLOG_LOG: + apple_rtkit_syslog_rx_log(rtk, msg); + break; + default: + dev_warn(rtk->dev, "RTKit: Unknown syslog message: %llx\n", + msg); + } +} + +static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg) +{ + u64 ack; + + dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg); + ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK); + apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false); +} + 
+static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg) +{ + u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg); + + switch (type) { + case APPLE_RTKIT_OSLOG_INIT: + apple_rtkit_oslog_rx_init(rtk, msg); + break; + default: + dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg); + } +} + +static void apple_rtkit_rx_work(struct work_struct *work) +{ + struct apple_rtkit_rx_work *rtk_work = + container_of(work, struct apple_rtkit_rx_work, work); + struct apple_rtkit *rtk = rtk_work->rtk; + + switch (rtk_work->ep) { + case APPLE_RTKIT_EP_MGMT: + apple_rtkit_management_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_EP_CRASHLOG: + apple_rtkit_crashlog_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_EP_SYSLOG: + apple_rtkit_syslog_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_EP_IOREPORT: + apple_rtkit_ioreport_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_EP_OSLOG: + apple_rtkit_oslog_rx(rtk, rtk_work->msg); + break; + case APPLE_RTKIT_APP_ENDPOINT_START ... 0xff: + if (rtk->ops->recv_message) + rtk->ops->recv_message(rtk->cookie, rtk_work->ep, + rtk_work->msg); + else + dev_warn( + rtk->dev, + "Received unexpected message to EP%02d: %llx\n", + rtk_work->ep, rtk_work->msg); + break; + default: + dev_warn(rtk->dev, + "RTKit: message to unknown endpoint %02x: %llx\n", + rtk_work->ep, rtk_work->msg); + } + + kfree(rtk_work); +} + +static void apple_rtkit_rx(struct mbox_client *cl, void *mssg) +{ + struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl); + struct apple_mbox_msg *msg = mssg; + struct apple_rtkit_rx_work *work; + u8 ep = msg->msg1; + + /* + * The message was read from a MMIO FIFO and we have to make + * sure all reads from buffers sent with that message happen + * afterwards. + */ + dma_rmb(); + + if (!test_bit(ep, rtk->endpoints)) + dev_warn(rtk->dev, + "RTKit: Message to undiscovered endpoint 0x%02x\n", + ep); + + if (ep >= APPLE_RTKIT_APP_ENDPOINT_START && + rtk->ops->recv_message_early && + rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0)) + return; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + work->rtk = rtk; + work->ep = ep; + work->msg = msg->msg0; + INIT_WORK(&work->work, apple_rtkit_rx_work); + queue_work(rtk->wq, &work->work); +} + +static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r) +{ + struct apple_rtkit_msg *msg = + container_of(mssg, struct apple_rtkit_msg, mbox_msg); + + if (r == -ETIME) + return; + + if (msg->completion) + complete(msg->completion); + kfree(msg); +} + +int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message, + struct completion *completion, bool atomic) +{ + struct apple_rtkit_msg *msg; + int ret; + gfp_t flags; + + if (rtk->crashed) + return -EINVAL; + if (ep >= APPLE_RTKIT_APP_ENDPOINT_START && + !apple_rtkit_is_running(rtk)) + return -EINVAL; + + if (atomic) + flags = GFP_ATOMIC; + else + flags = GFP_KERNEL; + + msg = kzalloc(sizeof(*msg), flags); + if (!msg) + return -ENOMEM; + + msg->mbox_msg.msg0 = message; + msg->mbox_msg.msg1 = ep; + msg->completion = completion; + + /* + * The message will be sent with a MMIO write. We need the barrier + * here to ensure any previous writes to buffers are visible to the + * device before that MMIO write happens. 
+ */ + dma_wmb(); + + ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg); + if (ret < 0) { + kfree(msg); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(apple_rtkit_send_message); + +int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message, + unsigned long timeout, bool atomic) +{ + DECLARE_COMPLETION_ONSTACK(completion); + int ret; + long t; + + ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic); + if (ret < 0) + return ret; + + if (atomic) { + ret = mbox_flush(rtk->mbox_chan, timeout); + if (ret < 0) + return ret; + + if (try_wait_for_completion(&completion)) + return 0; + + return -ETIME; + } else { + t = wait_for_completion_interruptible_timeout( + &completion, msecs_to_jiffies(timeout)); + if (t < 0) + return t; + else if (t == 0) + return -ETIME; + return 0; + } +} +EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait); + +int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint) +{ + u64 msg; + + if (!test_bit(endpoint, rtk->endpoints)) + return -EINVAL; + if (endpoint >= APPLE_RTKIT_APP_ENDPOINT_START && + !apple_rtkit_is_running(rtk)) + return -EINVAL; + + msg = FIELD_PREP(APPLE_RTKIT_MGMT_STARTEP_EP, endpoint); + msg |= APPLE_RTKIT_MGMT_STARTEP_FLAG; + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_STARTEP, msg); + + return 0; +} +EXPORT_SYMBOL_GPL(apple_rtkit_start_ep); + +static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk) +{ + if (rtk->mbox_name) + rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl, + rtk->mbox_name); + else + rtk->mbox_chan = + mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx); + + if (IS_ERR(rtk->mbox_chan)) + return PTR_ERR(rtk->mbox_chan); + return 0; +} + +static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie, + const char *mbox_name, int mbox_idx, + const struct apple_rtkit_ops *ops) +{ + struct apple_rtkit *rtk; + int ret; + + if (!ops) + return ERR_PTR(-EINVAL); + + rtk = kzalloc(sizeof(*rtk), GFP_KERNEL); + if (!rtk) + return ERR_PTR(-ENOMEM); + + rtk->dev = dev; + rtk->cookie = cookie; + rtk->ops = ops; + + init_completion(&rtk->epmap_completion); + init_completion(&rtk->iop_pwr_ack_completion); + init_completion(&rtk->ap_pwr_ack_completion); + + bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS); + set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints); + + rtk->mbox_name = mbox_name; + rtk->mbox_idx = mbox_idx; + rtk->mbox_cl.dev = dev; + rtk->mbox_cl.tx_block = false; + rtk->mbox_cl.knows_txdone = false; + rtk->mbox_cl.rx_callback = &apple_rtkit_rx; + rtk->mbox_cl.tx_done = &apple_rtkit_tx_done; + + rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM, + dev_name(rtk->dev)); + if (!rtk->wq) { + ret = -ENOMEM; + goto free_rtk; + } + + ret = apple_rtkit_request_mbox_chan(rtk); + if (ret) + goto destroy_wq; + + return rtk; + +destroy_wq: + destroy_workqueue(rtk->wq); +free_rtk: + kfree(rtk); + return ERR_PTR(ret); +} + +static int apple_rtkit_wait_for_completion(struct completion *c) +{ + long t; + + t = wait_for_completion_interruptible_timeout(c, + msecs_to_jiffies(1000)); + if (t < 0) + return t; + else if (t == 0) + return -ETIME; + else + return 0; +} + +int apple_rtkit_reinit(struct apple_rtkit *rtk) +{ + /* make sure we don't handle any messages while reinitializing */ + mbox_free_channel(rtk->mbox_chan); + flush_workqueue(rtk->wq); + + apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); + apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer); + + kfree(rtk->syslog_msg_buffer); + + 
rtk->syslog_msg_buffer = NULL; + rtk->syslog_n_entries = 0; + rtk->syslog_msg_size = 0; + + bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS); + set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints); + + reinit_completion(&rtk->epmap_completion); + reinit_completion(&rtk->iop_pwr_ack_completion); + reinit_completion(&rtk->ap_pwr_ack_completion); + + rtk->crashed = false; + rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF; + rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF; + + return apple_rtkit_request_mbox_chan(rtk); +} +EXPORT_SYMBOL_GPL(apple_rtkit_reinit); + +static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk, + unsigned int state) +{ + u64 msg; + int ret; + + reinit_completion(&rtk->ap_pwr_ack_completion); + + msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state); + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE, + msg); + + ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion); + if (ret) + return ret; + + if (rtk->ap_power_state != state) + return -EINVAL; + return 0; +} + +static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk, + unsigned int state) +{ + u64 msg; + int ret; + + reinit_completion(&rtk->iop_pwr_ack_completion); + + msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state); + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE, + msg); + + ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion); + if (ret) + return ret; + + if (rtk->iop_power_state != state) + return -EINVAL; + return 0; +} + +int apple_rtkit_boot(struct apple_rtkit *rtk) +{ + int ret; + + if (apple_rtkit_is_running(rtk)) + return 0; + if (rtk->crashed) + return -EINVAL; + + dev_dbg(rtk->dev, "RTKit: waiting for boot to finish\n"); + ret = apple_rtkit_wait_for_completion(&rtk->epmap_completion); + if (ret) + return ret; + if (rtk->boot_result) + return rtk->boot_result; + + dev_dbg(rtk->dev, "RTKit: waiting for IOP power state ACK\n"); + ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion); + if (ret) + return ret; + + return apple_rtkit_set_ap_power_state(rtk, APPLE_RTKIT_PWR_STATE_ON); +} +EXPORT_SYMBOL_GPL(apple_rtkit_boot); + +int apple_rtkit_shutdown(struct apple_rtkit *rtk) +{ + int ret; + + /* if OFF is used here the co-processor will not wake up again */ + ret = apple_rtkit_set_ap_power_state(rtk, + APPLE_RTKIT_PWR_STATE_QUIESCED); + if (ret) + return ret; + + ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_SLEEP); + if (ret) + return ret; + + return apple_rtkit_reinit(rtk); +} +EXPORT_SYMBOL_GPL(apple_rtkit_shutdown); + +int apple_rtkit_quiesce(struct apple_rtkit *rtk) +{ + int ret; + + ret = apple_rtkit_set_ap_power_state(rtk, + APPLE_RTKIT_PWR_STATE_QUIESCED); + if (ret) + return ret; + + ret = apple_rtkit_set_iop_power_state(rtk, + APPLE_RTKIT_PWR_STATE_QUIESCED); + if (ret) + return ret; + + ret = apple_rtkit_reinit(rtk); + if (ret) + return ret; + + rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED; + rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED; + return 0; +} +EXPORT_SYMBOL_GPL(apple_rtkit_quiesce); + +int apple_rtkit_wake(struct apple_rtkit *rtk) +{ + u64 msg; + + if (apple_rtkit_is_running(rtk)) + return -EINVAL; + + reinit_completion(&rtk->iop_pwr_ack_completion); + + /* + * Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot + * will wait for the completion anyway. 
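Taken together, the exported helpers around this point give a consumer driver a small lifecycle: initialise the handle, boot the co-processor, start the application endpoints it needs, exchange messages, and quiesce or shut down again. A rough consumer-side sketch, assuming the public header that accompanies this driver (linux/soc/apple/rtkit.h) and placeholder endpoint/message values that are not defined by this patch:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/types.h>
    #include <linux/soc/apple/rtkit.h>

    #define DEMO_EP         0x20    /* placeholder application endpoint */
    #define DEMO_MSG_PING   0x1     /* placeholder message encoding */

    static void demo_recv(void *cookie, u8 ep, u64 msg)
    {
            /* replies from the co-processor end up here (via the rx workqueue) */
    }

    static const struct apple_rtkit_ops demo_rtkit_ops = {
            .recv_message = demo_recv,
    };

    static int demo_bringup(struct device *dev)
    {
            struct apple_rtkit *rtk;
            int ret;

            rtk = devm_apple_rtkit_init(dev, NULL, NULL, 0, &demo_rtkit_ops);
            if (IS_ERR(rtk))
                    return PTR_ERR(rtk);

            ret = apple_rtkit_boot(rtk);    /* waits for EPMAP and IOP power ack */
            if (ret)
                    return ret;

            ret = apple_rtkit_start_ep(rtk, DEMO_EP);
            if (ret)
                    return ret;

            /* fire-and-forget; pass a completion instead of NULL to wait for TX */
            return apple_rtkit_send_message(rtk, DEMO_EP, DEMO_MSG_PING, NULL, false);
    }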
+ */ + msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON); + apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE, + msg); + + return apple_rtkit_boot(rtk); +} +EXPORT_SYMBOL_GPL(apple_rtkit_wake); + +static void apple_rtkit_free(struct apple_rtkit *rtk) +{ + mbox_free_channel(rtk->mbox_chan); + destroy_workqueue(rtk->wq); + + apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); + apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer); + + kfree(rtk->syslog_msg_buffer); + kfree(rtk); +} + +struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie, + const char *mbox_name, int mbox_idx, + const struct apple_rtkit_ops *ops) +{ + struct apple_rtkit *rtk; + int ret; + + rtk = apple_rtkit_init(dev, cookie, mbox_name, mbox_idx, ops); + if (IS_ERR(rtk)) + return rtk; + + ret = devm_add_action_or_reset(dev, (void (*)(void *))apple_rtkit_free, + rtk); + if (ret) + return ERR_PTR(ret); + + return rtk; +} +EXPORT_SYMBOL_GPL(devm_apple_rtkit_init); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>"); +MODULE_DESCRIPTION("Apple RTKit driver"); diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c new file mode 100644 index 000000000000..83804b16ad03 --- /dev/null +++ b/drivers/soc/apple/sart.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SART device driver + * Copyright (C) The Asahi Linux Contributors + * + * Apple SART is a simple address filter for some DMA transactions. + * Regions of physical memory must be added to the SART's allow + * list before any DMA can target these. Unlike a proper + * IOMMU no remapping can be done and special support in the + * consumer driver is required since not all DMA transactions of + * a single device are subject to SART filtering. + */ + +#include <linux/soc/apple/sart.h> +#include <linux/atomic.h> +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#define APPLE_SART_MAX_ENTRIES 16 + +/* This is probably a bitfield but the exact meaning of each bit is unknown. 
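Because of that allow-list model, a consumer driver has to whitelist every buffer it wants the co-processor to DMA into and drop the entry again when it is done. A rough sketch of that flow using the interface exported by this file (the buffer handling is illustrative only; addresses handed to the SART must be 4 KiB aligned, matching the shift checks in sart_set_entry() below):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <linux/soc/apple/sart.h>

    static int demo_dma_with_sart(struct device *dev, size_t size)
    {
            struct apple_sart *sart;
            dma_addr_t addr;
            void *cpu;
            int ret;

            sart = devm_apple_sart_get(dev);
            if (IS_ERR(sart))
                    return PTR_ERR(sart);

            /* size is assumed to be a multiple of the 4 KiB SART granule */
            cpu = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
            if (!cpu)
                    return -ENOMEM;

            /* the co-processor may only touch the region once it is allowed */
            ret = apple_sart_add_allowed_region(sart, addr, size);
            if (ret)
                    goto free;

            /* ... hand (addr, size) to the co-processor and do the transfer ... */

            apple_sart_remove_allowed_region(sart, addr, size);
    free:
            dma_free_coherent(dev, size, cpu, addr);
            return ret;
    }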
*/ +#define APPLE_SART_FLAGS_ALLOW 0xff + +/* SARTv2 registers */ +#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx)) +#define APPLE_SART2_CONFIG_FLAGS GENMASK(31, 24) +#define APPLE_SART2_CONFIG_SIZE GENMASK(23, 0) +#define APPLE_SART2_CONFIG_SIZE_SHIFT 12 +#define APPLE_SART2_CONFIG_SIZE_MAX GENMASK(23, 0) + +#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx)) +#define APPLE_SART2_PADDR_SHIFT 12 + +/* SARTv3 registers */ +#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx)) + +#define APPLE_SART3_PADDR(idx) (0x40 + 4 * (idx)) +#define APPLE_SART3_PADDR_SHIFT 12 + +#define APPLE_SART3_SIZE(idx) (0x80 + 4 * (idx)) +#define APPLE_SART3_SIZE_SHIFT 12 +#define APPLE_SART3_SIZE_MAX GENMASK(29, 0) + +struct apple_sart_ops { + void (*get_entry)(struct apple_sart *sart, int index, u8 *flags, + phys_addr_t *paddr, size_t *size); + void (*set_entry)(struct apple_sart *sart, int index, u8 flags, + phys_addr_t paddr_shifted, size_t size_shifted); + unsigned int size_shift; + unsigned int paddr_shift; + size_t size_max; +}; + +struct apple_sart { + struct device *dev; + void __iomem *regs; + + const struct apple_sart_ops *ops; + + unsigned long protected_entries; + unsigned long used_entries; +}; + +static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags, + phys_addr_t *paddr, size_t *size) +{ + u32 cfg = readl(sart->regs + APPLE_SART2_CONFIG(index)); + phys_addr_t paddr_ = readl(sart->regs + APPLE_SART2_PADDR(index)); + size_t size_ = FIELD_GET(APPLE_SART2_CONFIG_SIZE, cfg); + + *flags = FIELD_GET(APPLE_SART2_CONFIG_FLAGS, cfg); + *size = size_ << APPLE_SART2_CONFIG_SIZE_SHIFT; + *paddr = paddr_ << APPLE_SART2_PADDR_SHIFT; +} + +static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags, + phys_addr_t paddr_shifted, size_t size_shifted) +{ + u32 cfg; + + cfg = FIELD_PREP(APPLE_SART2_CONFIG_FLAGS, flags); + cfg |= FIELD_PREP(APPLE_SART2_CONFIG_SIZE, size_shifted); + + writel(paddr_shifted, sart->regs + APPLE_SART2_PADDR(index)); + writel(cfg, sart->regs + APPLE_SART2_CONFIG(index)); +} + +static struct apple_sart_ops sart_ops_v2 = { + .get_entry = sart2_get_entry, + .set_entry = sart2_set_entry, + .size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT, + .paddr_shift = APPLE_SART2_PADDR_SHIFT, + .size_max = APPLE_SART2_CONFIG_SIZE_MAX, +}; + +static void sart3_get_entry(struct apple_sart *sart, int index, u8 *flags, + phys_addr_t *paddr, size_t *size) +{ + phys_addr_t paddr_ = readl(sart->regs + APPLE_SART3_PADDR(index)); + size_t size_ = readl(sart->regs + APPLE_SART3_SIZE(index)); + + *flags = readl(sart->regs + APPLE_SART3_CONFIG(index)); + *size = size_ << APPLE_SART3_SIZE_SHIFT; + *paddr = paddr_ << APPLE_SART3_PADDR_SHIFT; +} + +static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags, + phys_addr_t paddr_shifted, size_t size_shifted) +{ + writel(paddr_shifted, sart->regs + APPLE_SART3_PADDR(index)); + writel(size_shifted, sart->regs + APPLE_SART3_SIZE(index)); + writel(flags, sart->regs + APPLE_SART3_CONFIG(index)); +} + +static struct apple_sart_ops sart_ops_v3 = { + .get_entry = sart3_get_entry, + .set_entry = sart3_set_entry, + .size_shift = APPLE_SART3_SIZE_SHIFT, + .paddr_shift = APPLE_SART3_PADDR_SHIFT, + .size_max = APPLE_SART3_SIZE_MAX, +}; + +static int apple_sart_probe(struct platform_device *pdev) +{ + int i; + struct apple_sart *sart; + struct device *dev = &pdev->dev; + + sart = devm_kzalloc(dev, sizeof(*sart), GFP_KERNEL); + if (!sart) + return -ENOMEM; + + sart->dev = dev; + sart->ops = of_device_get_match_data(dev); + + sart->regs = 
devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(sart->regs)) + return PTR_ERR(sart->regs); + + for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) { + u8 flags; + size_t size; + phys_addr_t paddr; + + sart->ops->get_entry(sart, i, &flags, &paddr, &size); + + if (!flags) + continue; + + dev_dbg(sart->dev, + "SART bootloader entry: index %02d; flags: 0x%02x; paddr: %pa; size: 0x%zx\n", + i, flags, &paddr, size); + set_bit(i, &sart->protected_entries); + } + + platform_set_drvdata(pdev, sart); + return 0; +} + +struct apple_sart *devm_apple_sart_get(struct device *dev) +{ + struct device_node *sart_node; + struct platform_device *sart_pdev; + struct apple_sart *sart; + int ret; + + sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0); + if (!sart_node) + return ERR_PTR(-ENODEV); + + sart_pdev = of_find_device_by_node(sart_node); + of_node_put(sart_node); + + if (!sart_pdev) + return ERR_PTR(-ENODEV); + + sart = dev_get_drvdata(&sart_pdev->dev); + if (!sart) { + put_device(&sart_pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + + ret = devm_add_action_or_reset(dev, (void (*)(void *))put_device, + &sart_pdev->dev); + if (ret) + return ERR_PTR(ret); + + device_link_add(dev, &sart_pdev->dev, + DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER); + + return sart; +} +EXPORT_SYMBOL_GPL(devm_apple_sart_get); + +static int sart_set_entry(struct apple_sart *sart, int index, u8 flags, + phys_addr_t paddr, size_t size) +{ + if (size & ((1 << sart->ops->size_shift) - 1)) + return -EINVAL; + if (paddr & ((1 << sart->ops->paddr_shift) - 1)) + return -EINVAL; + + paddr >>= sart->ops->size_shift; + size >>= sart->ops->paddr_shift; + + if (size > sart->ops->size_max) + return -EINVAL; + + sart->ops->set_entry(sart, index, flags, paddr, size); + return 0; +} + +int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr, + size_t size) +{ + int i, ret; + + for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) { + if (test_bit(i, &sart->protected_entries)) + continue; + if (test_and_set_bit(i, &sart->used_entries)) + continue; + + ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr, + size); + if (ret) { + dev_dbg(sart->dev, + "unable to set entry %d to [%pa, 0x%zx]\n", + i, &paddr, size); + clear_bit(i, &sart->used_entries); + return ret; + } + + dev_dbg(sart->dev, "wrote [%pa, 0x%zx] to %d\n", &paddr, size, + i); + return 0; + } + + dev_warn(sart->dev, + "no free entries left to add [paddr: 0x%pa, size: 0x%zx]\n", + &paddr, size); + + return -EBUSY; +} +EXPORT_SYMBOL_GPL(apple_sart_add_allowed_region); + +int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr, + size_t size) +{ + int i; + + dev_dbg(sart->dev, + "will remove [paddr: %pa, size: 0x%zx] from allowed regions\n", + &paddr, size); + + for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) { + u8 eflags; + size_t esize; + phys_addr_t epaddr; + + if (test_bit(i, &sart->protected_entries)) + continue; + + sart->ops->get_entry(sart, i, &eflags, &epaddr, &esize); + + if (epaddr != paddr || esize != size) + continue; + + sart->ops->set_entry(sart, i, 0, 0, 0); + + clear_bit(i, &sart->used_entries); + dev_dbg(sart->dev, "cleared entry %d\n", i); + return 0; + } + + dev_warn(sart->dev, "entry [paddr: 0x%pa, size: 0x%zx] not found\n", + &paddr, size); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(apple_sart_remove_allowed_region); + +static void apple_sart_shutdown(struct platform_device *pdev) +{ + struct apple_sart *sart = dev_get_drvdata(&pdev->dev); + int i; + + for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) { + if 
(test_bit(i, &sart->protected_entries)) + continue; + + sart->ops->set_entry(sart, i, 0, 0, 0); + } +} + +static const struct of_device_id apple_sart_of_match[] = { + { + .compatible = "apple,t6000-sart", + .data = &sart_ops_v3, + }, + { + .compatible = "apple,t8103-sart", + .data = &sart_ops_v2, + }, + {} +}; +MODULE_DEVICE_TABLE(of, apple_sart_of_match); + +static struct platform_driver apple_sart_driver = { + .driver = { + .name = "apple-sart", + .of_match_table = apple_sart_of_match, + }, + .probe = apple_sart_probe, + .shutdown = apple_sart_shutdown, +}; +module_platform_driver(apple_sart_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>"); +MODULE_DESCRIPTION("Apple SART driver"); diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c index 7bbe46ea5f94..9407cac47fdb 100644 --- a/drivers/soc/bcm/bcm63xx/bcm-pmb.c +++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c @@ -312,6 +312,9 @@ static int bcm_pmb_probe(struct platform_device *pdev) for (e = table; e->name; e++) { struct bcm_pmb_pm_domain *pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + pd->pmb = pmb; pd->data = e; pd->genpd.name = e->name; diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 86c76211b3d3..cad2d55dcd3d 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1205,7 +1205,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, addr = op->addr.val; len = op->data.nbytes; - if (bcm_qspi_bspi_ver_three(qspi) == true) { + if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) { /* * The address coming into this function is a raw flash offset. * But for BSPI <= V3, we need to convert it to a remapped BSPI @@ -1224,7 +1224,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, len < 4) mspi_read = true; - if (mspi_read) + if (!has_bspi(qspi) || mspi_read) return bcm_qspi_mspi_exec_mem_op(spi, op); ret = bcm_qspi_bspi_set_mode(qspi, op, 0); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index b0c9f62ccefb..616ada891974 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -19,6 +19,7 @@ #include <linux/iopoll.h> #include <linux/jiffies.h> #include <linux/kernel.h> +#include <linux/log2.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/of.h> @@ -102,12 +103,6 @@ struct cqspi_driver_platdata { #define CQSPI_TIMEOUT_MS 500 #define CQSPI_READ_TIMEOUT_MS 10 -/* Instruction type */ -#define CQSPI_INST_TYPE_SINGLE 0 -#define CQSPI_INST_TYPE_DUAL 1 -#define CQSPI_INST_TYPE_QUAD 2 -#define CQSPI_INST_TYPE_OCTAL 3 - #define CQSPI_DUMMY_CLKS_PER_BYTE 8 #define CQSPI_DUMMY_BYTES_MAX 4 #define CQSPI_DUMMY_CLKS_MAX 31 @@ -376,10 +371,6 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr) static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, const struct spi_mem_op *op) { - f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE; - f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE; - f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; - /* * For an op to be DTR, cmd phase along with every other non-empty * phase should have dtr field set to 1. 
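The hunk below replaces the CQSPI_INST_TYPE_* switch statements with ilog2(): for the bus widths spi-mem can report (1, 2, 4 or 8 lines) the register encoding happens to equal the base-2 logarithm (0, 1, 2, 3 — the old SINGLE/DUAL/QUAD/OCTAL values), so the per-phase mapping collapses to a single expression. A small stand-alone illustration of the equivalence:

    #include <linux/log2.h>

    /* Old encoding, reproduced only to show what ilog2() now computes. */
    enum { DEMO_SINGLE = 0, DEMO_DUAL = 1, DEMO_QUAD = 2, DEMO_OCTAL = 3 };

    static unsigned int demo_width_field(unsigned int buswidth)
    {
            /* 1 -> 0 (single), 2 -> 1 (dual), 4 -> 2 (quad), 8 -> 3 (octal) */
            return buswidth ? ilog2(buswidth) : DEMO_SINGLE;
    }

A phase with a zero bus width (an unused phase) keeps the field at 0, mirroring the new code in cqspi_set_protocol().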
If an op phase has zero @@ -389,32 +380,23 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, (!op->addr.nbytes || op->addr.dtr) && (!op->data.nbytes || op->data.dtr); - switch (op->data.buswidth) { - case 0: - break; - case 1: - f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; - break; - case 2: - f_pdata->data_width = CQSPI_INST_TYPE_DUAL; - break; - case 4: - f_pdata->data_width = CQSPI_INST_TYPE_QUAD; - break; - case 8: - f_pdata->data_width = CQSPI_INST_TYPE_OCTAL; - break; - default: - return -EINVAL; - } + f_pdata->inst_width = 0; + if (op->cmd.buswidth) + f_pdata->inst_width = ilog2(op->cmd.buswidth); + + f_pdata->addr_width = 0; + if (op->addr.buswidth) + f_pdata->addr_width = ilog2(op->addr.buswidth); + + f_pdata->data_width = 0; + if (op->data.buswidth) + f_pdata->data_width = ilog2(op->data.buswidth); /* Right now we only support 8-8-8 DTR mode. */ if (f_pdata->dtr) { switch (op->cmd.buswidth) { case 0: - break; case 8: - f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL; break; default: return -EINVAL; @@ -422,9 +404,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, switch (op->addr.buswidth) { case 0: - break; case 8: - f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL; break; default: return -EINVAL; @@ -432,9 +412,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata, switch (op->data.buswidth) { case 0: - break; case 8: - f_pdata->data_width = CQSPI_INST_TYPE_OCTAL; break; default: return -EINVAL; diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 55c092069301..65be8e085ab8 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -813,6 +813,7 @@ static int mxic_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "spi_register_master failed\n"); pm_runtime_disable(&pdev->dev); + mxic_spi_mem_ecc_remove(mxic); } return ret; diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c index fe82f3575df4..24ec1c83f379 100644 --- a/drivers/spi/spi-rpc-if.c +++ b/drivers/spi/spi-rpc-if.c @@ -158,14 +158,18 @@ static int rpcif_spi_probe(struct platform_device *pdev) error = rpcif_hw_init(rpc, false); if (error) - return error; + goto out_disable_rpm; error = spi_register_controller(ctlr); if (error) { dev_err(&pdev->dev, "spi_register_controller failed\n"); - rpcif_disable_rpm(rpc); + goto out_disable_rpm; } + return 0; + +out_disable_rpm: + rpcif_disable_rpm(rpc); return error; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c4dd1200fe99..2e6d6bbeb784 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1130,11 +1130,15 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) if (ctlr->dma_tx) tx_dev = ctlr->dma_tx->device->dev; + else if (ctlr->dma_map_dev) + tx_dev = ctlr->dma_map_dev; else tx_dev = ctlr->dev.parent; if (ctlr->dma_rx) rx_dev = ctlr->dma_rx->device->dev; + else if (ctlr->dma_map_dev) + rx_dev = ctlr->dma_map_dev; else rx_dev = ctlr->dev.parent; @@ -2406,7 +2410,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) } else { struct acpi_device *adev; - if (acpi_bus_get_device(parent_handle, &adev)) + adev = acpi_fetch_acpi_dev(parent_handle); + if (!adev) return -ENODEV; ctlr = acpi_spi_find_controller_by_adev(adev); diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c index d68611ef22f8..f056204c0fdb 100644 --- a/drivers/staging/r8188eu/core/rtw_br_ext.c +++ b/drivers/staging/r8188eu/core/rtw_br_ext.c @@ -70,7 +70,7 @@ static int __nat25_add_pppoe_tag(struct 
sk_buff *skb, struct pppoe_tag *tag) struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN); int data_len; - data_len = tag->tag_len + TAG_HDR_LEN; + data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN; if (skb_tailroom(skb) < data_len) return -1; diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 95d4ca50a605..fd7267baa707 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1821,6 +1821,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) mutex_lock(&udev->cmdr_lock); page = xa_load(&udev->data_pages, dpi); if (likely(page)) { + get_page(page); mutex_unlock(&udev->cmdr_lock); return page; } @@ -1877,6 +1878,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) /* For the vmalloc()ed cmd area pages */ addr = (void *)(unsigned long)info->mem[mi].addr + offset; page = vmalloc_to_page(addr); + get_page(page); } else { uint32_t dpi; @@ -1887,7 +1889,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - get_page(page); vmf->page = page; return 0; } diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig index e99d840c2511..73a147202e88 100644 --- a/drivers/tee/Kconfig +++ b/drivers/tee/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only # Generic Trusted Execution Environment Configuration -config TEE +menuconfig TEE tristate "Trusted Execution Environment support" depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD select CRYPTO @@ -13,10 +13,7 @@ config TEE if TEE -menu "TEE drivers" - source "drivers/tee/optee/Kconfig" source "drivers/tee/amdtee/Kconfig" -endmenu endif diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index bd49ec934060..6554e06e053e 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -11,6 +11,34 @@ #include <linux/types.h> #include "optee_private.h" +#define MAX_ARG_PARAM_COUNT 6 + +/* + * How much memory we allocate for each entry. This doesn't have to be a + * single page, but it makes sense to keep at least keep it as multiples of + * the page size. + */ +#define SHM_ENTRY_SIZE PAGE_SIZE + +/* + * We need to have a compile time constant to be able to determine the + * maximum needed size of the bit field. + */ +#define MIN_ARG_SIZE OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT) +#define MAX_ARG_COUNT_PER_ENTRY (SHM_ENTRY_SIZE / MIN_ARG_SIZE) + +/* + * Shared memory for argument structs are cached here. The number of + * arguments structs that can fit is determined at runtime depending on the + * needed RPC parameter count reported by secure world + * (optee->rpc_param_count). 
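In other words, one SHM_ENTRY_SIZE (page-sized) buffer is carved into as many argument slots as fit, and the slot size grows when secure world asks for a trailing RPC argument struct. A back-of-the-envelope sketch of that arithmetic with placeholder structure sizes (the real ones come from OPTEE_MSG_GET_ARG_SIZE() in optee_msg.h):

    /* Placeholder sizes, for illustration only. */
    #define DEMO_ARG_HDR            32
    #define DEMO_PARAM_SZ           64
    #define DEMO_ARG_SIZE(n)        (DEMO_ARG_HDR + (n) * DEMO_PARAM_SZ)
    #define DEMO_MAX_PARAMS         6       /* mirrors MAX_ARG_PARAM_COUNT */
    #define DEMO_ENTRY_SIZE         4096    /* mirrors SHM_ENTRY_SIZE */

    static unsigned int demo_args_per_entry(unsigned int rpc_param_count)
    {
            unsigned int sz = DEMO_ARG_SIZE(DEMO_MAX_PARAMS);

            if (rpc_param_count)    /* room for the trailing RPC struct */
                    sz += DEMO_ARG_SIZE(rpc_param_count);

            return DEMO_ENTRY_SIZE / sz;    /* e.g. 2 RPC params: 4096 / 576 = 7 */
    }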
+ */ +struct optee_shm_arg_entry { + struct list_head list_node; + struct tee_shm *shm; + DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY); +}; + void optee_cq_wait_init(struct optee_call_queue *cq, struct optee_call_waiter *w) { @@ -104,37 +132,149 @@ static struct optee_session *find_session(struct optee_context_data *ctxdata, return NULL; } -struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params, - struct optee_msg_arg **msg_arg) +void optee_shm_arg_cache_init(struct optee *optee, u32 flags) +{ + INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args); + mutex_init(&optee->shm_arg_cache.mutex); + optee->shm_arg_cache.flags = flags; +} + +void optee_shm_arg_cache_uninit(struct optee *optee) +{ + struct list_head *head = &optee->shm_arg_cache.shm_args; + struct optee_shm_arg_entry *entry; + + mutex_destroy(&optee->shm_arg_cache.mutex); + while (!list_empty(head)) { + entry = list_first_entry(head, struct optee_shm_arg_entry, + list_node); + list_del(&entry->list_node); + if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) != + MAX_ARG_COUNT_PER_ENTRY) { + pr_err("Freeing non-free entry\n"); + } + tee_shm_free(entry->shm); + kfree(entry); + } +} + +size_t optee_msg_arg_size(size_t rpc_param_count) +{ + size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT); + + if (rpc_param_count) + sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count); + + return sz; +} + +/** + * optee_get_msg_arg() - Provide shared memory for argument struct + * @ctx: Caller TEE context + * @num_params: Number of parameter to store + * @entry_ret: Entry pointer, needed when freeing the buffer + * @shm_ret: Shared memory buffer + * @offs_ret: Offset of argument strut in shared memory buffer + * + * @returns a pointer to the argument struct in memory, else an ERR_PTR + */ +struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx, + size_t num_params, + struct optee_shm_arg_entry **entry_ret, + struct tee_shm **shm_ret, + u_int *offs_ret) { struct optee *optee = tee_get_drvdata(ctx->teedev); - size_t sz = OPTEE_MSG_GET_ARG_SIZE(num_params); - struct tee_shm *shm; + size_t sz = optee_msg_arg_size(optee->rpc_param_count); + struct optee_shm_arg_entry *entry; struct optee_msg_arg *ma; + size_t args_per_entry; + u_long bit; + u_int offs; + void *res; + + if (num_params > MAX_ARG_PARAM_COUNT) + return ERR_PTR(-EINVAL); + + if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED) + args_per_entry = SHM_ENTRY_SIZE / sz; + else + args_per_entry = 1; + + mutex_lock(&optee->shm_arg_cache.mutex); + list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) { + bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY); + if (bit < args_per_entry) + goto have_entry; + } /* - * rpc_arg_count is set to the number of allocated parameters in - * the RPC argument struct if a second MSG arg struct is expected. - * The second arg struct will then be used for RPC. + * No entry was found, let's allocate a new. 
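optee_get_msg_arg() is essentially a bitmap slot allocator: scan each cached entry for a clear bit, claim it under the cache mutex, and return the slot's offset; optee_free_msg_arg() later clears the bit again. Stripped of the OP-TEE specifics the pattern looks roughly like this (a sketch, not driver code):

    #include <linux/bitmap.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    #define DEMO_SLOTS      32
    #define DEMO_SLOT_SZ    128

    struct demo_pool {
            struct mutex lock;
            DECLARE_BITMAP(map, DEMO_SLOTS);
            u8 buf[DEMO_SLOTS * DEMO_SLOT_SZ];
    };

    static void demo_pool_init(struct demo_pool *p)
    {
            mutex_init(&p->lock);
            bitmap_zero(p->map, DEMO_SLOTS);
    }

    static void *demo_slot_get(struct demo_pool *p, unsigned int *offs)
    {
            unsigned long bit;
            void *slot = NULL;

            mutex_lock(&p->lock);
            bit = find_first_zero_bit(p->map, DEMO_SLOTS);
            if (bit < DEMO_SLOTS) {
                    set_bit(bit, p->map);
                    *offs = bit * DEMO_SLOT_SZ;
                    slot = &p->buf[*offs];
            }
            mutex_unlock(&p->lock);
            return slot;    /* NULL when every slot is in use */
    }

    static void demo_slot_put(struct demo_pool *p, unsigned int offs)
    {
            mutex_lock(&p->lock);
            clear_bit(offs / DEMO_SLOT_SZ, p->map);
            mutex_unlock(&p->lock);
    }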
*/ - if (optee->rpc_arg_count) - sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count); + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + res = ERR_PTR(-ENOMEM); + goto out; + } - shm = tee_shm_alloc_priv_buf(ctx, sz); - if (IS_ERR(shm)) - return shm; + if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV) + res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE); + else + res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE); - ma = tee_shm_get_va(shm, 0); - if (IS_ERR(ma)) { - tee_shm_free(shm); - return (void *)ma; + if (IS_ERR(res)) { + kfree(entry); + goto out; } - - memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params)); + entry->shm = res; + list_add(&entry->list_node, &optee->shm_arg_cache.shm_args); + bit = 0; + +have_entry: + offs = bit * sz; + res = tee_shm_get_va(entry->shm, offs); + if (IS_ERR(res)) + goto out; + ma = res; + set_bit(bit, entry->map); + memset(ma, 0, sz); ma->num_params = num_params; - *msg_arg = ma; + *entry_ret = entry; + *shm_ret = entry->shm; + *offs_ret = offs; +out: + mutex_unlock(&optee->shm_arg_cache.mutex); + return res; +} + +/** + * optee_free_msg_arg() - Free previsouly obtained shared memory + * @ctx: Caller TEE context + * @entry: Pointer returned when the shared memory was obtained + * @offs: Offset of shared memory buffer to free + * + * This function frees the shared memory obtained with optee_get_msg_arg(). + */ +void optee_free_msg_arg(struct tee_context *ctx, + struct optee_shm_arg_entry *entry, u_int offs) +{ + struct optee *optee = tee_get_drvdata(ctx->teedev); + size_t sz = optee_msg_arg_size(optee->rpc_param_count); + u_long bit; - return shm; + if (offs > SHM_ENTRY_SIZE || offs % sz) { + pr_err("Invalid offs %u\n", offs); + return; + } + bit = offs / sz; + + mutex_lock(&optee->shm_arg_cache.mutex); + + if (!test_bit(bit, entry->map)) + pr_err("Bit pos %lu is already free\n", bit); + clear_bit(bit, entry->map); + + mutex_unlock(&optee->shm_arg_cache.mutex); } int optee_open_session(struct tee_context *ctx, @@ -143,16 +283,19 @@ int optee_open_session(struct tee_context *ctx, { struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee_context_data *ctxdata = ctx->data; - int rc; + struct optee_shm_arg_entry *entry; struct tee_shm *shm; struct optee_msg_arg *msg_arg; struct optee_session *sess = NULL; uuid_t client_uuid; + u_int offs; + int rc; /* +2 for the meta parameters added below */ - shm = optee_get_msg_arg(ctx, arg->num_params + 2, &msg_arg); - if (IS_ERR(shm)) - return PTR_ERR(shm); + msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2, + &entry, &shm, &offs); + if (IS_ERR(msg_arg)) + return PTR_ERR(msg_arg); msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION; msg_arg->cancel_id = arg->cancel_id; @@ -185,7 +328,7 @@ int optee_open_session(struct tee_context *ctx, goto out; } - if (optee->ops->do_call_with_arg(ctx, shm)) { + if (optee->ops->do_call_with_arg(ctx, shm, offs)) { msg_arg->ret = TEEC_ERROR_COMMUNICATION; msg_arg->ret_origin = TEEC_ORIGIN_COMMS; } @@ -212,26 +355,28 @@ int optee_open_session(struct tee_context *ctx, arg->ret_origin = msg_arg->ret_origin; } out: - tee_shm_free(shm); + optee_free_msg_arg(ctx, entry, offs); return rc; } int optee_close_session_helper(struct tee_context *ctx, u32 session) { - struct tee_shm *shm; struct optee *optee = tee_get_drvdata(ctx->teedev); + struct optee_shm_arg_entry *entry; struct optee_msg_arg *msg_arg; + struct tee_shm *shm; + u_int offs; - shm = optee_get_msg_arg(ctx, 0, &msg_arg); - if (IS_ERR(shm)) - return PTR_ERR(shm); + msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, 
&offs); + if (IS_ERR(msg_arg)) + return PTR_ERR(msg_arg); msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; msg_arg->session = session; - optee->ops->do_call_with_arg(ctx, shm); + optee->ops->do_call_with_arg(ctx, shm, offs); - tee_shm_free(shm); + optee_free_msg_arg(ctx, entry, offs); return 0; } @@ -259,9 +404,11 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, { struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee_context_data *ctxdata = ctx->data; - struct tee_shm *shm; + struct optee_shm_arg_entry *entry; struct optee_msg_arg *msg_arg; struct optee_session *sess; + struct tee_shm *shm; + u_int offs; int rc; /* Check that the session is valid */ @@ -271,9 +418,10 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, if (!sess) return -EINVAL; - shm = optee_get_msg_arg(ctx, arg->num_params, &msg_arg); - if (IS_ERR(shm)) - return PTR_ERR(shm); + msg_arg = optee_get_msg_arg(ctx, arg->num_params, + &entry, &shm, &offs); + if (IS_ERR(msg_arg)) + return PTR_ERR(msg_arg); msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; msg_arg->func = arg->func; msg_arg->session = arg->session; @@ -284,7 +432,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, if (rc) goto out; - if (optee->ops->do_call_with_arg(ctx, shm)) { + if (optee->ops->do_call_with_arg(ctx, shm, offs)) { msg_arg->ret = TEEC_ERROR_COMMUNICATION; msg_arg->ret_origin = TEEC_ORIGIN_COMMS; } @@ -298,7 +446,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, arg->ret = msg_arg->ret; arg->ret_origin = msg_arg->ret_origin; out: - tee_shm_free(shm); + optee_free_msg_arg(ctx, entry, offs); return rc; } @@ -306,9 +454,11 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) { struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee_context_data *ctxdata = ctx->data; - struct tee_shm *shm; + struct optee_shm_arg_entry *entry; struct optee_msg_arg *msg_arg; struct optee_session *sess; + struct tee_shm *shm; + u_int offs; /* Check that the session is valid */ mutex_lock(&ctxdata->mutex); @@ -317,16 +467,16 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) if (!sess) return -EINVAL; - shm = optee_get_msg_arg(ctx, 0, &msg_arg); - if (IS_ERR(shm)) - return PTR_ERR(shm); + msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs); + if (IS_ERR(msg_arg)) + return PTR_ERR(msg_arg); msg_arg->cmd = OPTEE_MSG_CMD_CANCEL; msg_arg->session = session; msg_arg->cancel_id = cancel_id; - optee->ops->do_call_with_arg(ctx, shm); + optee->ops->do_call_with_arg(ctx, shm, offs); - tee_shm_free(shm); + optee_free_msg_arg(ctx, entry, offs); return 0; } @@ -362,7 +512,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages) * Allow kernel address to register with OP-TEE as kernel * pages are configured as normal memory only. 
*/ - if (virt_addr_valid(start)) + if (virt_addr_valid(start) || is_vmalloc_addr((void *)start)) return 0; mmap_read_lock(mm); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index daf947e98d14..daf07737c4fd 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -171,6 +171,7 @@ void optee_remove_common(struct optee *optee) optee_unregister_devices(); optee_notif_uninit(optee); + optee_shm_arg_cache_uninit(optee); teedev_close_context(optee->ctx); /* * The two devices have to be unregistered before we can free the diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index a5eb4ef46971..cbabcde4076f 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -601,6 +601,7 @@ done: * optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world * @ctx: calling context * @shm: shared memory holding the message to pass to secure world + * @offs: offset of the message in @shm * * Does a FF-A call to OP-TEE in secure world and handles eventual resulting * Remote Procedure Calls (RPC) from OP-TEE. @@ -609,24 +610,33 @@ done: */ static int optee_ffa_do_call_with_arg(struct tee_context *ctx, - struct tee_shm *shm) + struct tee_shm *shm, u_int offs) { struct ffa_send_direct_data data = { .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG, .data1 = (u32)shm->sec_world_id, .data2 = (u32)(shm->sec_world_id >> 32), - .data3 = shm->offset, + .data3 = offs, }; struct optee_msg_arg *arg; unsigned int rpc_arg_offs; struct optee_msg_arg *rpc_arg; - arg = tee_shm_get_va(shm, 0); + /* + * The shared memory object has to start on a page when passed as + * an argument struct. This is also what the shm pool allocator + * returns, but check this before calling secure world to catch + * eventual errors early in case something changes. 
+ */ + if (shm->offset) + return -EINVAL; + + arg = tee_shm_get_va(shm, offs); if (IS_ERR(arg)) return PTR_ERR(arg); rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); - rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); + rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs); if (IS_ERR(rpc_arg)) return PTR_ERR(rpc_arg); @@ -678,7 +688,8 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev, static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev, const struct ffa_dev_ops *ops, - unsigned int *rpc_arg_count) + u32 *sec_caps, + unsigned int *rpc_param_count) { struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES }; int rc; @@ -693,7 +704,8 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev, return false; } - *rpc_arg_count = (u8)data.data1; + *rpc_param_count = (u8)data.data1; + *sec_caps = data.data2; return true; } @@ -759,7 +771,7 @@ static const struct optee_ops optee_ffa_ops = { static void optee_ffa_remove(struct ffa_device *ffa_dev) { - struct optee *optee = ffa_dev->dev.driver_data; + struct optee *optee = ffa_dev_get_drvdata(ffa_dev); optee_remove_common(optee); @@ -772,11 +784,13 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev) static int optee_ffa_probe(struct ffa_device *ffa_dev) { const struct ffa_dev_ops *ffa_ops; - unsigned int rpc_arg_count; + unsigned int rpc_param_count; struct tee_shm_pool *pool; struct tee_device *teedev; struct tee_context *ctx; + u32 arg_cache_flags = 0; struct optee *optee; + u32 sec_caps; int rc; ffa_ops = ffa_dev_ops_get(ffa_dev); @@ -788,8 +802,11 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops)) return -EINVAL; - if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &rpc_arg_count)) + if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps, + &rpc_param_count)) return -EINVAL; + if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET) + arg_cache_flags |= OPTEE_SHM_ARG_SHARED; optee = kzalloc(sizeof(*optee), GFP_KERNEL); if (!optee) @@ -805,7 +822,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) optee->ops = &optee_ffa_ops; optee->ffa.ffa_dev = ffa_dev; optee->ffa.ffa_ops = ffa_ops; - optee->rpc_arg_count = rpc_arg_count; + optee->rpc_param_count = rpc_param_count; teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool, optee); @@ -838,6 +855,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) mutex_init(&optee->call_queue.mutex); INIT_LIST_HEAD(&optee->call_queue.waiters); optee_supp_init(&optee->supp); + optee_shm_arg_cache_init(optee, arg_cache_flags); ffa_dev_set_drvdata(ffa_dev, optee); ctx = teedev_open(optee->teedev); if (IS_ERR(ctx)) { diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h index ee3a03fc392c..97266243deaa 100644 --- a/drivers/tee/optee/optee_ffa.h +++ b/drivers/tee/optee/optee_ffa.h @@ -81,8 +81,16 @@ * as the second MSG arg struct for * OPTEE_FFA_YIELDING_CALL_WITH_ARG. * Bit[31:8]: Reserved (MBZ) - * w5-w7: Note used (MBZ) + * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below, + * unused bits MBZ. 
+ * w6-w7: Not used (MBZ) + */ +/* + * Secure world supports giving an offset into the argument shared memory + * object, see also OPTEE_FFA_YIELDING_CALL_WITH_ARG */ +#define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0) + #define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2) /* @@ -112,6 +120,8 @@ * OPTEE_MSG_GET_ARG_SIZE(num_params) follows a struct optee_msg_arg * for RPC, this struct has reserved space for the number of RPC * parameters as returned by OPTEE_FFA_EXCHANGE_CAPABILITIES. + * MBZ unless the bit OPTEE_FFA_SEC_CAP_ARG_OFFSET is received with + * OPTEE_FFA_EXCHANGE_CAPABILITIES. * w7: Not used (MBZ) * Resume from RPC. Register usage: * w3: Service ID, OPTEE_FFA_YIELDING_CALL_RESUME diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index e77765c78878..a33d98d17cfd 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -59,6 +59,16 @@ struct optee_notif { u_long *bitmap; }; +#define OPTEE_SHM_ARG_ALLOC_PRIV BIT(0) +#define OPTEE_SHM_ARG_SHARED BIT(1) +struct optee_shm_arg_entry; +struct optee_shm_arg_cache { + u32 flags; + /* Serializes access to this struct */ + struct mutex mutex; + struct list_head shm_args; +}; + /** * struct optee_supp - supplicant synchronization struct * @ctx the context of current connected supplicant. @@ -121,7 +131,7 @@ struct optee; */ struct optee_ops { int (*do_call_with_arg)(struct tee_context *ctx, - struct tee_shm *shm_arg); + struct tee_shm *shm_arg, u_int offs); int (*to_msg_param)(struct optee *optee, struct optee_msg_param *msg_params, size_t num_params, const struct tee_param *params); @@ -143,7 +153,7 @@ struct optee_ops { * @notif: notification synchronization struct * @supp: supplicant synchronization struct for RPC to supplicant * @pool: shared memory pool - * @rpc_arg_count: If > 0 number of RPC parameters to make room for + * @rpc_param_count: If > 0 number of RPC parameters to make room for * @scan_bus_done flag if device registation was already done. 
* @scan_bus_wq workqueue to scan optee bus and register optee drivers * @scan_bus_work workq to scan optee bus and register optee drivers @@ -157,11 +167,12 @@ struct optee { struct optee_smc smc; struct optee_ffa ffa; }; + struct optee_shm_arg_cache shm_arg_cache; struct optee_call_queue call_queue; struct optee_notif notif; struct optee_supp supp; struct tee_shm_pool *pool; - unsigned int rpc_arg_count; + unsigned int rpc_param_count; bool scan_bus_done; struct workqueue_struct *scan_bus_wq; struct work_struct scan_bus_work; @@ -273,8 +284,18 @@ void optee_cq_wait_for_completion(struct optee_call_queue *cq, void optee_cq_wait_final(struct optee_call_queue *cq, struct optee_call_waiter *w); int optee_check_mem_type(unsigned long start, size_t num_pages); -struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params, - struct optee_msg_arg **msg_arg); + +void optee_shm_arg_cache_init(struct optee *optee, u32 flags); +void optee_shm_arg_cache_uninit(struct optee *optee); +struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx, + size_t num_params, + struct optee_shm_arg_entry **entry, + struct tee_shm **shm_ret, + u_int *offs); +void optee_free_msg_arg(struct tee_context *ctx, + struct optee_shm_arg_entry *entry, u_int offs); +size_t optee_msg_arg_size(size_t rpc_param_count); + struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz); void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm); diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h index d44a6ae994f8..c60896cf71cb 100644 --- a/drivers/tee/optee/optee_smc.h +++ b/drivers/tee/optee/optee_smc.h @@ -107,14 +107,22 @@ struct optee_smc_call_get_os_revision_result { /* * Call with struct optee_msg_arg as argument * - * When calling this function normal world has a few responsibilities: + * When called with OPTEE_SMC_CALL_WITH_RPC_ARG or + * OPTEE_SMC_CALL_WITH_REGD_ARG in a0 there is one RPC struct optee_msg_arg + * following after the first struct optee_msg_arg. The RPC struct + * optee_msg_arg has reserved space for the number of RPC parameters as + * returned by OPTEE_SMC_EXCHANGE_CAPABILITIES. + * + * When calling these functions, normal world has a few responsibilities: * 1. It must be able to handle eventual RPCs * 2. Non-secure interrupts should not be masked * 3. If asynchronous notifications has been negotiated successfully, then - * asynchronous notifications should be unmasked during this call. + * the interrupt for asynchronous notifications should be unmasked + * during this call. 
* - * Call register usage: - * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG + * Call register usage, OPTEE_SMC_CALL_WITH_ARG and + * OPTEE_SMC_CALL_WITH_RPC_ARG: + * a0 SMC Function ID, OPTEE_SMC_CALL_WITH_ARG or OPTEE_SMC_CALL_WITH_RPC_ARG * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg * a2 Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg * a3 Cache settings, not used if physical pointer is in a predefined shared @@ -122,6 +130,15 @@ struct optee_smc_call_get_os_revision_result { * a4-6 Not used * a7 Hypervisor Client ID register * + * Call register usage, OPTEE_SMC_CALL_WITH_REGD_ARG: + * a0 SMC Function ID, OPTEE_SMC_CALL_WITH_REGD_ARG + * a1 Upper 32 bits of a 64-bit shared memory cookie + * a2 Lower 32 bits of a 64-bit shared memory cookie + * a3 Offset of the struct optee_msg_arg in the shared memory with the + * supplied cookie + * a4-6 Not used + * a7 Hypervisor Client ID register + * * Normal return register usage: * a0 Return value, OPTEE_SMC_RETURN_* * a1-3 Not used @@ -154,6 +171,10 @@ struct optee_smc_call_get_os_revision_result { #define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG #define OPTEE_SMC_CALL_WITH_ARG \ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG) +#define OPTEE_SMC_CALL_WITH_RPC_ARG \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG) +#define OPTEE_SMC_CALL_WITH_REGD_ARG \ + OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG) /* * Get Shared Memory Config @@ -202,7 +223,11 @@ struct optee_smc_get_shm_config_result { * a0 OPTEE_SMC_RETURN_OK * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* * a2 The maximum secure world notification number - * a3-7 Preserved + * a3 Bit[7:0]: Number of parameters needed for RPC to be supplied + * as the second MSG arg struct for + * OPTEE_SMC_CALL_WITH_ARG + * Bit[31:8]: Reserved (MBZ) + * a4-7 Preserved * * Error return register usage: * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world @@ -227,6 +252,8 @@ struct optee_smc_get_shm_config_result { #define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4) /* Secure world supports asynchronous notification of normal world */ #define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5) +/* Secure world supports pre-allocating RPC arg struct */ +#define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6) #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \ @@ -236,7 +263,7 @@ struct optee_smc_exchange_capabilities_result { unsigned long status; unsigned long capabilities; unsigned long max_notif_value; - unsigned long reserved0; + unsigned long data; }; /* @@ -358,6 +385,9 @@ struct optee_smc_disable_shm_cache_result { * should be called until all pended values have been retrieved. When a * value is retrieved, it's cleared from the record in secure world. * + * It is expected that this function is called from an interrupt handler + * in normal world. 
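The register layouts above hand 64-bit physical pointers and shared-memory cookies to the SMC as upper/lower 32-bit pairs (a1/a2), plus an offset in a3 for the registered-argument variant; the driver packs and unpacks them with its reg_pair helpers. The underlying operation is just a shift-and-mask split, roughly:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void demo_reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
    {
            *reg0 = upper_32_bits(val);     /* e.g. a1 */
            *reg1 = lower_32_bits(val);     /* e.g. a2 */
    }

    static u64 demo_reg_pair_to_64(u32 reg0, u32 reg1)
    {
            return ((u64)reg0 << 32) | reg1;
    }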
+ * * Call requests usage: * a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE * a1-6 Not used @@ -390,6 +420,12 @@ struct optee_smc_disable_shm_cache_result { #define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE) +/* See OPTEE_SMC_CALL_WITH_RPC_ARG above */ +#define OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG 18 + +/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */ +#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19 + /* * Resume from RPC (for example after processing a foreign interrupt) * diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 67b7f7d2ff27..385cb0aee610 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -437,6 +437,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, struct optee_msg_arg *msg_arg; struct tee_shm *shm_arg; u64 *pages_list; + size_t sz; int rc; if (!num_pages) @@ -450,15 +451,30 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, if (!pages_list) return -ENOMEM; - shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg); + /* + * We're about to register shared memory we can't register shared + * memory for this request or there's a catch-22. + * + * So in this we'll have to do the good old temporary private + * allocation instead of using optee_get_msg_arg(). + */ + sz = optee_msg_arg_size(optee->rpc_param_count); + shm_arg = tee_shm_alloc_priv_buf(ctx, sz); if (IS_ERR(shm_arg)) { rc = PTR_ERR(shm_arg); goto out; } + msg_arg = tee_shm_get_va(shm_arg, 0); + if (IS_ERR(msg_arg)) { + rc = PTR_ERR(msg_arg); + goto out; + } optee_fill_pages_list(pages_list, pages, num_pages, tee_shm_get_page_offset(shm)); + memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1)); + msg_arg->num_params = 1; msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM; msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | OPTEE_MSG_ATTR_NONCONTIG; @@ -471,7 +487,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) | (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); - if (optee->ops->do_call_with_arg(ctx, shm_arg) || + if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) || msg_arg->ret != TEEC_SUCCESS) rc = -EINVAL; @@ -487,19 +503,37 @@ static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) struct optee_msg_arg *msg_arg; struct tee_shm *shm_arg; int rc = 0; + size_t sz; - shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg); + /* + * We're about to unregister shared memory and we may not be able + * register shared memory for this request in case we're called + * from optee_shm_arg_cache_uninit(). + * + * So in order to keep things simple in this function just as in + * optee_shm_register() we'll use temporary private allocation + * instead of using optee_get_msg_arg(). 
+ */ + sz = optee_msg_arg_size(optee->rpc_param_count); + shm_arg = tee_shm_alloc_priv_buf(ctx, sz); if (IS_ERR(shm_arg)) return PTR_ERR(shm_arg); + msg_arg = tee_shm_get_va(shm_arg, 0); + if (IS_ERR(msg_arg)) { + rc = PTR_ERR(msg_arg); + goto out; + } + memset(msg_arg, 0, sz); + msg_arg->num_params = 1; msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM; - msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm; - if (optee->ops->do_call_with_arg(ctx, shm_arg) || + if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) || msg_arg->ret != TEEC_SUCCESS) rc = -EINVAL; +out: tee_shm_free(shm_arg); return rc; } @@ -732,16 +766,9 @@ static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx) } static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, - struct tee_shm *shm, + struct optee_msg_arg *arg, struct optee_call_ctx *call_ctx) { - struct optee_msg_arg *arg; - - arg = tee_shm_get_va(shm, 0); - if (IS_ERR(arg)) { - pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); - return; - } switch (arg->cmd) { case OPTEE_RPC_CMD_SHM_ALLOC: @@ -765,11 +792,13 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, * Result of RPC is written back into @param. */ static void optee_handle_rpc(struct tee_context *ctx, + struct optee_msg_arg *rpc_arg, struct optee_rpc_param *param, struct optee_call_ctx *call_ctx) { struct tee_device *teedev = ctx->teedev; struct optee *optee = tee_get_drvdata(teedev); + struct optee_msg_arg *arg; struct tee_shm *shm; phys_addr_t pa; @@ -801,8 +830,19 @@ static void optee_handle_rpc(struct tee_context *ctx, */ break; case OPTEE_SMC_RPC_FUNC_CMD: - shm = reg_pair_to_ptr(param->a1, param->a2); - handle_rpc_func_cmd(ctx, optee, shm, call_ctx); + if (rpc_arg) { + arg = rpc_arg; + } else { + shm = reg_pair_to_ptr(param->a1, param->a2); + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) { + pr_err("%s: tee_shm_get_va %p failed\n", + __func__, shm); + break; + } + } + + handle_rpc_func_cmd(ctx, optee, arg, call_ctx); break; default: pr_warn("Unknown RPC func 0x%x\n", @@ -816,7 +856,8 @@ static void optee_handle_rpc(struct tee_context *ctx, /** * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world * @ctx: calling context - * @arg: shared memory holding the message to pass to secure world + * @shm: shared memory holding the message to pass to secure world + * @offs: offset of the message in @shm * * Does and SMC to OP-TEE in secure world and handles eventual resulting * Remote Procedure Calls (RPC) from OP-TEE. 
@@ -824,21 +865,46 @@ static void optee_handle_rpc(struct tee_context *ctx, * Returns return code from secure world, 0 is OK */ static int optee_smc_do_call_with_arg(struct tee_context *ctx, - struct tee_shm *arg) + struct tee_shm *shm, u_int offs) { struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee_call_waiter w; struct optee_rpc_param param = { }; struct optee_call_ctx call_ctx = { }; - phys_addr_t parg; + struct optee_msg_arg *rpc_arg = NULL; int rc; - rc = tee_shm_get_pa(arg, 0, &parg); - if (rc) - return rc; + if (optee->rpc_param_count) { + struct optee_msg_arg *arg; + unsigned int rpc_arg_offs; + + arg = tee_shm_get_va(shm, offs); + if (IS_ERR(arg)) + return PTR_ERR(arg); + + rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); + rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs); + if (IS_ERR(arg)) + return PTR_ERR(arg); + } + + if (rpc_arg && tee_shm_is_dynamic(shm)) { + param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG; + reg_pair_from_64(¶m.a1, ¶m.a2, (u_long)shm); + param.a3 = offs; + } else { + phys_addr_t parg; - param.a0 = OPTEE_SMC_CALL_WITH_ARG; - reg_pair_from_64(¶m.a1, ¶m.a2, parg); + rc = tee_shm_get_pa(shm, offs, &parg); + if (rc) + return rc; + + if (rpc_arg) + param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG; + else + param.a0 = OPTEE_SMC_CALL_WITH_ARG; + reg_pair_from_64(¶m.a1, ¶m.a2, parg); + } /* Initialize waiter */ optee_cq_wait_init(&optee->call_queue, &w); while (true) { @@ -862,7 +928,7 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx, param.a1 = res.a1; param.a2 = res.a2; param.a3 = res.a3; - optee_handle_rpc(ctx, ¶m, &call_ctx); + optee_handle_rpc(ctx, rpc_arg, ¶m, &call_ctx); } else { rc = res.a0; break; @@ -881,17 +947,19 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx, static int simple_call_with_arg(struct tee_context *ctx, u32 cmd) { + struct optee_shm_arg_entry *entry; struct optee_msg_arg *msg_arg; struct tee_shm *shm; + u_int offs; - shm = optee_get_msg_arg(ctx, 0, &msg_arg); - if (IS_ERR(shm)) - return PTR_ERR(shm); + msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs); + if (IS_ERR(msg_arg)) + return PTR_ERR(msg_arg); msg_arg->cmd = cmd; - optee_smc_do_call_with_arg(ctx, shm); + optee_smc_do_call_with_arg(ctx, shm, offs); - tee_shm_free(shm); + optee_free_msg_arg(ctx, entry, offs); return 0; } @@ -1118,7 +1186,8 @@ static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn) } static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, - u32 *sec_caps, u32 *max_notif_value) + u32 *sec_caps, u32 *max_notif_value, + unsigned int *rpc_param_count) { union { struct arm_smccc_res smccc; @@ -1145,6 +1214,10 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, *max_notif_value = res.result.max_notif_value; else *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE; + if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG) + *rpc_param_count = (u8)res.result.data; + else + *rpc_param_count = 0; return true; } @@ -1251,7 +1324,8 @@ static int optee_smc_remove(struct platform_device *pdev) * reference counters and also avoid wild pointers in secure world * into the old shared memory range. 
*/ - optee_disable_shm_cache(optee); + if (!optee->rpc_param_count) + optee_disable_shm_cache(optee); optee_smc_notif_uninit_irq(optee); @@ -1274,7 +1348,10 @@ static int optee_smc_remove(struct platform_device *pdev) */ static void optee_shutdown(struct platform_device *pdev) { - optee_disable_shm_cache(platform_get_drvdata(pdev)); + struct optee *optee = platform_get_drvdata(pdev); + + if (!optee->rpc_param_count) + optee_disable_shm_cache(optee); } static int optee_probe(struct platform_device *pdev) @@ -1283,9 +1360,11 @@ static int optee_probe(struct platform_device *pdev) struct tee_shm_pool *pool = ERR_PTR(-EINVAL); struct optee *optee = NULL; void *memremaped_shm = NULL; + unsigned int rpc_param_count; struct tee_device *teedev; struct tee_context *ctx; u32 max_notif_value; + u32 arg_cache_flags; u32 sec_caps; int rc; @@ -1306,7 +1385,8 @@ static int optee_probe(struct platform_device *pdev) } if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps, - &max_notif_value)) { + &max_notif_value, + &rpc_param_count)) { pr_warn("capabilities mismatch\n"); return -EINVAL; } @@ -1314,14 +1394,48 @@ static int optee_probe(struct platform_device *pdev) /* * Try to use dynamic shared memory if possible */ - if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) + if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) { + /* + * If we have OPTEE_SMC_SEC_CAP_RPC_ARG we can ask + * optee_get_msg_arg() to pre-register (by having + * OPTEE_SHM_ARG_ALLOC_PRIV cleared) the page used to pass + * an argument struct. + * + * With the page is pre-registered we can use a non-zero + * offset for argument struct, this is indicated with + * OPTEE_SHM_ARG_SHARED. + * + * This means that optee_smc_do_call_with_arg() will use + * OPTEE_SMC_CALL_WITH_REGD_ARG for pre-registered pages. + */ + if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG) + arg_cache_flags = OPTEE_SHM_ARG_SHARED; + else + arg_cache_flags = OPTEE_SHM_ARG_ALLOC_PRIV; + pool = optee_shm_pool_alloc_pages(); + } /* * If dynamic shared memory is not available or failed - try static one */ - if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) + if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) { + /* + * The static memory pool can use non-zero page offsets so + * let optee_get_msg_arg() know that with OPTEE_SHM_ARG_SHARED. + * + * optee_get_msg_arg() should not pre-register the + * allocated page used to pass an argument struct, this is + * indicated with OPTEE_SHM_ARG_ALLOC_PRIV. + * + * This means that optee_smc_do_call_with_arg() will use + * OPTEE_SMC_CALL_WITH_ARG if rpc_param_count is 0, else + * OPTEE_SMC_CALL_WITH_RPC_ARG. 
+ */ + arg_cache_flags = OPTEE_SHM_ARG_SHARED | + OPTEE_SHM_ARG_ALLOC_PRIV; pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); + } if (IS_ERR(pool)) return PTR_ERR(pool); @@ -1335,6 +1449,7 @@ static int optee_probe(struct platform_device *pdev) optee->ops = &optee_ops; optee->smc.invoke_fn = invoke_fn; optee->smc.sec_caps = sec_caps; + optee->rpc_param_count = rpc_param_count; teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee); if (IS_ERR(teedev)) { @@ -1363,6 +1478,7 @@ static int optee_probe(struct platform_device *pdev) optee_supp_init(&optee->supp); optee->smc.memremaped_shm = memremaped_shm; optee->pool = pool; + optee_shm_arg_cache_init(optee, arg_cache_flags); platform_set_drvdata(pdev, optee); ctx = teedev_open(optee->teedev); @@ -1403,7 +1519,12 @@ static int optee_probe(struct platform_device *pdev) */ optee_disable_unmapped_shm_cache(optee); - optee_enable_shm_cache(optee); + /* + * Only enable the shm cache in case we're not able to pass the RPC + * arg struct right after the normal arg struct. + */ + if (!optee->rpc_param_count) + optee_enable_shm_cache(optee); if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) pr_info("dynamic shared memory is enabled\n"); @@ -1416,7 +1537,8 @@ static int optee_probe(struct platform_device *pdev) return 0; err_disable_shm_cache: - optee_disable_shm_cache(optee); + if (!optee->rpc_param_count) + optee_disable_shm_cache(optee); optee_smc_notif_uninit_irq(optee); optee_unregister_devices(); err_notif_uninit: @@ -1424,6 +1546,7 @@ err_notif_uninit: err_close_ctx: teedev_close_context(ctx); err_supp_uninit: + optee_shm_arg_cache_uninit(optee); optee_supp_uninit(&optee->supp); mutex_destroy(&optee->call_queue.mutex); err_unreg_supp_teedev: diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 8aa1a4836b92..af0f7c603fa4 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -302,7 +302,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx, return PTR_ERR(shm); data.id = shm->id; - data.flags = shm->flags; data.size = shm->size; if (copy_to_user(udata, &data, sizeof(data))) @@ -339,7 +338,6 @@ tee_ioctl_shm_register(struct tee_context *ctx, return PTR_ERR(shm); data.id = shm->id; - data.flags = shm->flags; data.length = shm->size; if (copy_to_user(udata, &data, sizeof(data))) diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index f31e29e8f1ca..f2b1bcefcadd 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -23,21 +23,36 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count) static int shm_get_kernel_pages(unsigned long start, size_t page_count, struct page **pages) { - struct kvec *kiov; size_t n; int rc; - kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL); - if (!kiov) - return -ENOMEM; + if (is_vmalloc_addr((void *)start)) { + struct page *page; - for (n = 0; n < page_count; n++) { - kiov[n].iov_base = (void *)(start + n * PAGE_SIZE); - kiov[n].iov_len = PAGE_SIZE; - } + for (n = 0; n < page_count; n++) { + page = vmalloc_to_page((void *)(start + PAGE_SIZE * n)); + if (!page) + return -ENOMEM; + + get_page(page); + pages[n] = page; + } + rc = page_count; + } else { + struct kvec *kiov; + + kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL); + if (!kiov) + return -ENOMEM; + + for (n = 0; n < page_count; n++) { + kiov[n].iov_base = (void *)(start + n * PAGE_SIZE); + kiov[n].iov_len = PAGE_SIZE; + } - rc = get_kernel_pages(kiov, page_count, 0, pages); - kfree(kiov); + rc = get_kernel_pages(kiov, page_count, 0, pages); + kfree(kiov); + } 
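The hunk above teaches shm_get_kernel_pages() to accept vmalloc buffers as well: lowmem addresses keep going through the kvec/get_kernel_pages() path, while vmalloc addresses are resolved page by page with vmalloc_to_page() and pinned with get_page(). A stand-alone sketch of the vmalloc side, with explicit unwinding added for illustration:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Pin the pages backing a vmalloc'ed buffer; returns the count pinned. */
    static int demo_pin_vmalloc_pages(void *buf, unsigned int page_count,
                                      struct page **pages)
    {
            unsigned int n;

            if (!is_vmalloc_addr(buf))
                    return -EINVAL;         /* lowmem is handled elsewhere */

            for (n = 0; n < page_count; n++) {
                    struct page *page = vmalloc_to_page(buf + n * PAGE_SIZE);

                    if (!page)
                            goto err_put;
                    get_page(page);         /* hold a reference while shared */
                    pages[n] = page;
            }
            return page_count;

    err_put:
            while (n--)
                    put_page(pages[n]);
            return -ENOMEM;
    }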
return rc; } @@ -415,56 +430,6 @@ void tee_shm_free(struct tee_shm *shm) EXPORT_SYMBOL_GPL(tee_shm_free); /** - * tee_shm_va2pa() - Get physical address of a virtual address - * @shm: Shared memory handle - * @va: Virtual address to tranlsate - * @pa: Returned physical address - * @returns 0 on success and < 0 on failure - */ -int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa) -{ - if (!shm->kaddr) - return -EINVAL; - /* Check that we're in the range of the shm */ - if ((char *)va < (char *)shm->kaddr) - return -EINVAL; - if ((char *)va >= ((char *)shm->kaddr + shm->size)) - return -EINVAL; - - return tee_shm_get_pa( - shm, (unsigned long)va - (unsigned long)shm->kaddr, pa); -} -EXPORT_SYMBOL_GPL(tee_shm_va2pa); - -/** - * tee_shm_pa2va() - Get virtual address of a physical address - * @shm: Shared memory handle - * @pa: Physical address to tranlsate - * @va: Returned virtual address - * @returns 0 on success and < 0 on failure - */ -int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va) -{ - if (!shm->kaddr) - return -EINVAL; - /* Check that we're in the range of the shm */ - if (pa < shm->paddr) - return -EINVAL; - if (pa >= (shm->paddr + shm->size)) - return -EINVAL; - - if (va) { - void *v = tee_shm_get_va(shm, pa - shm->paddr); - - if (IS_ERR(v)) - return PTR_ERR(v); - *va = v; - } - return 0; -} -EXPORT_SYMBOL_GPL(tee_shm_pa2va); - -/** * tee_shm_get_va() - Get virtual address of a shared memory plus an offset * @shm: Shared memory handle * @offs: Offset from start of this shared memory diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c index 8a6958377764..3acc0f185762 100644 --- a/drivers/tty/serial/mpc52xx_uart.c +++ b/drivers/tty/serial/mpc52xx_uart.c @@ -436,31 +436,31 @@ static void mpc512x_psc_fifo_init(struct uart_port *port) out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM); } -static int mpc512x_psc_raw_rx_rdy(struct uart_port *port) +static unsigned int mpc512x_psc_raw_rx_rdy(struct uart_port *port) { return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY); } -static int mpc512x_psc_raw_tx_rdy(struct uart_port *port) +static unsigned int mpc512x_psc_raw_tx_rdy(struct uart_port *port) { return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL); } -static int mpc512x_psc_rx_rdy(struct uart_port *port) +static unsigned int mpc512x_psc_rx_rdy(struct uart_port *port) { return in_be32(&FIFO_512x(port)->rxsr) & in_be32(&FIFO_512x(port)->rximr) & MPC512x_PSC_FIFO_ALARM; } -static int mpc512x_psc_tx_rdy(struct uart_port *port) +static unsigned int mpc512x_psc_tx_rdy(struct uart_port *port) { return in_be32(&FIFO_512x(port)->txsr) & in_be32(&FIFO_512x(port)->tximr) & MPC512x_PSC_FIFO_ALARM; } -static int mpc512x_psc_tx_empty(struct uart_port *port) +static unsigned int mpc512x_psc_tx_empty(struct uart_port *port) { return in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_EMPTY; @@ -780,29 +780,29 @@ static void mpc5125_psc_fifo_init(struct uart_port *port) out_be32(&FIFO_5125(port)->rximr, MPC512x_PSC_FIFO_ALARM); } -static int mpc5125_psc_raw_rx_rdy(struct uart_port *port) +static unsigned int mpc5125_psc_raw_rx_rdy(struct uart_port *port) { return !(in_be32(&FIFO_5125(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY); } -static int mpc5125_psc_raw_tx_rdy(struct uart_port *port) +static unsigned int mpc5125_psc_raw_tx_rdy(struct uart_port *port) { return !(in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_FULL); } -static int mpc5125_psc_rx_rdy(struct uart_port *port) +static unsigned int 
mpc5125_psc_rx_rdy(struct uart_port *port) { return in_be32(&FIFO_5125(port)->rxsr) & in_be32(&FIFO_5125(port)->rximr) & MPC512x_PSC_FIFO_ALARM; } -static int mpc5125_psc_tx_rdy(struct uart_port *port) +static unsigned int mpc5125_psc_tx_rdy(struct uart_port *port) { return in_be32(&FIFO_5125(port)->txsr) & in_be32(&FIFO_5125(port)->tximr) & MPC512x_PSC_FIFO_ALARM; } -static int mpc5125_psc_tx_empty(struct uart_port *port) +static unsigned int mpc5125_psc_tx_empty(struct uart_port *port) { return in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_EMPTY; } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 2f4fb09f1e89..79001301b383 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -163,6 +163,7 @@ struct mlx5_vdpa_net { u32 cur_num_vqs; struct notifier_block nb; struct vdpa_callback config_cb; + struct mlx5_vdpa_wq_ent cvq_ent; }; static void free_resources(struct mlx5_vdpa_net *ndev); @@ -1658,6 +1659,12 @@ static void mlx5_cvq_kick_handler(struct work_struct *work) mvdev = wqent->mvdev; ndev = to_mlx5_vdpa_ndev(mvdev); cvq = &mvdev->cvq; + + mutex_lock(&ndev->reslock); + + if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) + goto out; + if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) goto out; @@ -1696,9 +1703,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work) if (vringh_need_notify_iotlb(&cvq->vring)) vringh_notify(&cvq->vring); + + queue_work(mvdev->wq, &wqent->work); + break; } + out: - kfree(wqent); + mutex_unlock(&ndev->reslock); } static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx) @@ -1706,7 +1717,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx) struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); struct mlx5_vdpa_virtqueue *mvq; - struct mlx5_vdpa_wq_ent *wqent; if (!is_index_valid(mvdev, idx)) return; @@ -1715,13 +1725,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx) if (!mvdev->wq || !mvdev->cvq.ready) return; - wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC); - if (!wqent) - return; - - wqent->mvdev = mvdev; - INIT_WORK(&wqent->work, mlx5_cvq_kick_handler); - queue_work(mvdev->wq, &wqent->work); + queue_work(mvdev->wq, &ndev->cvq_ent.work); return; } @@ -2180,7 +2184,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb goto err_mr; if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) - return 0; + goto err_mr; restore_channels_info(ndev); err = setup_driver(mvdev); @@ -2195,12 +2199,14 @@ err_mr: return err; } +/* reslock must be held for this function */ static int setup_driver(struct mlx5_vdpa_dev *mvdev) { struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); int err; - mutex_lock(&ndev->reslock); + WARN_ON(!mutex_is_locked(&ndev->reslock)); + if (ndev->setup) { mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n"); err = 0; @@ -2230,7 +2236,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev) goto err_fwd; } ndev->setup = true; - mutex_unlock(&ndev->reslock); return 0; @@ -2241,23 +2246,23 @@ err_tir: err_rqt: teardown_virtqueues(ndev); out: - mutex_unlock(&ndev->reslock); return err; } +/* reslock must be held for this function */ static void teardown_driver(struct mlx5_vdpa_net *ndev) { - mutex_lock(&ndev->reslock); + + WARN_ON(!mutex_is_locked(&ndev->reslock)); + if (!ndev->setup) - goto out; + return; remove_fwd_to_tir(ndev); destroy_tir(ndev); destroy_rqt(ndev); teardown_virtqueues(ndev); ndev->setup = false; 
-out: - mutex_unlock(&ndev->reslock); } static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) @@ -2278,6 +2283,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) print_status(mvdev, status, true); + mutex_lock(&ndev->reslock); + if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { if (status & VIRTIO_CONFIG_S_DRIVER_OK) { err = setup_driver(mvdev); @@ -2287,16 +2294,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) } } else { mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n"); - return; + goto err_clear; } } ndev->mvdev.status = status; + mutex_unlock(&ndev->reslock); return; err_setup: mlx5_vdpa_destroy_mr(&ndev->mvdev); ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; +err_clear: + mutex_unlock(&ndev->reslock); } static int mlx5_vdpa_reset(struct vdpa_device *vdev) @@ -2306,6 +2316,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev) print_status(mvdev, 0, true); mlx5_vdpa_info(mvdev, "performing device reset\n"); + + mutex_lock(&ndev->reslock); teardown_driver(ndev); clear_vqs_ready(ndev); mlx5_vdpa_destroy_mr(&ndev->mvdev); @@ -2318,6 +2330,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev) if (mlx5_vdpa_create_mr(mvdev, NULL)) mlx5_vdpa_warn(mvdev, "create MR failed\n"); } + mutex_unlock(&ndev->reslock); return 0; } @@ -2353,19 +2366,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev) static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb) { struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); bool change_map; int err; + mutex_lock(&ndev->reslock); + err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map); if (err) { mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err); - return err; + goto err; } if (change_map) - return mlx5_vdpa_change_map(mvdev, iotlb); + err = mlx5_vdpa_change_map(mvdev, iotlb); - return 0; +err: + mutex_unlock(&ndev->reslock); + return err; } static void mlx5_vdpa_free(struct vdpa_device *vdev) @@ -2740,6 +2758,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, if (err) goto err_mr; + ndev->cvq_ent.mvdev = mvdev; + INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); if (!mvdev->wq) { err = -ENOMEM; diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index b7bb16f92ac6..06b6f3594a13 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -36,6 +36,10 @@ static bool nointxmask; static bool disable_vga; static bool disable_idle_d3; +/* List of PF's that vfio_pci_core_sriov_configure() has been called on */ +static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex); +static LIST_HEAD(vfio_pci_sriov_pfs); + static inline bool vfio_vga_disabled(void) { #ifdef CONFIG_VFIO_PCI_VGA @@ -434,47 +438,17 @@ out: } EXPORT_SYMBOL_GPL(vfio_pci_core_disable); -static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev) -{ - struct pci_dev *physfn = pci_physfn(vdev->pdev); - struct vfio_device *pf_dev; - - if (!vdev->pdev->is_virtfn) - return NULL; - - pf_dev = vfio_device_get_from_dev(&physfn->dev); - if (!pf_dev) - return NULL; - - if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) { - vfio_device_put(pf_dev); - return NULL; - } - - return container_of(pf_dev, struct vfio_pci_core_device, vdev); -} - -static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val) -{ - struct vfio_pci_core_device *pf_vdev = 
get_pf_vdev(vdev); - - if (!pf_vdev) - return; - - mutex_lock(&pf_vdev->vf_token->lock); - pf_vdev->vf_token->users += val; - WARN_ON(pf_vdev->vf_token->users < 0); - mutex_unlock(&pf_vdev->vf_token->lock); - - vfio_device_put(&pf_vdev->vdev); -} - void vfio_pci_core_close_device(struct vfio_device *core_vdev) { struct vfio_pci_core_device *vdev = container_of(core_vdev, struct vfio_pci_core_device, vdev); - vfio_pci_vf_token_user_add(vdev, -1); + if (vdev->sriov_pf_core_dev) { + mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock); + WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users); + vdev->sriov_pf_core_dev->vf_token->users--; + mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock); + } vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_core_disable(vdev); @@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev) { vfio_pci_probe_mmaps(vdev); vfio_spapr_pci_eeh_open(vdev->pdev); - vfio_pci_vf_token_user_add(vdev, 1); + + if (vdev->sriov_pf_core_dev) { + mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock); + vdev->sriov_pf_core_dev->vf_token->users++; + mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock); + } } EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable); @@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, * * If the VF token is provided but unused, an error is generated. */ - if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token) - return 0; /* No VF token provided or required */ - if (vdev->pdev->is_virtfn) { - struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev); + struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev; bool match; if (!pf_vdev) { @@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, } if (!vf_token) { - vfio_device_put(&pf_vdev->vdev); pci_info_ratelimited(vdev->pdev, "VF token required to access device\n"); return -EACCES; @@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev, match = uuid_equal(uuid, &pf_vdev->vf_token->uuid); mutex_unlock(&pf_vdev->vf_token->lock); - vfio_device_put(&pf_vdev->vdev); - if (!match) { pci_info_ratelimited(vdev->pdev, "Incorrect VF token provided for device\n"); @@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb, static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; + struct vfio_pci_core_device *cur; + struct pci_dev *physfn; int ret; + if (pdev->is_virtfn) { + /* + * If this VF was created by our vfio_pci_core_sriov_configure() + * then we can find the PF vfio_pci_core_device now, and due to + * the locking in pci_disable_sriov() it cannot change until + * this VF device driver is removed. 
+ */ + physfn = pci_physfn(vdev->pdev); + mutex_lock(&vfio_pci_sriov_pfs_mutex); + list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) { + if (cur->pdev == physfn) { + vdev->sriov_pf_core_dev = cur; + break; + } + } + mutex_unlock(&vfio_pci_sriov_pfs_mutex); + return 0; + } + + /* Not a SRIOV PF */ if (!pdev->is_physfn) return 0; @@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, INIT_LIST_HEAD(&vdev->ioeventfds_list); mutex_init(&vdev->vma_lock); INIT_LIST_HEAD(&vdev->vma_list); + INIT_LIST_HEAD(&vdev->sriov_pfs_item); init_rwsem(&vdev->memory_lock); } EXPORT_SYMBOL_GPL(vfio_pci_core_init_device); @@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; - pci_disable_sriov(pdev); + vfio_pci_core_sriov_configure(pdev, 0); vfio_unregister_group_dev(&vdev->vdev); @@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected); int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn) { + struct vfio_pci_core_device *vdev; struct vfio_device *device; int ret = 0; + device_lock_assert(&pdev->dev); + device = vfio_device_get_from_dev(&pdev->dev); if (!device) return -ENODEV; - if (nr_virtfn == 0) - pci_disable_sriov(pdev); - else + vdev = container_of(device, struct vfio_pci_core_device, vdev); + + if (nr_virtfn) { + mutex_lock(&vfio_pci_sriov_pfs_mutex); + /* + * The thread that adds the vdev to the list is the only thread + * that gets to call pci_enable_sriov() and we will only allow + * it to be called once without going through + * pci_disable_sriov() + */ + if (!list_empty(&vdev->sriov_pfs_item)) { + ret = -EINVAL; + goto out_unlock; + } + list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs); + mutex_unlock(&vfio_pci_sriov_pfs_mutex); ret = pci_enable_sriov(pdev, nr_virtfn); + if (ret) + goto out_del; + ret = nr_virtfn; + goto out_put; + } - vfio_device_put(device); + pci_disable_sriov(pdev); - return ret < 0 ? ret : nr_virtfn; +out_del: + mutex_lock(&vfio_pci_sriov_pfs_mutex); + list_del_init(&vdev->sriov_pfs_item); +out_unlock: + mutex_unlock(&vfio_pci_sriov_pfs_mutex); +out_put: + vfio_device_put(device); + return ret; } EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 34d6bb1bf82e..a6bb0e438216 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1579,7 +1579,14 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a, * If it's not a platform device, at least print a warning. A * fix would add code to remove the device from the system. */ - if (dev_is_platform(device)) { + if (!device) { + /* TODO: Represent each OF framebuffer as its own + * device in the device hierarchy. For now, offb + * doesn't have such a device, so unregister the + * framebuffer as before without warning. + */ + do_unregister_framebuffer(registered_fb[i]); + } else if (dev_is_platform(device)) { registered_fb[i]->forced_out = true; platform_device_unregister(to_platform_device(device)); } else { diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 75c8d560bbd3..22f15f444f75 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -526,9 +526,8 @@ int virtio_device_restore(struct virtio_device *dev) goto err; } - /* If restore didn't do it, mark device DRIVER_OK ourselves. 
*/ - if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK)) - virtio_device_ready(dev); + /* Finally, tell the device we're all set */ + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); virtio_config_enable(dev); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index dfe26fa17e95..617a7f4f07a8 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -689,29 +689,34 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages) } EXPORT_SYMBOL(xen_free_ballooned_pages); -#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC) -static void __init balloon_add_region(unsigned long start_pfn, - unsigned long pages) +static void __init balloon_add_regions(void) { +#if defined(CONFIG_XEN_PV) + unsigned long start_pfn, pages; unsigned long pfn, extra_pfn_end; + unsigned int i; - /* - * If the amount of usable memory has been limited (e.g., with - * the 'mem' command line parameter), don't add pages beyond - * this limit. - */ - extra_pfn_end = min(max_pfn, start_pfn + pages); + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + pages = xen_extra_mem[i].n_pfns; + if (!pages) + continue; - for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) { - /* totalram_pages and totalhigh_pages do not - include the boot-time balloon extension, so - don't subtract from it. */ - balloon_append(pfn_to_page(pfn)); - } + start_pfn = xen_extra_mem[i].start_pfn; - balloon_stats.total_pages += extra_pfn_end - start_pfn; -} + /* + * If the amount of usable memory has been limited (e.g., with + * the 'mem' command line parameter), don't add pages beyond + * this limit. + */ + extra_pfn_end = min(max_pfn, start_pfn + pages); + + for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) + balloon_append(pfn_to_page(pfn)); + + balloon_stats.total_pages += extra_pfn_end - start_pfn; + } #endif +} static int __init balloon_init(void) { @@ -745,20 +750,7 @@ static int __init balloon_init(void) register_sysctl_table(xen_root); #endif -#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC) - { - int i; - - /* - * Initialize the balloon with pages from the extra memory - * regions (see arch/x86/xen/setup.c). - */ - for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) - if (xen_extra_mem[i].n_pfns) - balloon_add_region(xen_extra_mem[i].start_pfn, - xen_extra_mem[i].n_pfns); - } -#endif + balloon_add_regions(); task = kthread_run(balloon_thread, NULL, "xen-balloon"); if (IS_ERR(task)) { diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c index a8b41057c382..a39f2d36dd9c 100644 --- a/drivers/xen/unpopulated-alloc.c +++ b/drivers/xen/unpopulated-alloc.c @@ -230,39 +230,6 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) } EXPORT_SYMBOL(xen_free_unpopulated_pages); -#ifdef CONFIG_XEN_PV -static int __init init(void) -{ - unsigned int i; - - if (!xen_domain()) - return -ENODEV; - - if (!xen_pv_domain()) - return 0; - - /* - * Initialize with pages from the extra memory regions (see - * arch/x86/xen/setup.c). - */ - for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { - unsigned int j; - - for (j = 0; j < xen_extra_mem[i].n_pfns; j++) { - struct page *pg = - pfn_to_page(xen_extra_mem[i].start_pfn + j); - - pg->zone_device_data = page_list; - page_list = pg; - list_count++; - } - } - - return 0; -} -subsys_initcall(init); -#endif - static int __init unpopulated_init(void) { int ret; |