diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-15 14:10:09 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-15 14:10:09 -0800 |
commit | 2911ed9f47b47cb5ab87d03314b3b9fe008e607f (patch) | |
tree | 7357e609aac80001b12a3933122060a777e67578 /drivers/misc | |
parent | 7240153a9bdb77217b99b76fd73105bce12770be (diff) | |
parent | 93f998879cd95b3e4f2836e7b17d6d5ae035cf90 (diff) | |
download | linux-2911ed9f47b47cb5ab87d03314b3b9fe008e607f.tar.bz2 |
Merge tag 'char-misc-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char / misc driver updates from Greg KH:
"Here is the big char/misc driver update for 5.11-rc1.
Continuing the tradition of previous -rc1 pulls, there seems to be
more and more tiny driver subsystems flowing through this tree.
Lots of different things, all of which have been in linux-next for a
while with no reported issues:
- extcon driver updates
- habanalabs driver updates
- mei driver updates
- uio driver updates
- binder fixes and features added
- soundwire driver updates
- mhi bus driver updates
- phy driver updates
- coresight driver updates
- fpga driver updates
- speakup driver updates
- slimbus driver updates
- various small char and misc driver updates"
* tag 'char-misc-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (305 commits)
extcon: max77693: Fix modalias string
extcon: fsa9480: Support TI TSU6111 variant
extcon: fsa9480: Rewrite bindings in YAML and extend
dt-bindings: extcon: add binding for TUSB320
extcon: Add driver for TI TUSB320
slimbus: qcom: fix potential NULL dereference in qcom_slim_prg_slew()
siox: Make remove callback return void
siox: Use bus_type functions for probe, remove and shutdown
spmi: Add driver shutdown support
spmi: fix some coding style issues at the spmi core
spmi: get rid of a warning when built with W=1
uio: uio_hv_generic: use devm_kzalloc() for private data alloc
uio: uio_fsl_elbc_gpcm: use device-managed allocators
uio: uio_aec: use devm_kzalloc() for uio_info object
uio: uio_cif: use devm_kzalloc() for uio_info object
uio: uio_netx: use devm_kzalloc() for uio_info object
uio: uio_mf624: use devm_kzalloc() for uio_info object
uio: uio_sercos3: use device-managed functions for simple allocs
uio: uio_dmem_genirq: finalize conversion of probe to devm_ handlers
uio: uio_dmem_genirq: convert simple allocations to device-managed
...
Diffstat (limited to 'drivers/misc')
71 files changed, 18161 insertions, 1944 deletions
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index 5bdf57472314..92c0611034b0 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -2265,11 +2265,6 @@ static int altera_check_crc(u8 *p, s32 program_size) "actual %04x\n", __func__, local_expected, local_actual); break; - case -ENODATA: - printk(KERN_ERR "%s: expected CRC not found, " - "actual CRC = %04x\n", __func__, - local_actual); - break; case -EIO: printk(KERN_ERR "%s: error: format isn't " "recognized.\n", __func__); diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index 80d87e8a0bea..fb9a1b49ff6d 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c @@ -899,7 +899,7 @@ struct c2port_device *c2port_device_register(char *name, unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set)) return ERR_PTR(-EINVAL); - c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL); + c2dev = kzalloc(sizeof(struct c2port_device), GFP_KERNEL); if (unlikely(!c2dev)) return ERR_PTR(-ENOMEM); diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index b85279f1fc5e..b2676e7f5027 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -73,6 +73,9 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + + pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg); + if (rtsx_check_mmc_support(reg)) pcr->extra_caps |= EXTRA_CAPS_NO_MMC; pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); @@ -278,15 +281,28 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); - if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN); - rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 
0x01, 0x00); - rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20); + + if (pcr->rtd3_en) { + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { + rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x01); + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x30); + } else { + rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x01); + rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x33); + } } else { - rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30); - rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00); + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { + rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20); + } else { + rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30); + rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00); + } } + /* * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced * to drive low, and we forcibly request clock. diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 5d15607027e9..2700d1997750 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -20,6 +20,8 @@ #include <linux/rtsx_pci.h> #include <linux/mmc/card.h> #include <asm/unaligned.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include "rtsx_pcr.h" #include "rts5261.h" @@ -89,9 +91,15 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable) if (pcr->aspm_enabled == enable) return; - pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPMC, - enable ? pcr->aspm_en : 0); + if (pcr->aspm_en & 0x02) + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 | + FORCE_ASPM_CTL1, enable ? 
0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1); + else + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 | + FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1); + + if (!enable && (pcr->aspm_en & 0x02)) + mdelay(10); pcr->aspm_enabled = enable; } @@ -144,6 +152,12 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr) if (pcr->remove_pci) return; + if (pcr->rtd3_en) + if (pcr->is_runtime_suspended) { + pm_runtime_get(&(pcr->pci->dev)); + pcr->is_runtime_suspended = false; + } + if (pcr->state != PDEV_STAT_RUN) { pcr->state = PDEV_STAT_RUN; if (pcr->ops->enable_auto_blink) @@ -1075,6 +1089,16 @@ static void rtsx_pm_power_saving(struct rtsx_pcr *pcr) rtsx_comm_pm_power_saving(pcr); } +static void rtsx_pci_rtd3_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work); + + pcr_dbg(pcr, "--> %s\n", __func__); + if (!pcr->is_runtime_suspended) + pm_runtime_put(&(pcr->pci->dev)); +} + static void rtsx_pci_idle_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); @@ -1094,6 +1118,9 @@ static void rtsx_pci_idle_work(struct work_struct *work) rtsx_pm_power_saving(pcr); mutex_unlock(&pcr->pcr_mutex); + + if (pcr->rtd3_en) + mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000)); } static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) @@ -1283,7 +1310,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) /* Wait SSC power stable */ udelay(200); - rtsx_pci_disable_aspm(pcr); + rtsx_disable_aspm(pcr); if (pcr->ops->optimize_phy) { err = pcr->ops->optimize_phy(pcr); if (err < 0) @@ -1357,8 +1384,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) rtsx_pci_init_ocp(pcr); /* Enable clk_request_n to enable clock power management */ - pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_CLKREQ_EN); + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + 0, PCI_EXP_LNKCTL_CLKREQ_EN); /* 
Enter L1 when host tx idle */ pci_write_config_byte(pdev, 0x70F, 0x5B); @@ -1368,6 +1395,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) return err; } + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30); + /* No CD interrupt if probing driver with card inserted. * So we need to initialize pcr->card_exist here. */ @@ -1571,6 +1600,15 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, rtsx_pcr_cells[i].platform_data = handle; rtsx_pcr_cells[i].pdata_size = sizeof(*handle); } + + if (pcr->rtd3_en) { + INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work); + pm_runtime_allow(&pcidev->dev); + pm_runtime_enable(&pcidev->dev); + pcr->is_runtime_suspended = false; + } + + ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells, ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL); if (ret < 0) @@ -1608,6 +1646,9 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) struct pcr_handle *handle = pci_get_drvdata(pcidev); struct rtsx_pcr *pcr = handle->pcr; + if (pcr->rtd3_en) + pm_runtime_get_noresume(&pcr->pci->dev); + pcr->remove_pci = true; /* Disable interrupts at the pcr level */ @@ -1618,6 +1659,8 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) cancel_delayed_work_sync(&pcr->carddet_work); cancel_delayed_work_sync(&pcr->idle_work); + if (pcr->rtd3_en) + cancel_delayed_work_sync(&pcr->rtd3_work); mfd_remove_devices(&pcidev->dev); @@ -1635,6 +1678,11 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) idr_remove(&rtsx_pci_idr, pcr->id); spin_unlock(&rtsx_pci_lock); + if (pcr->rtd3_en) { + pm_runtime_disable(&pcr->pci->dev); + pm_runtime_put_noidle(&pcr->pci->dev); + } + kfree(pcr->slots); kfree(pcr); kfree(handle); @@ -1716,13 +1764,77 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev) pci_disable_msi(pcr->pci); } +static int rtsx_pci_runtime_suspend(struct device *device) +{ + struct pci_dev *pcidev = to_pci_dev(device); + struct pcr_handle *handle; + struct rtsx_pcr *pcr; + + handle = pci_get_drvdata(pcidev); + pcr = handle->pcr; + 
dev_dbg(&(pcidev->dev), "--> %s\n", __func__); + + cancel_delayed_work(&pcr->carddet_work); + cancel_delayed_work(&pcr->rtd3_work); + cancel_delayed_work(&pcr->idle_work); + + mutex_lock(&pcr->pcr_mutex); + rtsx_pci_power_off(pcr, HOST_ENTER_S3); + + free_irq(pcr->irq, (void *)pcr); + + mutex_unlock(&pcr->pcr_mutex); + + pcr->is_runtime_suspended = true; + + return 0; +} + +static int rtsx_pci_runtime_resume(struct device *device) +{ + struct pci_dev *pcidev = to_pci_dev(device); + struct pcr_handle *handle; + struct rtsx_pcr *pcr; + int ret = 0; + + handle = pci_get_drvdata(pcidev); + pcr = handle->pcr; + dev_dbg(&(pcidev->dev), "--> %s\n", __func__); + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00); + rtsx_pci_acquire_irq(pcr); + synchronize_irq(pcr->irq); + + if (pcr->ops->fetch_vendor_settings) + pcr->ops->fetch_vendor_settings(pcr); + + rtsx_pci_init_hw(pcr); + + if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) { + pcr->slots[RTSX_SD_CARD].card_event( + pcr->slots[RTSX_SD_CARD].p_dev); + } + + schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); + + mutex_unlock(&pcr->pcr_mutex); + return ret; +} + #else /* CONFIG_PM */ #define rtsx_pci_shutdown NULL +#define rtsx_pci_runtime_suspend NULL +#define rtsx_pic_runtime_resume NULL #endif /* CONFIG_PM */ -static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume); +static const struct dev_pm_ops rtsx_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume) + SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL) +}; static struct pci_driver rtsx_pci_driver = { .name = DRV_NAME_RTSX_PCI, diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h index fe5f4ca0f937..daf057c4eea6 100644 --- a/drivers/misc/cardreader/rtsx_pcr.h +++ b/drivers/misc/cardreader/rtsx_pcr.h @@ -90,6 +90,7 @@ static inline u8 map_sd_drive(int idx) #define rtsx_check_mmc_support(reg) ((reg) & 0x10) #define 
rtsx_reg_to_rtd3(reg) ((reg) & 0x02) +#define rtsx_reg_to_rtd3_uhsii(reg) ((reg) & 0x04) #define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03) #define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03) #define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03) diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 3b7d8b7584f4..b76e4901b4a4 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -22,6 +22,9 @@ * mean that some AT25 products are EEPROMs, and others are FLASH. * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver, * not this one! + * + * EEPROMs that can be used with this driver include, for example: + * AT25M02, AT25128B */ struct at25_data { diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index c9b886618071..2e1befbd1ad9 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c @@ -1089,24 +1089,9 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) } /* check for 64-bit DMA address supported (DAC) */ - if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { - err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pci_dev->dev, - "err: DMA64 consistent mask error\n"); - err = -EIO; - goto out_release_resources; - } /* check for 32-bit DMA address supported (SAC) */ - } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { - err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pci_dev->dev, - "err: DMA32 consistent mask error\n"); - err = -EIO; - goto out_release_resources; - } - } else { + if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) || + dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) { dev_err(&pci_dev->dev, "err: neither DMA32 nor DMA64 supported\n"); err = -EIO; diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index ada570f35a41..6f6a904ab6ca 100644 --- 
a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -11,7 +11,6 @@ #include <linux/mm.h> #include <linux/slab.h> #include <linux/uaccess.h> -#include <linux/genalloc.h> static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) { @@ -68,9 +67,9 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) bus_addr = cb->bus_address; offset = 0; list_for_each_entry(va_block, &cb->va_block_list, node) { - rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size, - list_is_last(&va_block->node, - &cb->va_block_list)); + rc = hl_mmu_map_page(ctx, va_block->start, bus_addr, + va_block->size, list_is_last(&va_block->node, + &cb->va_block_list)); if (rc) { dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", va_block->start); @@ -93,7 +92,7 @@ err_va_umap: list_for_each_entry(va_block, &cb->va_block_list, node) { if (offset <= 0) break; - hl_mmu_unmap(ctx, va_block->start, va_block->size, + hl_mmu_unmap_page(ctx, va_block->start, va_block->size, offset <= va_block->size); offset -= va_block->size; } @@ -120,7 +119,7 @@ static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) mutex_lock(&ctx->mmu_lock); list_for_each_entry(va_block, &cb->va_block_list, node) - if (hl_mmu_unmap(ctx, va_block->start, va_block->size, + if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size, list_is_last(&va_block->node, &cb->va_block_list))) dev_warn_ratelimited(hdev->dev, @@ -376,17 +375,49 @@ int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle) return rc; } +static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u64 cb_handle, u32 *usage_cnt) +{ + struct hl_cb *cb; + u32 handle; + int rc = 0; + + /* The CB handle was given to user to do mmap, so need to shift it back + * to the value which was allocated by the IDR module. 
+ */ + cb_handle >>= PAGE_SHIFT; + handle = (u32) cb_handle; + + spin_lock(&mgr->cb_lock); + + cb = idr_find(&mgr->cb_handles, handle); + if (!cb) { + dev_err(hdev->dev, + "CB info failed, no match to handle 0x%x\n", handle); + rc = -EINVAL; + goto out; + } + + *usage_cnt = atomic_read(&cb->cs_cnt); + +out: + spin_unlock(&mgr->cb_lock); + return rc; +} + int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) { union hl_cb_args *args = data; struct hl_device *hdev = hpriv->hdev; + enum hl_device_status status; u64 handle = 0; + u32 usage_cnt = 0; int rc; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(hdev->dev, "Device is %s. Can't execute CB IOCTL\n", - atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } @@ -413,6 +444,13 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) args->in.cb_handle); break; + case HL_CB_OP_INFO: + rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle, + &usage_cnt); + memset(args, 0, sizeof(*args)); + args->out.usage_cnt = usage_cnt; + break; + default: rc = -ENOTTY; break; @@ -517,6 +555,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) } cb->mmap_size = cb->size; + vma->vm_pgoff = handle; return 0; diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index b2b974ecc431..beb482310a58 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -11,11 +11,25 @@ #include <linux/uaccess.h> #include <linux/slab.h> -#define HL_CS_FLAGS_SIG_WAIT (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT) +#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \ + HL_CS_FLAGS_COLLECTIVE_WAIT) + +/** + * enum hl_cs_wait_status - cs wait status + * @CS_WAIT_STATUS_BUSY: cs was not completed yet + * @CS_WAIT_STATUS_COMPLETED: cs completed + * @CS_WAIT_STATUS_GONE: cs completed but fence 
is already gone + */ +enum hl_cs_wait_status { + CS_WAIT_STATUS_BUSY, + CS_WAIT_STATUS_COMPLETED, + CS_WAIT_STATUS_GONE +}; static void job_wq_completion(struct work_struct *work); -static long _hl_cs_wait_ioctl(struct hl_device *hdev, - struct hl_ctx *ctx, u64 timeout_us, u64 seq); +static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, + u64 timeout_us, u64 seq, + enum hl_cs_wait_status *status, s64 *timestamp); static void cs_do_release(struct kref *ref); static void hl_sob_reset(struct kref *ref) @@ -38,6 +52,38 @@ void hl_sob_reset_error(struct kref *ref) hw_sob->q_idx, hw_sob->sob_id); } +/** + * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet + * @sob_base: sob base id + * @sob_mask: sob user mask, each bit represents a sob offset from sob base + * @mask: generated mask + * + * Return: 0 if given parameters are valid + */ +int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask) +{ + int i; + + if (sob_mask == 0) + return -EINVAL; + + if (sob_mask == 0x1) { + *mask = ~(1 << (sob_base & 0x7)); + } else { + /* find msb in order to verify sob range is valid */ + for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--) + if (BIT(i) & sob_mask) + break; + + if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1)) + return -EINVAL; + + *mask = ~sob_mask; + } + + return 0; +} + static void hl_fence_release(struct kref *kref) { struct hl_fence *fence = @@ -53,7 +99,8 @@ static void hl_fence_release(struct kref *kref) goto free; if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) || - (hl_cs_cmpl->type == CS_TYPE_WAIT)) { + (hl_cs_cmpl->type == CS_TYPE_WAIT) || + (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) { dev_dbg(hdev->dev, "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n", @@ -80,6 +127,10 @@ static void hl_fence_release(struct kref *kref) * hence the above scenario is avoided. 
*/ kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset); + + if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) + hdev->asic_funcs->reset_sob_group(hdev, + hl_cs_cmpl->sob_group); } free: @@ -102,10 +153,11 @@ static void hl_fence_init(struct hl_fence *fence) { kref_init(&fence->refcount); fence->error = 0; + fence->timestamp = ktime_set(0, 0); init_completion(&fence->completion); } -static void cs_get(struct hl_cs *cs) +void cs_get(struct hl_cs *cs) { kref_get(&cs->refcount); } @@ -120,6 +172,18 @@ static void cs_put(struct hl_cs *cs) kref_put(&cs->refcount, cs_do_release); } +static void cs_job_do_release(struct kref *ref) +{ + struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount); + + kfree(job); +} + +static void cs_job_put(struct hl_cs_job *job) +{ + kref_put(&job->refcount, cs_job_do_release); +} + static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) { /* @@ -169,10 +233,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) job->patched_cb = parser.patched_cb; job->job_cb_size = parser.patched_cb_size; job->contains_dma_pkt = parser.contains_dma_pkt; - - spin_lock(&job->patched_cb->lock); - job->patched_cb->cs_cnt++; - spin_unlock(&job->patched_cb->lock); + atomic_inc(&job->patched_cb->cs_cnt); } /* @@ -180,9 +241,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) * original CB anymore because it was already parsed and * won't be accessed again for this CS */ - spin_lock(&job->user_cb->lock); - job->user_cb->cs_cnt--; - spin_unlock(&job->user_cb->lock); + atomic_dec(&job->user_cb->cs_cnt); hl_cb_put(job->user_cb); job->user_cb = NULL; } else if (!rc) { @@ -192,7 +251,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) return rc; } -static void free_job(struct hl_device *hdev, struct hl_cs_job *job) +static void complete_job(struct hl_device *hdev, struct hl_cs_job *job) { struct hl_cs *cs = job->cs; @@ -204,10 +263,7 @@ static void free_job(struct hl_device *hdev, 
struct hl_cs_job *job) * created, so we need to check it's not NULL */ if (job->patched_cb) { - spin_lock(&job->patched_cb->lock); - job->patched_cb->cs_cnt--; - spin_unlock(&job->patched_cb->lock); - + atomic_dec(&job->patched_cb->cs_cnt); hl_cb_put(job->patched_cb); } } @@ -215,13 +271,12 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job) /* For H/W queue jobs, if a user CB was allocated by driver and MMU is * enabled, the user CB isn't released in cs_parser() and thus should be * released here. + * This is also true for INT queues jobs which were allocated by driver */ - if (job->queue_type == QUEUE_TYPE_HW && - job->is_kernel_allocated_cb && hdev->mmu_enable) { - spin_lock(&job->user_cb->lock); - job->user_cb->cs_cnt--; - spin_unlock(&job->user_cb->lock); - + if (job->is_kernel_allocated_cb && + ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) || + job->queue_type == QUEUE_TYPE_INT)) { + atomic_dec(&job->user_cb->cs_cnt); hl_cb_put(job->user_cb); } @@ -239,27 +294,12 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job) job->queue_type == QUEUE_TYPE_HW) cs_put(cs); - kfree(job); -} - -static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx) -{ - hdev->aggregated_cs_counters.device_in_reset_drop_cnt += - ctx->cs_counters.device_in_reset_drop_cnt; - hdev->aggregated_cs_counters.out_of_mem_drop_cnt += - ctx->cs_counters.out_of_mem_drop_cnt; - hdev->aggregated_cs_counters.parsing_drop_cnt += - ctx->cs_counters.parsing_drop_cnt; - hdev->aggregated_cs_counters.queue_full_drop_cnt += - ctx->cs_counters.queue_full_drop_cnt; - hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt += - ctx->cs_counters.max_cs_in_flight_drop_cnt; + cs_job_put(job); } static void cs_do_release(struct kref *ref) { - struct hl_cs *cs = container_of(ref, struct hl_cs, - refcount); + struct hl_cs *cs = container_of(ref, struct hl_cs, refcount); struct hl_device *hdev = cs->ctx->hdev; struct hl_cs_job *job, *tmp; @@ -268,77 
+308,78 @@ static void cs_do_release(struct kref *ref) /* * Although if we reached here it means that all external jobs have * finished, because each one of them took refcnt to CS, we still - * need to go over the internal jobs and free them. Otherwise, we + * need to go over the internal jobs and complete them. Otherwise, we * will have leaked memory and what's worse, the CS object (and * potentially the CTX object) could be released, while the JOB * still holds a pointer to them (but no reference). */ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) - free_job(hdev, job); + complete_job(hdev, job); - /* We also need to update CI for internal queues */ - if (cs->submitted) { - hdev->asic_funcs->hw_queues_lock(hdev); + if (!cs->submitted) { + /* In case the wait for signal CS was submitted, the put occurs + * in init_signal_wait_cs() or collective_wait_init_cs() + * right before hanging on the PQ. + */ + if (cs->type == CS_TYPE_WAIT || + cs->type == CS_TYPE_COLLECTIVE_WAIT) + hl_fence_put(cs->signal_fence); - hdev->cs_active_cnt--; - if (!hdev->cs_active_cnt) { - struct hl_device_idle_busy_ts *ts; + goto out; + } - ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++]; - ts->busy_to_idle_ts = ktime_get(); + hdev->asic_funcs->hw_queues_lock(hdev); - if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE) - hdev->idle_busy_ts_idx = 0; - } else if (hdev->cs_active_cnt < 0) { - dev_crit(hdev->dev, "CS active cnt %d is negative\n", - hdev->cs_active_cnt); - } + hdev->cs_active_cnt--; + if (!hdev->cs_active_cnt) { + struct hl_device_idle_busy_ts *ts; - hdev->asic_funcs->hw_queues_unlock(hdev); + ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++]; + ts->busy_to_idle_ts = ktime_get(); - hl_int_hw_queue_update_ci(cs); + if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE) + hdev->idle_busy_ts_idx = 0; + } else if (hdev->cs_active_cnt < 0) { + dev_crit(hdev->dev, "CS active cnt %d is negative\n", + hdev->cs_active_cnt); + } - 
spin_lock(&hdev->hw_queues_mirror_lock); - /* remove CS from hw_queues mirror list */ - list_del_init(&cs->mirror_node); - spin_unlock(&hdev->hw_queues_mirror_lock); + hdev->asic_funcs->hw_queues_unlock(hdev); - /* - * Don't cancel TDR in case this CS was timedout because we - * might be running from the TDR context - */ - if ((!cs->timedout) && - (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) { - struct hl_cs *next; + /* Need to update CI for internal queues */ + hl_int_hw_queue_update_ci(cs); - if (cs->tdr_active) - cancel_delayed_work_sync(&cs->work_tdr); + /* remove CS from CS mirror list */ + spin_lock(&hdev->cs_mirror_lock); + list_del_init(&cs->mirror_node); + spin_unlock(&hdev->cs_mirror_lock); - spin_lock(&hdev->hw_queues_mirror_lock); + /* Don't cancel TDR in case this CS was timedout because we might be + * running from the TDR context + */ + if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) { + struct hl_cs *next; - /* queue TDR for next CS */ - next = list_first_entry_or_null( - &hdev->hw_queues_mirror_list, - struct hl_cs, mirror_node); + if (cs->tdr_active) + cancel_delayed_work_sync(&cs->work_tdr); - if ((next) && (!next->tdr_active)) { - next->tdr_active = true; - schedule_delayed_work(&next->work_tdr, - hdev->timeout_jiffies); - } + spin_lock(&hdev->cs_mirror_lock); - spin_unlock(&hdev->hw_queues_mirror_lock); + /* queue TDR for next CS */ + next = list_first_entry_or_null(&hdev->cs_mirror_list, + struct hl_cs, mirror_node); + + if (next && !next->tdr_active) { + next->tdr_active = true; + schedule_delayed_work(&next->work_tdr, + hdev->timeout_jiffies); } - } else if (cs->type == CS_TYPE_WAIT) { - /* - * In case the wait for signal CS was submitted, the put occurs - * in init_signal_wait_cs() right before hanging on the PQ. 
- */ - hl_fence_put(cs->signal_fence); + + spin_unlock(&hdev->cs_mirror_lock); } - /* - * Must be called before hl_ctx_put because inside we use ctx to get +out: + /* Must be called before hl_ctx_put because inside we use ctx to get * the device */ hl_debugfs_remove_cs(cs); @@ -356,9 +397,10 @@ static void cs_do_release(struct kref *ref) else if (!cs->submitted) cs->fence->error = -EBUSY; + if (cs->timestamp) + cs->fence->timestamp = ktime_get(); complete_all(&cs->fence->completion); hl_fence_put(cs->fence); - cs_counters_aggregate(hdev, cs->ctx); kfree(cs->jobs_in_queue_cnt); kfree(cs); @@ -384,24 +426,51 @@ static void cs_timedout(struct work_struct *work) hdev = cs->ctx->hdev; - dev_err(hdev->dev, - "Command submission %llu has not finished in time!\n", - cs->sequence); + switch (cs->type) { + case CS_TYPE_SIGNAL: + dev_err(hdev->dev, + "Signal command submission %llu has not finished in time!\n", + cs->sequence); + break; + + case CS_TYPE_WAIT: + dev_err(hdev->dev, + "Wait command submission %llu has not finished in time!\n", + cs->sequence); + break; + + case CS_TYPE_COLLECTIVE_WAIT: + dev_err(hdev->dev, + "Collective Wait command submission %llu has not finished in time!\n", + cs->sequence); + break; + + default: + dev_err(hdev->dev, + "Command submission %llu has not finished in time!\n", + cs->sequence); + break; + } cs_put(cs); if (hdev->reset_on_lockup) hl_device_reset(hdev, false, false); + else + hdev->needs_reset = true; } static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, enum hl_cs_type cs_type, struct hl_cs **cs_new) { - struct hl_cs_compl *cs_cmpl; + struct hl_cs_counters_atomic *cntr; struct hl_fence *other = NULL; + struct hl_cs_compl *cs_cmpl; struct hl_cs *cs; int rc; + cntr = &hdev->aggregated_cs_counters; + cs = kzalloc(sizeof(*cs), GFP_ATOMIC); if (!cs) return -ENOMEM; @@ -435,7 +504,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, if (other && !completion_done(&other->completion)) { 
dev_dbg_ratelimited(hdev->dev, "Rejecting CS because of too many in-flights CS\n"); - ctx->cs_counters.max_cs_in_flight_drop_cnt++; + atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt); + atomic64_inc(&cntr->max_cs_in_flight_drop_cnt); rc = -EAGAIN; goto free_fence; } @@ -480,7 +550,7 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) struct hl_cs_job *job, *tmp; list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) - free_job(hdev, job); + complete_job(hdev, job); } void hl_cs_rollback_all(struct hl_device *hdev) @@ -493,8 +563,7 @@ void hl_cs_rollback_all(struct hl_device *hdev) flush_workqueue(hdev->cq_wq[i]); /* Make sure we don't have leftovers in the H/W queues mirror list */ - list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list, - mirror_node) { + list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) { cs_get(cs); cs->aborted = true; dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", @@ -512,7 +581,7 @@ static void job_wq_completion(struct work_struct *work) struct hl_device *hdev = cs->ctx->hdev; /* job is no longer needed */ - free_job(hdev, job); + complete_job(hdev, job); } static int validate_queue_index(struct hl_device *hdev, @@ -547,9 +616,36 @@ static int validate_queue_index(struct hl_device *hdev, return -EINVAL; } - *queue_type = hw_queue_prop->type; - *is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb; + /* When hw queue type isn't QUEUE_TYPE_HW, + * USER_ALLOC_CB flag shall be referred as "don't care". 
+ */ + if (hw_queue_prop->type == QUEUE_TYPE_HW) { + if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) { + if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) { + dev_err(hdev->dev, + "Queue index %d doesn't support user CB\n", + chunk->queue_index); + return -EINVAL; + } + + *is_kernel_allocated_cb = false; + } else { + if (!(hw_queue_prop->cb_alloc_flags & + CB_ALLOC_KERNEL)) { + dev_err(hdev->dev, + "Queue index %d doesn't support kernel CB\n", + chunk->queue_index); + return -EINVAL; + } + + *is_kernel_allocated_cb = true; + } + } else { + *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags + & CB_ALLOC_KERNEL); + } + *queue_type = hw_queue_prop->type; return 0; } @@ -573,9 +669,7 @@ static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev, goto release_cb; } - spin_lock(&cb->lock); - cb->cs_cnt++; - spin_unlock(&cb->lock); + atomic_inc(&cb->cs_cnt); return cb; @@ -593,6 +687,7 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, if (!job) return NULL; + kref_init(&job->refcount); job->queue_type = queue_type; job->is_kernel_allocated_cb = is_kernel_allocated_cb; @@ -605,42 +700,115 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, return job; } -static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, - u32 num_chunks, u64 *cs_seq) +static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags) +{ + if (cs_type_flags & HL_CS_FLAGS_SIGNAL) + return CS_TYPE_SIGNAL; + else if (cs_type_flags & HL_CS_FLAGS_WAIT) + return CS_TYPE_WAIT; + else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT) + return CS_TYPE_COLLECTIVE_WAIT; + else + return CS_TYPE_DEFAULT; +} + +static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) { struct hl_device *hdev = hpriv->hdev; - struct hl_cs_chunk *cs_chunk_array; - struct hl_cs_job *job; - struct hl_cs *cs; - struct hl_cb *cb; - bool int_queues_only = true; - u32 size_to_copy; - int rc, i; + struct hl_ctx *ctx = hpriv->ctx; + u32 cs_type_flags, 
num_chunks; + enum hl_device_status status; + enum hl_cs_type cs_type; - *cs_seq = ULLONG_MAX; + if (!hl_device_operational(hdev, &status)) { + dev_warn_ratelimited(hdev->dev, + "Device is %s. Can't submit new CS\n", + hdev->status[status]); + return -EBUSY; + } + + cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK; + + if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) { + dev_err(hdev->dev, + "CS type flags are mutually exclusive, context %d\n", + ctx->asid); + return -EINVAL; + } + + cs_type = hl_cs_get_cs_type(cs_type_flags); + num_chunks = args->in.num_chunks_execute; + + if (unlikely((cs_type != CS_TYPE_DEFAULT) && + !hdev->supports_sync_stream)) { + dev_err(hdev->dev, "Sync stream CS is not supported\n"); + return -EINVAL; + } + + if (cs_type == CS_TYPE_DEFAULT) { + if (!num_chunks) { + dev_err(hdev->dev, + "Got execute CS with 0 chunks, context %d\n", + ctx->asid); + return -EINVAL; + } + } else if (num_chunks != 1) { + dev_err(hdev->dev, + "Sync stream CS mandates one chunk only, context %d\n", + ctx->asid); + return -EINVAL; + } + + return 0; +} + +static int hl_cs_copy_chunk_array(struct hl_device *hdev, + struct hl_cs_chunk **cs_chunk_array, + void __user *chunks, u32 num_chunks) +{ + u32 size_to_copy; if (num_chunks > HL_MAX_JOBS_PER_CS) { dev_err(hdev->dev, "Number of chunks can NOT be larger than %d\n", HL_MAX_JOBS_PER_CS); - rc = -EINVAL; - goto out; + return -EINVAL; } - cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array), + *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array), GFP_ATOMIC); - if (!cs_chunk_array) { - rc = -ENOMEM; - goto out; - } + if (!*cs_chunk_array) + return -ENOMEM; size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); - if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) { + if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) { dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); - rc = -EFAULT; - goto free_cs_chunk_array; + kfree(*cs_chunk_array); + 
return -EFAULT; } + return 0; +} + +static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, + u32 num_chunks, u64 *cs_seq, bool timestamp) +{ + bool int_queues_only = true; + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_chunk *cs_chunk_array; + struct hl_cs_counters_atomic *cntr; + struct hl_cs_job *job; + struct hl_cs *cs; + struct hl_cb *cb; + int rc, i; + + cntr = &hdev->aggregated_cs_counters; + *cs_seq = ULLONG_MAX; + + rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks); + if (rc) + goto out; + /* increment refcnt for context */ hl_ctx_get(hdev, hpriv->ctx); @@ -650,6 +818,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, goto free_cs_chunk_array; } + cs->timestamp = !!timestamp; *cs_seq = cs->sequence; hl_debugfs_add_cs(cs); @@ -663,14 +832,17 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, rc = validate_queue_index(hdev, chunk, &queue_type, &is_kernel_allocated_cb); if (rc) { - hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt); + atomic64_inc(&cntr->parsing_drop_cnt); goto free_cs_object; } if (is_kernel_allocated_cb) { cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk); if (!cb) { - hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc( + &hpriv->ctx->cs_counters.parsing_drop_cnt); + atomic64_inc(&cntr->parsing_drop_cnt); rc = -EINVAL; goto free_cs_object; } @@ -684,7 +856,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, job = hl_cs_allocate_job(hdev, queue_type, is_kernel_allocated_cb); if (!job) { - hpriv->ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc( + &hpriv->ctx->cs_counters.out_of_mem_drop_cnt); + atomic64_inc(&cntr->out_of_mem_drop_cnt); dev_err(hdev->dev, "Failed to allocate a new job\n"); rc = -ENOMEM; if (is_kernel_allocated_cb) @@ -717,7 +891,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, rc = cs_parser(hpriv, job); if 
(rc) { - hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt); + atomic64_inc(&cntr->parsing_drop_cnt); dev_err(hdev->dev, "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", cs->ctx->asid, cs->sequence, job->id, rc); @@ -726,7 +901,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, } if (int_queues_only) { - hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt); + atomic64_inc(&cntr->parsing_drop_cnt); dev_err(hdev->dev, "Reject CS %d.%llu because only internal queues jobs are present\n", cs->ctx->asid, cs->sequence); @@ -747,9 +923,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, goto put_cs; release_cb: - spin_lock(&cb->lock); - cb->cs_cnt--; - spin_unlock(&cb->lock); + atomic_dec(&cb->cs_cnt); hl_cb_put(cb); free_cs_object: cs_rollback(hdev, cs); @@ -764,47 +938,234 @@ out: return rc; } -static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, - void __user *chunks, u32 num_chunks, +static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, u64 *cs_seq) { struct hl_device *hdev = hpriv->hdev; struct hl_ctx *ctx = hpriv->ctx; - struct hl_cs_chunk *cs_chunk_array, *chunk; - struct hw_queue_properties *hw_queue_prop; - struct hl_fence *sig_fence = NULL; - struct hl_cs_job *job; - struct hl_cs *cs; - struct hl_cb *cb; - enum hl_queue_type q_type; - u64 *signal_seq_arr = NULL, signal_seq; - u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size; - int rc; + bool need_soft_reset = false; + int rc = 0, do_ctx_switch; + void __user *chunks; + u32 num_chunks, tmp; + int ret; - *cs_seq = ULLONG_MAX; + do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0); - if (num_chunks > HL_MAX_JOBS_PER_CS) { + if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) { + mutex_lock(&hpriv->restore_phase_mutex); + + if (do_ctx_switch) { + rc = 
hdev->asic_funcs->context_switch(hdev, ctx->asid); + if (rc) { + dev_err_ratelimited(hdev->dev, + "Failed to switch to context %d, rejecting CS! %d\n", + ctx->asid, rc); + /* + * If we timedout, or if the device is not IDLE + * while we want to do context-switch (-EBUSY), + * we need to soft-reset because QMAN is + * probably stuck. However, we can't call to + * reset here directly because of deadlock, so + * need to do it at the very end of this + * function + */ + if ((rc == -ETIMEDOUT) || (rc == -EBUSY)) + need_soft_reset = true; + mutex_unlock(&hpriv->restore_phase_mutex); + goto out; + } + } + + hdev->asic_funcs->restore_phase_topology(hdev); + + chunks = (void __user *) (uintptr_t) args->in.chunks_restore; + num_chunks = args->in.num_chunks_restore; + + if (!num_chunks) { + dev_dbg(hdev->dev, + "Need to run restore phase but restore CS is empty\n"); + rc = 0; + } else { + rc = cs_ioctl_default(hpriv, chunks, num_chunks, + cs_seq, false); + } + + mutex_unlock(&hpriv->restore_phase_mutex); + + if (rc) { + dev_err(hdev->dev, + "Failed to submit restore CS for context %d (%d)\n", + ctx->asid, rc); + goto out; + } + + /* Need to wait for restore completion before execution phase */ + if (num_chunks) { + enum hl_cs_wait_status status; +wait_again: + ret = _hl_cs_wait_ioctl(hdev, ctx, + jiffies_to_usecs(hdev->timeout_jiffies), + *cs_seq, &status, NULL); + if (ret) { + if (ret == -ERESTARTSYS) { + usleep_range(100, 200); + goto wait_again; + } + + dev_err(hdev->dev, + "Restore CS for context %d failed to complete %d\n", + ctx->asid, ret); + rc = -ENOEXEC; + goto out; + } + } + + ctx->thread_ctx_switch_wait_token = 1; + + } else if (!ctx->thread_ctx_switch_wait_token) { + rc = hl_poll_timeout_memory(hdev, + &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1), + 100, jiffies_to_usecs(hdev->timeout_jiffies), false); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, + "context switch phase timeout (%d)\n", tmp); + goto out; + } + } + +out: + if ((rc == -ETIMEDOUT || rc == 
-EBUSY) && (need_soft_reset)) + hl_device_reset(hdev, false, false); + + return rc; +} + +static int cs_ioctl_extract_signal_seq(struct hl_device *hdev, + struct hl_cs_chunk *chunk, u64 *signal_seq) +{ + u64 *signal_seq_arr = NULL; + u32 size_to_copy, signal_seq_arr_len; + int rc = 0; + + signal_seq_arr_len = chunk->num_signal_seq_arr; + + /* currently only one signal seq is supported */ + if (signal_seq_arr_len != 1) { dev_err(hdev->dev, - "Number of chunks can NOT be larger than %d\n", - HL_MAX_JOBS_PER_CS); - rc = -EINVAL; - goto out; + "Wait for signal CS supports only one signal CS seq\n"); + return -EINVAL; } - cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array), + signal_seq_arr = kmalloc_array(signal_seq_arr_len, + sizeof(*signal_seq_arr), GFP_ATOMIC); - if (!cs_chunk_array) { - rc = -ENOMEM; + if (!signal_seq_arr) + return -ENOMEM; + + size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr); + if (copy_from_user(signal_seq_arr, + u64_to_user_ptr(chunk->signal_seq_arr), + size_to_copy)) { + dev_err(hdev->dev, + "Failed to copy signal seq array from user\n"); + rc = -EFAULT; goto out; } - size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); - if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) { - dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); - rc = -EFAULT; - goto free_cs_chunk_array; + /* currently it is guaranteed to have only one signal seq */ + *signal_seq = signal_seq_arr[0]; + +out: + kfree(signal_seq_arr); + + return rc; +} + +static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type, + u32 q_idx) +{ + struct hl_cs_counters_atomic *cntr; + struct hl_cs_job *job; + struct hl_cb *cb; + u32 cb_size; + + cntr = &hdev->aggregated_cs_counters; + + job = hl_cs_allocate_job(hdev, q_type, true); + if (!job) { + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); + atomic64_inc(&cntr->out_of_mem_drop_cnt); + dev_err(hdev->dev, 
"Failed to allocate a new job\n"); + return -ENOMEM; } + if (cs->type == CS_TYPE_WAIT) + cb_size = hdev->asic_funcs->get_wait_cb_size(hdev); + else + cb_size = hdev->asic_funcs->get_signal_cb_size(hdev); + + cb = hl_cb_kernel_create(hdev, cb_size, + q_type == QUEUE_TYPE_HW && hdev->mmu_enable); + if (!cb) { + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); + atomic64_inc(&cntr->out_of_mem_drop_cnt); + kfree(job); + return -EFAULT; + } + + job->id = 0; + job->cs = cs; + job->user_cb = cb; + atomic_inc(&job->user_cb->cs_cnt); + job->user_cb_size = cb_size; + job->hw_queue_id = q_idx; + + /* + * No need in parsing, user CB is the patched CB. + * We call hl_cb_destroy() out of two reasons - we don't need the CB in + * the CB idr anymore and to decrement its refcount as it was + * incremented inside hl_cb_kernel_create(). + */ + job->patched_cb = job->user_cb; + job->job_cb_size = job->user_cb_size; + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + /* increment refcount as for external queues we get completion */ + cs_get(cs); + + cs->jobs_in_queue_cnt[job->hw_queue_id]++; + + list_add_tail(&job->cs_node, &cs->job_list); + + hl_debugfs_add_job(hdev, job); + + return 0; +} + +static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, + void __user *chunks, u32 num_chunks, + u64 *cs_seq, bool timestamp) +{ + struct hl_cs_chunk *cs_chunk_array, *chunk; + struct hw_queue_properties *hw_queue_prop; + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_compl *sig_waitcs_cmpl; + u32 q_idx, collective_engine_id = 0; + struct hl_fence *sig_fence = NULL; + struct hl_ctx *ctx = hpriv->ctx; + enum hl_queue_type q_type; + struct hl_cs *cs; + u64 signal_seq; + int rc; + + *cs_seq = ULLONG_MAX; + + rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks); + if (rc) + goto out; + /* currently it is guaranteed to have only one chunk */ chunk = &cs_chunk_array[0]; @@ -819,60 +1180,43 @@ static int cs_ioctl_signal_wait(struct 
hl_fpriv *hpriv, enum hl_cs_type cs_type, hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx]; q_type = hw_queue_prop->type; - if ((q_idx >= hdev->asic_prop.max_queues) || - (!hw_queue_prop->supports_sync_stream)) { - dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx); + if (!hw_queue_prop->supports_sync_stream) { + dev_err(hdev->dev, + "Queue index %d does not support sync stream operations\n", + q_idx); rc = -EINVAL; goto free_cs_chunk_array; } - if (cs_type == CS_TYPE_WAIT) { - struct hl_cs_compl *sig_waitcs_cmpl; - - signal_seq_arr_len = chunk->num_signal_seq_arr; - - /* currently only one signal seq is supported */ - if (signal_seq_arr_len != 1) { + if (cs_type == CS_TYPE_COLLECTIVE_WAIT) { + if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) { dev_err(hdev->dev, - "Wait for signal CS supports only one signal CS seq\n"); + "Queue index %d is invalid\n", q_idx); rc = -EINVAL; goto free_cs_chunk_array; } - signal_seq_arr = kmalloc_array(signal_seq_arr_len, - sizeof(*signal_seq_arr), - GFP_ATOMIC); - if (!signal_seq_arr) { - rc = -ENOMEM; - goto free_cs_chunk_array; - } + collective_engine_id = chunk->collective_engine_id; + } - size_to_copy = chunk->num_signal_seq_arr * - sizeof(*signal_seq_arr); - if (copy_from_user(signal_seq_arr, - u64_to_user_ptr(chunk->signal_seq_arr), - size_to_copy)) { - dev_err(hdev->dev, - "Failed to copy signal seq array from user\n"); - rc = -EFAULT; - goto free_signal_seq_array; - } + if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) { + rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq); + if (rc) + goto free_cs_chunk_array; - /* currently it is guaranteed to have only one signal seq */ - signal_seq = signal_seq_arr[0]; sig_fence = hl_ctx_get_fence(ctx, signal_seq); if (IS_ERR(sig_fence)) { dev_err(hdev->dev, "Failed to get signal CS with seq 0x%llx\n", signal_seq); rc = PTR_ERR(sig_fence); - goto free_signal_seq_array; + goto free_cs_chunk_array; } if (!sig_fence) { /* signal CS 
already finished */ rc = 0; - goto free_signal_seq_array; + goto free_cs_chunk_array; } sig_waitcs_cmpl = @@ -884,14 +1228,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, signal_seq); hl_fence_put(sig_fence); rc = -EINVAL; - goto free_signal_seq_array; + goto free_cs_chunk_array; } if (completion_done(&sig_fence->completion)) { /* signal CS already finished */ hl_fence_put(sig_fence); rc = 0; - goto free_signal_seq_array; + goto free_cs_chunk_array; } } @@ -900,70 +1244,37 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, rc = allocate_cs(hdev, ctx, cs_type, &cs); if (rc) { - if (cs_type == CS_TYPE_WAIT) + if (cs_type == CS_TYPE_WAIT || + cs_type == CS_TYPE_COLLECTIVE_WAIT) hl_fence_put(sig_fence); hl_ctx_put(ctx); - goto free_signal_seq_array; + goto free_cs_chunk_array; } + cs->timestamp = !!timestamp; + /* * Save the signal CS fence for later initialization right before * hanging the wait CS on the queue. */ - if (cs->type == CS_TYPE_WAIT) + if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) cs->signal_fence = sig_fence; hl_debugfs_add_cs(cs); *cs_seq = cs->sequence; - job = hl_cs_allocate_job(hdev, q_type, true); - if (!job) { - ctx->cs_counters.out_of_mem_drop_cnt++; - dev_err(hdev->dev, "Failed to allocate a new job\n"); - rc = -ENOMEM; - goto put_cs; - } - - if (cs->type == CS_TYPE_WAIT) - cb_size = hdev->asic_funcs->get_wait_cb_size(hdev); + if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL) + rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, + q_idx); + else if (cs_type == CS_TYPE_COLLECTIVE_WAIT) + rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx, + cs, q_idx, collective_engine_id); else - cb_size = hdev->asic_funcs->get_signal_cb_size(hdev); - - cb = hl_cb_kernel_create(hdev, cb_size, - q_type == QUEUE_TYPE_HW && hdev->mmu_enable); - if (!cb) { - ctx->cs_counters.out_of_mem_drop_cnt++; - kfree(job); - rc = -EFAULT; - goto put_cs; - } - - 
job->id = 0; - job->cs = cs; - job->user_cb = cb; - job->user_cb->cs_cnt++; - job->user_cb_size = cb_size; - job->hw_queue_id = q_idx; - - /* - * No need in parsing, user CB is the patched CB. - * We call hl_cb_destroy() out of two reasons - we don't need the CB in - * the CB idr anymore and to decrement its refcount as it was - * incremented inside hl_cb_kernel_create(). - */ - job->patched_cb = job->user_cb; - job->job_cb_size = job->user_cb_size; - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); - - cs->jobs_in_queue_cnt[job->hw_queue_id]++; - - list_add_tail(&job->cs_node, &cs->job_list); - - /* increment refcount as for external queues we get completion */ - cs_get(cs); + rc = -EINVAL; - hl_debugfs_add_job(hdev, job); + if (rc) + goto free_cs_object; rc = hl_hw_queue_schedule_cs(cs); if (rc) { @@ -984,9 +1295,6 @@ free_cs_object: put_cs: /* We finished with the CS in this function, so put the ref */ cs_put(cs); -free_signal_seq_array: - if (cs_type == CS_TYPE_WAIT) - kfree(signal_seq_arr); free_cs_chunk_array: kfree(cs_chunk_array); out: @@ -995,156 +1303,39 @@ out: int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) { - struct hl_device *hdev = hpriv->hdev; union hl_cs_args *args = data; - struct hl_ctx *ctx = hpriv->ctx; - void __user *chunks_execute, *chunks_restore; enum hl_cs_type cs_type; - u32 num_chunks_execute, num_chunks_restore, sig_wait_flags; u64 cs_seq = ULONG_MAX; - int rc, do_ctx_switch; - bool need_soft_reset = false; - - if (hl_device_disabled_or_in_reset(hdev)) { - dev_warn_ratelimited(hdev->dev, - "Device is %s. Can't submit new CS\n", - atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); - rc = -EBUSY; - goto out; - } - - sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT; + void __user *chunks; + u32 num_chunks; + int rc; - if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) { - dev_err(hdev->dev, - "Signal and wait CS flags are mutually exclusive, context %d\n", - ctx->asid); - rc = -EINVAL; + rc = hl_cs_sanity_checks(hpriv, args); + if (rc) goto out; - } - if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) && - (!hdev->supports_sync_stream))) { - dev_err(hdev->dev, "Sync stream CS is not supported\n"); - rc = -EINVAL; + rc = hl_cs_ctx_switch(hpriv, args, &cs_seq); + if (rc) goto out; - } - if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL) - cs_type = CS_TYPE_SIGNAL; - else if (args->in.cs_flags & HL_CS_FLAGS_WAIT) - cs_type = CS_TYPE_WAIT; - else - cs_type = CS_TYPE_DEFAULT; - - chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute; - num_chunks_execute = args->in.num_chunks_execute; - - if (cs_type == CS_TYPE_DEFAULT) { - if (!num_chunks_execute) { - dev_err(hdev->dev, - "Got execute CS with 0 chunks, context %d\n", - ctx->asid); - rc = -EINVAL; - goto out; - } - } else if (num_chunks_execute != 1) { - dev_err(hdev->dev, - "Sync stream CS mandates one chunk only, context %d\n", - ctx->asid); - rc = -EINVAL; - goto out; + cs_type = hl_cs_get_cs_type(args->in.cs_flags & + ~HL_CS_FLAGS_FORCE_RESTORE); + chunks = (void __user *) (uintptr_t) args->in.chunks_execute; + num_chunks = args->in.num_chunks_execute; + + switch (cs_type) { + case CS_TYPE_SIGNAL: + case CS_TYPE_WAIT: + case CS_TYPE_COLLECTIVE_WAIT: + rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks, + &cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP); + break; + default: + rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq, + args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP); + break; } - do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0); - - if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) { - 
long ret; - - chunks_restore = - (void __user *) (uintptr_t) args->in.chunks_restore; - num_chunks_restore = args->in.num_chunks_restore; - - mutex_lock(&hpriv->restore_phase_mutex); - - if (do_ctx_switch) { - rc = hdev->asic_funcs->context_switch(hdev, ctx->asid); - if (rc) { - dev_err_ratelimited(hdev->dev, - "Failed to switch to context %d, rejecting CS! %d\n", - ctx->asid, rc); - /* - * If we timedout, or if the device is not IDLE - * while we want to do context-switch (-EBUSY), - * we need to soft-reset because QMAN is - * probably stuck. However, we can't call to - * reset here directly because of deadlock, so - * need to do it at the very end of this - * function - */ - if ((rc == -ETIMEDOUT) || (rc == -EBUSY)) - need_soft_reset = true; - mutex_unlock(&hpriv->restore_phase_mutex); - goto out; - } - } - - hdev->asic_funcs->restore_phase_topology(hdev); - - if (!num_chunks_restore) { - dev_dbg(hdev->dev, - "Need to run restore phase but restore CS is empty\n"); - rc = 0; - } else { - rc = cs_ioctl_default(hpriv, chunks_restore, - num_chunks_restore, &cs_seq); - } - - mutex_unlock(&hpriv->restore_phase_mutex); - - if (rc) { - dev_err(hdev->dev, - "Failed to submit restore CS for context %d (%d)\n", - ctx->asid, rc); - goto out; - } - - /* Need to wait for restore completion before execution phase */ - if (num_chunks_restore) { - ret = _hl_cs_wait_ioctl(hdev, ctx, - jiffies_to_usecs(hdev->timeout_jiffies), - cs_seq); - if (ret <= 0) { - dev_err(hdev->dev, - "Restore CS for context %d failed to complete %ld\n", - ctx->asid, ret); - rc = -ENOEXEC; - goto out; - } - } - - ctx->thread_ctx_switch_wait_token = 1; - } else if (!ctx->thread_ctx_switch_wait_token) { - u32 tmp; - - rc = hl_poll_timeout_memory(hdev, - &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1), - 100, jiffies_to_usecs(hdev->timeout_jiffies), false); - - if (rc == -ETIMEDOUT) { - dev_err(hdev->dev, - "context switch phase timeout (%d)\n", tmp); - goto out; - } - } - - if (cs_type == 
CS_TYPE_DEFAULT) - rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute, - &cs_seq); - else - rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute, - num_chunks_execute, &cs_seq); - out: if (rc != -EAGAIN) { memset(args, 0, sizeof(*args)); @@ -1152,18 +1343,20 @@ out: args->out.seq = cs_seq; } - if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset)) - hl_device_reset(hdev, false, false); - return rc; } -static long _hl_cs_wait_ioctl(struct hl_device *hdev, - struct hl_ctx *ctx, u64 timeout_us, u64 seq) +static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, + u64 timeout_us, u64 seq, + enum hl_cs_wait_status *status, s64 *timestamp) { struct hl_fence *fence; unsigned long timeout; - long rc; + int rc = 0; + long completion_rc; + + if (timestamp) + *timestamp = 0; if (timeout_us == MAX_SCHEDULE_TIMEOUT) timeout = timeout_us; @@ -1181,11 +1374,20 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev, seq, ctx->cs_sequence); } else if (fence) { if (!timeout_us) - rc = completion_done(&fence->completion); + completion_rc = completion_done(&fence->completion); else - rc = wait_for_completion_interruptible_timeout( + completion_rc = + wait_for_completion_interruptible_timeout( &fence->completion, timeout); + if (completion_rc > 0) { + *status = CS_WAIT_STATUS_COMPLETED; + if (timestamp) + *timestamp = ktime_to_ns(fence->timestamp); + } else { + *status = CS_WAIT_STATUS_BUSY; + } + if (fence->error == -ETIMEDOUT) rc = -ETIMEDOUT; else if (fence->error == -EIO) @@ -1196,7 +1398,7 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev, dev_dbg(hdev->dev, "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n", seq, ctx->cs_sequence); - rc = 1; + *status = CS_WAIT_STATUS_GONE; } hl_ctx_put(ctx); @@ -1208,14 +1410,17 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) { struct hl_device *hdev = hpriv->hdev; union hl_wait_cs_args *args = data; + enum hl_cs_wait_status status; u64 seq = args->in.seq; - 
long rc; + s64 timestamp; + int rc; - rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq); + rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, + &status, ×tamp); memset(args, 0, sizeof(*args)); - if (rc < 0) { + if (rc) { if (rc == -ERESTARTSYS) { dev_err_ratelimited(hdev->dev, "user process got signal while waiting for CS handle %llu\n", @@ -1236,10 +1441,23 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) return rc; } - if (rc == 0) - args->out.status = HL_WAIT_CS_STATUS_BUSY; - else + if (timestamp) { + args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD; + args->out.timestamp_nsec = timestamp; + } + + switch (status) { + case CS_WAIT_STATUS_GONE: + args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; + fallthrough; + case CS_WAIT_STATUS_COMPLETED: args->out.status = HL_WAIT_CS_STATUS_COMPLETED; + break; + case CS_WAIT_STATUS_BUSY: + default: + args->out.status = HL_WAIT_CS_STATUS_BUSY; + break; + } return 0; } diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index 7a59dd7c6450..f65e6559149b 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -40,10 +40,14 @@ static void hl_ctx_fini(struct hl_ctx *ctx) if ((hdev->in_debug) && (hdev->compute_ctx == ctx)) hl_device_set_debug_mode(hdev, false); + hdev->asic_funcs->ctx_fini(ctx); hl_cb_va_pool_fini(ctx); hl_vm_ctx_fini(ctx); hl_asid_free(hdev, ctx->asid); + /* Scrub both SRAM and DRAM */ + hdev->asic_funcs->scrub_device_mem(hdev, 0, 0); + if ((!hdev->pldm) && (hdev->pdev) && (!hdev->asic_funcs->is_device_idle(hdev, &idle_mask, NULL))) diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 912ddfa360b1..cef716643979 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -22,9 +22,10 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, u8 i2c_reg, long 
*val) { struct cpucp_packet pkt; + u64 result; int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -EBUSY; memset(&pkt, 0, sizeof(pkt)); @@ -36,7 +37,9 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.i2c_reg = i2c_reg; rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, val); + 0, &result); + + *val = (long) result; if (rc) dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); @@ -50,7 +53,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, struct cpucp_packet pkt; int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -EBUSY; memset(&pkt, 0, sizeof(pkt)); @@ -76,7 +79,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state) struct cpucp_packet pkt; int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return; memset(&pkt, 0, sizeof(pkt)); @@ -113,7 +116,7 @@ static int command_buffers_show(struct seq_file *s, void *data) " %03llu %d 0x%08x %d %d %d\n", cb->id, cb->ctx->asid, cb->size, kref_read(&cb->refcount), - cb->mmap, cb->cs_cnt); + cb->mmap, atomic_read(&cb->cs_cnt)); } spin_unlock(&dev_entry->cb_spinlock); @@ -168,18 +171,19 @@ static int command_submission_jobs_show(struct seq_file *s, void *data) if (first) { first = false; seq_puts(s, "\n"); - seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n"); - seq_puts(s, "---------------------------------------\n"); + seq_puts(s, " JOB ID CS ID CTX ASID JOB RefCnt H/W Queue\n"); + seq_puts(s, "----------------------------------------------------\n"); } if (job->cs) seq_printf(s, - " %02d %llu %d %d\n", + " %02d %llu %d %d %d\n", job->id, job->cs->sequence, job->cs->ctx->asid, - job->hw_queue_id); + kref_read(&job->refcount), job->hw_queue_id); else seq_printf(s, - " %02d 0 %d %d\n", - job->id, HL_KERNEL_ASID_ID, job->hw_queue_id); + " %02d 0 %d %d %d\n", + 
job->id, HL_KERNEL_ASID_ID, + kref_read(&job->refcount), job->hw_queue_id); } spin_unlock(&dev_entry->cs_job_spinlock); @@ -300,93 +304,15 @@ static int vm_show(struct seq_file *s, void *data) return 0; } -/* these inline functions are copied from mmu.c */ -static inline u64 get_hop0_addr(struct hl_ctx *ctx) -{ - return ctx->hdev->asic_prop.mmu_pgt_addr + - (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); -} - -static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, - u64 virt_addr, u64 mask, u64 shift) -{ - return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * - ((virt_addr & mask) >> shift); -} - -static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask, - mmu_specs->hop0_shift); -} - -static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask, - mmu_specs->hop1_shift); -} - -static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask, - mmu_specs->hop2_shift); -} - -static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask, - mmu_specs->hop3_shift); -} - -static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask, - mmu_specs->hop4_shift); -} - -static inline u64 get_hop5_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop5_mask, - mmu_specs->hop5_shift); -} - 
-static inline u64 get_next_hop_addr(u64 curr_pte) -{ - if (curr_pte & PAGE_PRESENT_MASK) - return curr_pte & HOP_PHYS_ADDR_MASK; - else - return ULLONG_MAX; -} - static int mmu_show(struct seq_file *s, void *data) { struct hl_debugfs_entry *entry = s->private; struct hl_dbg_device_entry *dev_entry = entry->dev_entry; struct hl_device *hdev = dev_entry->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; struct hl_ctx *ctx; - bool is_dram_addr; - - u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0, - hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0, - hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0, - hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0, - hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0, - hop5_addr = 0, hop5_pte_addr = 0, hop5_pte = 0, - virt_addr = dev_entry->mmu_addr; + struct hl_mmu_hop_info hops_info; + u64 virt_addr = dev_entry->mmu_addr; + int i; if (!hdev->mmu_enable) return 0; @@ -401,132 +327,24 @@ static int mmu_show(struct seq_file *s, void *data) return 0; } - is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, - prop->dmmu.start_addr, - prop->dmmu.end_addr); - - /* shifts and masks are the same in PMMU and HPMMU, use one of them */ - mmu_prop = is_dram_addr ? 
&prop->dmmu : &prop->pmmu; - - mutex_lock(&ctx->mmu_lock); - - /* the following lookup is copied from unmap() in mmu.c */ - - hop0_addr = get_hop0_addr(ctx); - hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); - hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr); - hop1_addr = get_next_hop_addr(hop0_pte); - - if (hop1_addr == ULLONG_MAX) - goto not_mapped; - - hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); - hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr); - hop2_addr = get_next_hop_addr(hop1_pte); - - if (hop2_addr == ULLONG_MAX) - goto not_mapped; - - hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); - hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr); - hop3_addr = get_next_hop_addr(hop2_pte); - - if (hop3_addr == ULLONG_MAX) - goto not_mapped; - - hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); - hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr); - - if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) { - if (!(hop3_pte & LAST_MASK)) { - hop4_addr = get_next_hop_addr(hop3_pte); - - if (hop4_addr == ULLONG_MAX) - goto not_mapped; - - hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, - hop4_addr, virt_addr); - hop4_pte = hdev->asic_funcs->read_pte(hdev, - hop4_pte_addr); - if (!(hop4_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - } else { - if (!(hop3_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - } - } else { - hop4_addr = get_next_hop_addr(hop3_pte); - - if (hop4_addr == ULLONG_MAX) - goto not_mapped; - - hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, - hop4_addr, virt_addr); - hop4_pte = hdev->asic_funcs->read_pte(hdev, - hop4_pte_addr); - if (!(hop4_pte & LAST_MASK)) { - hop5_addr = get_next_hop_addr(hop4_pte); - - if (hop5_addr == ULLONG_MAX) - goto not_mapped; - - hop5_pte_addr = get_hop5_pte_addr(ctx, mmu_prop, - hop5_addr, virt_addr); - hop5_pte = hdev->asic_funcs->read_pte(hdev, - hop5_pte_addr); - if (!(hop5_pte & PAGE_PRESENT_MASK)) 
- goto not_mapped; - } else { - if (!(hop4_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - } + if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) { + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); + return 0; } seq_printf(s, "asid: %u, virt_addr: 0x%llx\n", dev_entry->mmu_asid, dev_entry->mmu_addr); - seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr); - seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr); - seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte); - - seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr); - seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr); - seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte); - - seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr); - seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr); - seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte); - - seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr); - seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr); - seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte); - - if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) { - if (!(hop3_pte & LAST_MASK)) { - seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr); - seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr); - seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte); - } - } else { - seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr); - seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr); - seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte); - - if (!(hop4_pte & LAST_MASK)) { - seq_printf(s, "hop5_addr: 0x%llx\n", hop5_addr); - seq_printf(s, "hop5_pte_addr: 0x%llx\n", hop5_pte_addr); - seq_printf(s, "hop5_pte: 0x%llx\n", hop5_pte); - } + for (i = 0 ; i < hops_info.used_hops ; i++) { + seq_printf(s, "hop%d_addr: 0x%llx\n", + i, hops_info.hop_info[i].hop_addr); + seq_printf(s, "hop%d_pte_addr: 0x%llx\n", + i, hops_info.hop_info[i].hop_pte_addr); + seq_printf(s, "hop%d_pte: 0x%llx\n", + i, hops_info.hop_info[i].hop_pte_val); } - goto out; - -not_mapped: - dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", - virt_addr); 
-out: - mutex_unlock(&ctx->mmu_lock); - return 0; } @@ -597,7 +415,7 @@ static bool hl_is_device_va(struct hl_device *hdev, u64 addr) if (!hdev->mmu_enable) goto out; - if (hdev->dram_supports_virtual_memory && + if (prop->dram_supports_virtual_memory && (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr)) return true; @@ -616,78 +434,20 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u64 *phys_addr) { struct hl_ctx *ctx = hdev->compute_ctx; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; - u64 hop_addr, hop_pte_addr, hop_pte; - u64 offset_mask = HOP4_MASK | FLAGS_MASK; int rc = 0; - bool is_dram_addr; if (!ctx) { dev_err(hdev->dev, "no ctx available\n"); return -EINVAL; } - is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, - prop->dmmu.start_addr, - prop->dmmu.end_addr); - - /* shifts and masks are the same in PMMU and HPMMU, use one of them */ - mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; - - mutex_lock(&ctx->mmu_lock); - - /* hop 0 */ - hop_addr = get_hop0_addr(ctx); - hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - /* hop 1 */ - hop_addr = get_next_hop_addr(hop_pte); - if (hop_addr == ULLONG_MAX) - goto not_mapped; - hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - /* hop 2 */ - hop_addr = get_next_hop_addr(hop_pte); - if (hop_addr == ULLONG_MAX) - goto not_mapped; - hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - /* hop 3 */ - hop_addr = get_next_hop_addr(hop_pte); - if (hop_addr == ULLONG_MAX) - goto not_mapped; - hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - if (!(hop_pte & LAST_MASK)) { - /* hop 4 */ - hop_addr 
= get_next_hop_addr(hop_pte); - if (hop_addr == ULLONG_MAX) - goto not_mapped; - hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr, - virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - offset_mask = FLAGS_MASK; + rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr); + if (rc) { + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); + rc = -EINVAL; } - if (!(hop_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - - *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask); - - goto out; - -not_mapped: - dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", - virt_addr); - rc = -EINVAL; -out: - mutex_unlock(&ctx->mmu_lock); return rc; } diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 783bbdcb1e61..5871162a8442 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -10,20 +10,9 @@ #include "habanalabs.h" #include <linux/pci.h> -#include <linux/sched/signal.h> #include <linux/hwmon.h> #include <uapi/misc/habanalabs.h> -#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10) - -bool hl_device_disabled_or_in_reset(struct hl_device *hdev) -{ - if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) - return true; - else - return false; -} - enum hl_device_status hl_device_status(struct hl_device *hdev) { enum hl_device_status status; @@ -32,12 +21,34 @@ enum hl_device_status hl_device_status(struct hl_device *hdev) status = HL_DEVICE_STATUS_MALFUNCTION; else if (atomic_read(&hdev->in_reset)) status = HL_DEVICE_STATUS_IN_RESET; + else if (hdev->needs_reset) + status = HL_DEVICE_STATUS_NEEDS_RESET; else status = HL_DEVICE_STATUS_OPERATIONAL; return status; } +bool hl_device_operational(struct hl_device *hdev, + enum hl_device_status *status) +{ + enum hl_device_status current_status; + + current_status = hl_device_status(hdev); + if (status) + *status = current_status; + + switch 
(current_status) { + case HL_DEVICE_STATUS_IN_RESET: + case HL_DEVICE_STATUS_MALFUNCTION: + case HL_DEVICE_STATUS_NEEDS_RESET: + return false; + case HL_DEVICE_STATUS_OPERATIONAL: + default: + return true; + } +} + static void hpriv_release(struct kref *ref) { struct hl_fpriv *hpriv; @@ -243,6 +254,26 @@ put_devices: put_device(hdev->dev_ctrl); } +static void device_hard_reset_pending(struct work_struct *work) +{ + struct hl_device_reset_work *device_reset_work = + container_of(work, struct hl_device_reset_work, + reset_work.work); + struct hl_device *hdev = device_reset_work->hdev; + int rc; + + rc = hl_device_reset(hdev, true, true); + if ((rc == -EBUSY) && !hdev->device_fini_pending) { + dev_info(hdev->dev, + "Could not reset device. will try again in %u seconds", + HL_PENDING_RESET_PER_SEC); + + queue_delayed_work(device_reset_work->wq, + &device_reset_work->reset_work, + msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000)); + } +} + /* * device_early_init - do some early initialization for the habanalabs device * @@ -327,17 +358,32 @@ static int device_early_init(struct hl_device *hdev) hl_cb_mgr_init(&hdev->kernel_cb_mgr); + hdev->device_reset_work.wq = + create_singlethread_workqueue("hl_device_reset"); + if (!hdev->device_reset_work.wq) { + rc = -ENOMEM; + dev_err(hdev->dev, "Failed to create device reset WQ\n"); + goto free_cb_mgr; + } + + INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, + device_hard_reset_pending); + hdev->device_reset_work.hdev = hdev; + hdev->device_fini_pending = 0; + mutex_init(&hdev->send_cpu_message_lock); mutex_init(&hdev->debug_lock); mutex_init(&hdev->mmu_cache_lock); - INIT_LIST_HEAD(&hdev->hw_queues_mirror_list); - spin_lock_init(&hdev->hw_queues_mirror_lock); + INIT_LIST_HEAD(&hdev->cs_mirror_list); + spin_lock_init(&hdev->cs_mirror_lock); INIT_LIST_HEAD(&hdev->fpriv_list); mutex_init(&hdev->fpriv_list_lock); atomic_set(&hdev->in_reset, 0); return 0; +free_cb_mgr: + hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); 
free_idle_busy_ts_arr: kfree(hdev->idle_busy_ts_arr); free_chip_info: @@ -380,6 +426,7 @@ static void device_early_fini(struct hl_device *hdev) kfree(hdev->hl_chip_info); destroy_workqueue(hdev->eq_wq); + destroy_workqueue(hdev->device_reset_work.wq); for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) destroy_workqueue(hdev->cq_wq[i]); @@ -412,7 +459,7 @@ static void hl_device_heartbeat(struct work_struct *work) struct hl_device *hdev = container_of(work, struct hl_device, work_heartbeat.work); - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) goto reschedule; if (!hdev->asic_funcs->send_heartbeat(hdev)) @@ -758,16 +805,12 @@ disable_device: return rc; } -static int device_kill_open_processes(struct hl_device *hdev) +static int device_kill_open_processes(struct hl_device *hdev, u32 timeout) { - u16 pending_total, pending_cnt; struct hl_fpriv *hpriv; struct task_struct *task = NULL; + u32 pending_cnt; - if (hdev->pldm) - pending_total = HL_PLDM_PENDING_RESET_PER_SEC; - else - pending_total = HL_PENDING_RESET_PER_SEC; /* Giving time for user to close FD, and for processes that are inside * hl_device_open to finish @@ -775,6 +818,19 @@ static int device_kill_open_processes(struct hl_device *hdev) if (!list_empty(&hdev->fpriv_list)) ssleep(1); + if (timeout) { + pending_cnt = timeout; + } else { + if (hdev->process_kill_trial_cnt) { + /* Processes have been already killed */ + pending_cnt = 1; + goto wait_for_processes; + } else { + /* Wait a small period after process kill */ + pending_cnt = HL_PENDING_RESET_PER_SEC; + } + } + mutex_lock(&hdev->fpriv_list_lock); /* This section must be protected because we are dereferencing @@ -794,16 +850,18 @@ static int device_kill_open_processes(struct hl_device *hdev) mutex_unlock(&hdev->fpriv_list_lock); - /* We killed the open users, but because the driver cleans up after the - * user contexts are closed (e.g. 
mmu mappings), we need to wait again - * to make sure the cleaning phase is finished before continuing with - * the reset + /* + * We killed the open users, but that doesn't mean they are closed. + * It could be that they are running a long cleanup phase in the driver + * e.g. MMU unmappings, or running other long teardown flow even before + * our cleanup. + * Therefore we need to wait again to make sure they are closed before + * continuing with the reset. */ - pending_cnt = pending_total; - +wait_for_processes: while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) { - dev_info(hdev->dev, + dev_dbg(hdev->dev, "Waiting for all unmap operations to finish before hard reset\n"); pending_cnt--; @@ -811,18 +869,17 @@ static int device_kill_open_processes(struct hl_device *hdev) ssleep(1); } - return list_empty(&hdev->fpriv_list) ? 0 : -EBUSY; -} + /* All processes exited successfully */ + if (list_empty(&hdev->fpriv_list)) + return 0; -static void device_hard_reset_pending(struct work_struct *work) -{ - struct hl_device_reset_work *device_reset_work = - container_of(work, struct hl_device_reset_work, reset_work); - struct hl_device *hdev = device_reset_work->hdev; + /* Give up waiting for processes to exit */ + if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS) + return -ETIME; - hl_device_reset(hdev, true, true); + hdev->process_kill_trial_cnt++; - kfree(device_reset_work); + return -EBUSY; } /* @@ -859,6 +916,10 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset, hard_reset = true; } + /* Re-entry of reset thread */ + if (from_hard_reset_thread && hdev->process_kill_trial_cnt) + goto kill_processes; + /* * Prevent concurrency in this function - only one reset should be * done at any given time. 
Only need to perform this if we didn't @@ -904,26 +965,17 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset, again: if ((hard_reset) && (!from_hard_reset_thread)) { - struct hl_device_reset_work *device_reset_work; - hdev->hard_reset_pending = true; - device_reset_work = kzalloc(sizeof(*device_reset_work), - GFP_ATOMIC); - if (!device_reset_work) { - rc = -ENOMEM; - goto out_err; - } + hdev->process_kill_trial_cnt = 0; /* * Because the reset function can't run from interrupt or * from heartbeat work, we need to call the reset function * from a dedicated work */ - INIT_WORK(&device_reset_work->reset_work, - device_hard_reset_pending); - device_reset_work->hdev = hdev; - schedule_work(&device_reset_work->reset_work); + queue_delayed_work(hdev->device_reset_work.wq, + &hdev->device_reset_work.reset_work, 0); return 0; } @@ -949,12 +1001,25 @@ again: /* Go over all the queues, release all CS and their jobs */ hl_cs_rollback_all(hdev); +kill_processes: if (hard_reset) { /* Kill processes here after CS rollback. 
This is because the * process can't really exit until all its CSs are done, which * is what we do in cs rollback */ - rc = device_kill_open_processes(hdev); + rc = device_kill_open_processes(hdev, 0); + + if (rc == -EBUSY) { + if (hdev->device_fini_pending) { + dev_crit(hdev->dev, + "Failed to kill all open processes, stopping hard reset\n"); + goto out_err; + } + + /* signal reset thread to reschedule */ + return rc; + } + if (rc) { dev_crit(hdev->dev, "Failed to kill all open processes, stopping hard reset\n"); @@ -1089,6 +1154,7 @@ again: } atomic_set(&hdev->in_reset, 0); + hdev->needs_reset = false; if (hard_reset) hdev->hard_reset_cnt++; @@ -1261,13 +1327,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) hl_debugfs_add_device(hdev); - if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { - dev_info(hdev->dev, - "H/W state is dirty, must reset before initializing\n"); - hdev->asic_funcs->halt_engines(hdev, true); - hdev->asic_funcs->hw_fini(hdev, true); - } - /* * From this point, in case of an error, add char devices and create * sysfs nodes as part of the error flow, to allow debugging. 
@@ -1398,11 +1457,14 @@ out_disabled: */ void hl_device_fini(struct hl_device *hdev) { - int i, rc; ktime_t timeout; + int i, rc; dev_info(hdev->dev, "Removing device\n"); + hdev->device_fini_pending = 1; + flush_delayed_work(&hdev->device_reset_work.reset_work); + /* * This function is competing with the reset function, so try to * take the reset atomic and if we are already in middle of reset, @@ -1458,7 +1520,11 @@ void hl_device_fini(struct hl_device *hdev) * can't really exit until all its CSs are done, which is what we * do in cs rollback */ - rc = device_kill_open_processes(hdev); + dev_info(hdev->dev, + "Waiting for all processes to exit (timeout of %u seconds)", + HL_PENDING_RESET_LONG_SEC); + + rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC); if (rc) dev_crit(hdev->dev, "Failed to kill all open processes\n"); diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index cd41c7ceb0e7..0e1c629e9800 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -9,8 +9,6 @@ #include "../include/common/hl_boot_if.h" #include <linux/firmware.h> -#include <linux/genalloc.h> -#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/slab.h> #define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ @@ -20,16 +18,18 @@ * @hdev: pointer to hl_device structure. * @fw_name: the firmware image name * @dst: IO memory mapped address space to copy firmware to + * @src_offset: offset in src FW to copy from + * @size: amount of bytes to copy (0 to copy the whole binary) * * Copy fw code from firmware file to device memory. * * Return: 0 on success, non-zero for failure. 
*/ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, - void __iomem *dst) + void __iomem *dst, u32 src_offset, u32 size) { const struct firmware *fw; - const u64 *fw_data; + const void *fw_data; size_t fw_size; int rc; @@ -57,9 +57,20 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, goto out; } - fw_data = (const u64 *) fw->data; + if (size - src_offset > fw_size) { + dev_err(hdev->dev, + "size to copy(%u) and offset(%u) are invalid\n", + size, src_offset); + rc = -EINVAL; + goto out; + } + + if (size) + fw_size = size; + + fw_data = (const void *) fw->data; - memcpy_toio(dst, fw_data, fw_size); + memcpy_toio(dst, fw_data + src_offset, fw_size); out: release_firmware(fw); @@ -77,7 +88,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode) } int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, - u16 len, u32 timeout, long *result) + u16 len, u32 timeout, u64 *result) { struct cpucp_packet *pkt; dma_addr_t pkt_dma_addr; @@ -132,7 +143,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, >> CPUCP_PKT_CTL_OPCODE_SHIFT); rc = -EIO; } else if (result) { - *result = (long) le64_to_cpu(pkt->result); + *result = le64_to_cpu(pkt->result); } out: @@ -146,7 +157,7 @@ out: int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -169,7 +180,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, { struct cpucp_unmask_irq_arr_packet *pkt; size_t total_pkt_size; - long result; + u64 result; int rc; total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) + @@ -208,7 +219,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, int hl_fw_test_cpu_queue(struct hl_device *hdev) { struct cpucp_packet test_pkt = {}; - long result; + u64 result; int rc; test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << @@ -221,7 +232,7 @@ int 
hl_fw_test_cpu_queue(struct hl_device *hdev) if (!rc) { if (result != CPUCP_PACKET_FENCE_VAL) dev_err(hdev->dev, - "CPU queue test failed (0x%08lX)\n", result); + "CPU queue test failed (%#08llx)\n", result); } else { dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc); } @@ -252,7 +263,7 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, int hl_fw_send_heartbeat(struct hl_device *hdev) { struct cpucp_packet hb_pkt = {}; - long result; + u64 result; int rc; hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << @@ -268,13 +279,14 @@ int hl_fw_send_heartbeat(struct hl_device *hdev) return rc; } -int hl_fw_cpucp_info_get(struct hl_device *hdev) +int hl_fw_cpucp_info_get(struct hl_device *hdev, + u32 cpu_security_boot_status_reg) { struct asic_fixed_properties *prop = &hdev->asic_prop; struct cpucp_packet pkt = {}; void *cpucp_info_cpu_addr; dma_addr_t cpucp_info_dma_addr; - long result; + u64 result; int rc; cpucp_info_cpu_addr = @@ -313,6 +325,11 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev) goto out; } + /* Read FW application security bits again */ + if (hdev->asic_prop.fw_security_status_valid) + hdev->asic_prop.fw_app_security_map = + RREG32(cpu_security_boot_status_reg); + out: hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr); @@ -325,7 +342,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size) struct cpucp_packet pkt = {}; void *eeprom_info_cpu_addr; dma_addr_t eeprom_info_dma_addr; - long result; + u64 result; int rc; eeprom_info_cpu_addr = @@ -368,7 +385,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, struct hl_info_pci_counters *counters) { struct cpucp_packet pkt = {}; - long result; + u64 result; int rc; pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET << @@ -415,7 +432,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy) { struct 
cpucp_packet pkt = {}; - long result; + u64 result; int rc; pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET << @@ -435,9 +452,36 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy) return rc; } -static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg) +int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index, + u16 *pll_freq_arr) { - u32 err_val; + struct cpucp_packet pkt; + u64 result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.pll_type = __cpu_to_le16(pll_index); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) + dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc); + + pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result); + pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result); + pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result); + pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result); + + return rc; +} + +static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg, + u32 cpu_security_boot_status_reg) +{ + u32 err_val, security_val; /* Some of the firmware status codes are deprecated in newer f/w * versions. 
In those versions, the errors are reported @@ -472,6 +516,18 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg) if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) dev_err(hdev->dev, "Device boot error - NIC F/W initialization failed\n"); + if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) + dev_warn(hdev->dev, + "Device boot warning - security not ready\n"); + if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) + dev_err(hdev->dev, "Device boot error - security failure\n"); + if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) + dev_err(hdev->dev, "Device boot error - eFuse failure\n"); + + security_val = RREG32(cpu_security_boot_status_reg); + if (security_val & CPU_BOOT_DEV_STS0_ENABLED) + dev_dbg(hdev->dev, "Device security status %#x\n", + security_val); } static void detect_cpu_boot_status(struct hl_device *hdev, u32 status) @@ -524,10 +580,12 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status) } } -int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 boot_err0_reg, u32 timeout) +int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + u32 timeout) { - u32 status; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 status, security_status; int rc; if (!hdev->cpu_enable) @@ -557,23 +615,52 @@ int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg, if (rc) { dev_err(hdev->dev, "Failed to read preboot version\n"); detect_cpu_boot_status(hdev, status); - fw_read_errors(hdev, boot_err0_reg); + fw_read_errors(hdev, boot_err0_reg, + cpu_security_boot_status_reg); return -EIO; } - hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT); + rc = hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT); + if (rc) + return rc; + + security_status = RREG32(cpu_security_boot_status_reg); + + /* We read security status multiple times during boot: + * 1. preboot - we check if fw security feature is supported + * 2. 
boot cpu - we get boot cpu security status + * 3. FW application - we get FW application security status + * + * Preboot: + * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set + * check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN) + */ + if (security_status & CPU_BOOT_DEV_STS0_ENABLED) { + hdev->asic_prop.fw_security_status_valid = 1; + prop->fw_security_disabled = + !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN); + } else { + hdev->asic_prop.fw_security_status_valid = 0; + prop->fw_security_disabled = true; + } + + dev_info(hdev->dev, "firmware-level security is %s\n", + prop->fw_security_disabled ? "disabled" : "enabled"); return 0; } int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, u32 msg_to_cpu_reg, u32 cpu_msg_status_reg, - u32 boot_err0_reg, bool skip_bmc, - u32 cpu_timeout, u32 boot_fit_timeout) + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout) { u32 status; int rc; + if (!(hdev->fw_loading & FW_TYPE_BOOT_CPU)) + return 0; + dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n", cpu_timeout / USEC_PER_SEC); @@ -631,17 +718,24 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, 10000, cpu_timeout); + dev_dbg(hdev->dev, "uboot status = %d\n", status); + /* Read U-Boot version now in case we will later fail */ hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT); + /* Read boot_cpu security bits */ + if (hdev->asic_prop.fw_security_status_valid) + hdev->asic_prop.fw_boot_cpu_security_map = + RREG32(cpu_security_boot_status_reg); + if (rc) { detect_cpu_boot_status(hdev, status); rc = -EIO; goto out; } - if (!hdev->fw_loading) { - dev_info(hdev->dev, "Skip loading FW\n"); + if (!(hdev->fw_loading & FW_TYPE_LINUX)) { + dev_info(hdev->dev, "Skip loading Linux F/W\n"); goto out; } @@ -702,10 +796,23 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, goto out; } + /* Read FW application security 
bits */ + if (hdev->asic_prop.fw_security_status_valid) { + hdev->asic_prop.fw_app_security_map = + RREG32(cpu_security_boot_status_reg); + + if (hdev->asic_prop.fw_app_security_map & + CPU_BOOT_DEV_STS0_FW_HARD_RST_EN) + hdev->asic_prop.hard_reset_done_by_fw = true; + } + + dev_dbg(hdev->dev, "Firmware hard-reset is %s\n", + hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled"); + dev_info(hdev->dev, "Successfully loaded firmware to device\n"); out: - fw_read_errors(hdev, boot_err0_reg); + fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg); return rc; } diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 6ed974d2def0..571eda6ef5ab 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -10,6 +10,7 @@ #include "../include/common/cpucp_if.h" #include "../include/common/qman_if.h" +#include "../include/hw_ip/mmu/mmu_general.h" #include <uapi/misc/habanalabs.h> #include <linux/cdev.h> @@ -19,6 +20,10 @@ #include <linux/scatterlist.h> #include <linux/hashtable.h> #include <linux/bitfield.h> +#include <linux/genalloc.h> +#include <linux/sched/signal.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/coresight.h> #define HL_NAME "habanalabs" @@ -36,7 +41,9 @@ #define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT) #define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK) -#define HL_PENDING_RESET_PER_SEC 30 +#define HL_PENDING_RESET_PER_SEC 10 +#define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */ +#define HL_PENDING_RESET_LONG_SEC 60 #define HL_HARD_RESET_MAX_TIMEOUT 120 @@ -61,15 +68,29 @@ /* MMU */ #define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ +/** + * enum hl_mmu_page_table_locaion - mmu page table location + * @MMU_DR_PGT: page-table is located on device DRAM. + * @MMU_HR_PGT: page-table is located on host memory. 
+ * @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported. + */ +enum hl_mmu_page_table_location { + MMU_DR_PGT = 0, /* device-dram-resident MMU PGT */ + MMU_HR_PGT, /* host resident MMU PGT */ + MMU_NUM_PGT_LOCATIONS /* num of PGT locations */ +}; + /* * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream */ -#define HL_RSVD_SOBS 4 -#define HL_RSVD_MONS 2 +#define HL_RSVD_SOBS 2 +#define HL_RSVD_MONS 1 -#define HL_RSVD_SOBS_IN_USE 2 -#define HL_RSVD_MONS_IN_USE 1 +/* + * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream + */ +#define HL_COLLECTIVE_RSVD_MSTR_MONS 2 #define HL_MAX_SOB_VAL (1 << 15) @@ -80,6 +101,28 @@ #define HL_MAX_DCORES 4 +#define HL_MAX_SOBS_PER_MONITOR 8 + +/** + * struct hl_gen_wait_properties - properties for generating a wait CB + * @data: command buffer + * @q_idx: queue id is used to extract fence register address + * @size: offset in command buffer + * @sob_base: SOB base to use in this wait CB + * @sob_val: SOB value to wait for + * @mon_id: monitor to use in this wait CB + * @sob_mask: each bit represents a SOB offset from sob_base to be used + */ +struct hl_gen_wait_properties { + void *data; + u32 q_idx; + u32 size; + u16 sob_base; + u16 sob_val; + u16 mon_id; + u8 sob_mask; +}; + /** * struct pgt_info - MMU hop page info. * @node: hash linked-list node for the pgts shadow hash of pgts. @@ -125,6 +168,18 @@ enum hl_fw_component { }; /** + * enum hl_fw_types - F/W types to load + * @FW_TYPE_LINUX: Linux image for device CPU + * @FW_TYPE_BOOT_CPU: Boot image for device CPU + * @FW_TYPE_ALL_TYPES: Mask for all types + */ +enum hl_fw_types { + FW_TYPE_LINUX = 0x1, + FW_TYPE_BOOT_CPU = 0x2, + FW_TYPE_ALL_TYPES = (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU) +}; + +/** * enum hl_queue_type - Supported QUEUE types. * @QUEUE_TYPE_NA: queue is not available. 
* @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the @@ -146,7 +201,8 @@ enum hl_queue_type { enum hl_cs_type { CS_TYPE_DEFAULT, CS_TYPE_SIGNAL, - CS_TYPE_WAIT + CS_TYPE_WAIT, + CS_TYPE_COLLECTIVE_WAIT }; /* @@ -176,6 +232,17 @@ struct hl_outbound_pci_region { }; /* + * enum queue_cb_alloc_flags - Indicates queue support for CBs that + * allocated by Kernel or by User + * @CB_ALLOC_KERNEL: support only CBs that allocated by Kernel + * @CB_ALLOC_USER: support only CBs that allocated by User + */ +enum queue_cb_alloc_flags { + CB_ALLOC_KERNEL = 0x1, + CB_ALLOC_USER = 0x2 +}; + +/* * struct hl_hw_sob - H/W SOB info. * @hdev: habanalabs device structure. * @kref: refcount of this SOB. The SOB will reset once the refcount is zero. @@ -189,19 +256,29 @@ struct hl_hw_sob { u32 q_idx; }; +enum hl_collective_mode { + HL_COLLECTIVE_NOT_SUPPORTED = 0x0, + HL_COLLECTIVE_MASTER = 0x1, + HL_COLLECTIVE_SLAVE = 0x2 +}; + /** * struct hw_queue_properties - queue information. * @type: queue type. + * @queue_cb_alloc_flags: bitmap which indicates if the hw queue supports CB + * that allocated by the Kernel driver and therefore, + * a CB handle can be provided for jobs on this queue. + * Otherwise, a CB address must be provided. + * @collective_mode: collective mode of current queue * @driver_only: true if only the driver is allowed to send a job to this queue, * false otherwise. - * @requires_kernel_cb: true if a CB handle must be provided for jobs on this - * queue, false otherwise (a CB address must be provided). 
* @supports_sync_stream: True if queue supports sync stream */ struct hw_queue_properties { enum hl_queue_type type; + enum queue_cb_alloc_flags cb_alloc_flags; + enum hl_collective_mode collective_mode; u8 driver_only; - u8 requires_kernel_cb; u8 supports_sync_stream; }; @@ -227,6 +304,8 @@ enum hl_device_hw_state { HL_DEVICE_HW_STATE_DIRTY }; +#define HL_MMU_VA_ALIGNMENT_NOT_NEEDED 0 + /** * struct hl_mmu_properties - ASIC specific MMU address translation properties. * @start_addr: virtual start address of the memory region. @@ -245,6 +324,8 @@ enum hl_device_hw_state { * @hop5_mask: mask to get the PTE address in hop 5. * @page_size: default page size used to allocate memory. * @num_hops: The amount of hops supported by the translation table. + * @host_resident: Should the MMU page table reside in host memory or in the + * device DRAM. */ struct hl_mmu_properties { u64 start_addr; @@ -263,6 +344,7 @@ struct hl_mmu_properties { u64 hop5_mask; u32 page_size; u32 num_hops; + u8 host_resident; }; /** @@ -314,6 +396,14 @@ struct hl_mmu_properties { * @cb_pool_cb_size: size of each CB in the CB pool. 
* @max_pending_cs: maximum of concurrent pending command submissions * @max_queues: maximum amount of queues in the system + * @fw_boot_cpu_security_map: bitmap representation of boot cpu security status + * reported by FW, bit description can be found in + * CPU_BOOT_DEV_STS* + * @fw_app_security_map: bitmap representation of application security status + * reported by FW, bit description can be found in + * CPU_BOOT_DEV_STS* + * @collective_first_sob: first sync object available for collective use + * @collective_first_mon: first monitor available for collective use * @sync_stream_first_sob: first sync object available for sync stream use * @sync_stream_first_mon: first monitor available for sync stream use * @first_available_user_sob: first sob available for the user @@ -322,6 +412,10 @@ struct hl_mmu_properties { * @completion_queues_count: number of completion queues. * @fw_security_disabled: true if security measures are disabled in firmware, * false otherwise + * @fw_security_status_valid: security status bits are valid and can be fetched + * from BOOT_DEV_STS0 + * @dram_supports_virtual_memory: is there an MMU towards the DRAM + * @hard_reset_done_by_fw: true if firmware is handling hard reset flow */ struct asic_fixed_properties { struct hw_queue_properties *hw_queues_props; @@ -366,6 +460,10 @@ struct asic_fixed_properties { u32 cb_pool_cb_size; u32 max_pending_cs; u32 max_queues; + u32 fw_boot_cpu_security_map; + u32 fw_app_security_map; + u16 collective_first_sob; + u16 collective_first_mon; u16 sync_stream_first_sob; u16 sync_stream_first_mon; u16 first_available_user_sob[HL_MAX_DCORES]; @@ -373,6 +471,9 @@ struct asic_fixed_properties { u8 tpc_enabled_mask; u8 completion_queues_count; u8 fw_security_disabled; + u8 fw_security_status_valid; + u8 dram_supports_virtual_memory; + u8 hard_reset_done_by_fw; }; /** @@ -380,12 +481,14 @@ struct asic_fixed_properties { * @completion: fence is implemented using completion * @refcount: refcount for this fence * 
@error: mark this fence with error + * @timestamp: timestamp upon completion * */ struct hl_fence { struct completion completion; struct kref refcount; int error; + ktime_t timestamp; }; /** @@ -397,6 +500,7 @@ struct hl_fence { * @cs_seq: command submission sequence number. * @type: type of the CS - signal/wait. * @sob_val: the SOB value that is used in this signal/wait CS. + * @sob_group: the SOB group that is used in this collective wait CS. */ struct hl_cs_compl { struct hl_fence base_fence; @@ -406,6 +510,7 @@ struct hl_cs_compl { u64 cs_seq; enum hl_cs_type type; u16 sob_val; + u16 sob_group; }; /* @@ -427,7 +532,7 @@ struct hl_cb_mgr { * @refcount: reference counter for usage of the CB. * @hdev: pointer to device this CB belongs to. * @ctx: pointer to the CB owner's context. - * @lock: spinlock to protect mmap/cs flows. + * @lock: spinlock to protect mmap flows. * @debugfs_list: node in debugfs list of command buffers. * @pool_list: node in pool list of command buffers. * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to @@ -456,7 +561,7 @@ struct hl_cb { dma_addr_t bus_address; u32 mmap_size; u32 size; - u32 cs_cnt; + atomic_t cs_cnt; u8 mmap; u8 is_pool; u8 is_internal; @@ -468,6 +573,7 @@ struct hl_cb { * QUEUES */ +struct hl_cs; struct hl_cs_job; /* Queue length of external and HW queues */ @@ -490,10 +596,38 @@ struct hl_cs_job; #define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M /** - * struct hl_hw_queue - describes a H/W transport queue. + * struct hl_sync_stream_properties - + * describes a H/W queue sync stream properties * @hw_sob: array of the used H/W SOBs by this H/W queue. + * @next_sob_val: the next value to use for the currently used SOB. + * @base_sob_id: the base SOB id of the SOBs used by this queue. + * @base_mon_id: the base MON id of the MONs used by this queue. + * @collective_mstr_mon_id: the MON ids of the MONs used by this master queue + * in order to sync with all slave queues. 
+ * @collective_slave_mon_id: the MON id used by this slave queue in order to + * sync with its master queue. + * @collective_sob_id: current SOB id used by this collective slave queue + * to signal its collective master queue upon completion. + * @curr_sob_offset: the id offset to the currently used SOB from the + * HL_RSVD_SOBS that are being used by this queue. + */ +struct hl_sync_stream_properties { + struct hl_hw_sob hw_sob[HL_RSVD_SOBS]; + u16 next_sob_val; + u16 base_sob_id; + u16 base_mon_id; + u16 collective_mstr_mon_id[HL_COLLECTIVE_RSVD_MSTR_MONS]; + u16 collective_slave_mon_id; + u16 collective_sob_id; + u8 curr_sob_offset; +}; + +/** + * struct hl_hw_queue - describes a H/W transport queue. * @shadow_queue: pointer to a shadow queue that holds pointers to jobs. + * @sync_stream_prop: sync stream queue properties * @queue_type: type of queue. + * @collective_mode: collective mode of current queue * @kernel_address: holds the queue's kernel virtual address. * @bus_address: holds the queue's DMA address. * @pi: holds the queue's pi value. @@ -502,33 +636,25 @@ struct hl_cs_job; * @cq_id: the id for the corresponding CQ for this H/W queue. * @msi_vec: the IRQ number of the H/W queue. * @int_queue_len: length of internal queue (number of entries). - * @next_sob_val: the next value to use for the currently used SOB. - * @base_sob_id: the base SOB id of the SOBs used by this queue. - * @base_mon_id: the base MON id of the MONs used by this queue. * @valid: is the queue valid (we have array of 32 queues, not all of them * exist). - * @curr_sob_offset: the id offset to the currently used SOB from the - * HL_RSVD_SOBS that are being used by this queue. 
* @supports_sync_stream: True if queue supports sync stream */ struct hl_hw_queue { - struct hl_hw_sob hw_sob[HL_RSVD_SOBS]; - struct hl_cs_job **shadow_queue; - enum hl_queue_type queue_type; - void *kernel_address; - dma_addr_t bus_address; - u32 pi; - atomic_t ci; - u32 hw_queue_id; - u32 cq_id; - u32 msi_vec; - u16 int_queue_len; - u16 next_sob_val; - u16 base_sob_id; - u16 base_mon_id; - u8 valid; - u8 curr_sob_offset; - u8 supports_sync_stream; + struct hl_cs_job **shadow_queue; + struct hl_sync_stream_properties sync_stream_prop; + enum hl_queue_type queue_type; + enum hl_collective_mode collective_mode; + void *kernel_address; + dma_addr_t bus_address; + u32 pi; + atomic_t ci; + u32 hw_queue_id; + u32 cq_id; + u32 msi_vec; + u16 int_queue_len; + u8 valid; + u8 supports_sync_stream; }; /** @@ -650,6 +776,7 @@ enum div_select_defs { * dma_free_coherent(). This is ASIC function because * its implementation is not trivial when the driver * is loaded in simulation mode (not upstreamed). + * @scrub_device_mem: Scrub device memory given an address and size * @get_int_queue_base: get the internal queue base address. * @test_queues: run simple test on all queues for sanity check. * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool. @@ -700,6 +827,7 @@ enum div_select_defs { * @wreg: Write a register. Needed for simulator support. * @halt_coresight: stop the ETF and ETR traces. * @ctx_init: context dependent initialization. + * @ctx_fini: context dependent cleanup. * @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index. * @read_device_fw_version: read the device's firmware versions that are @@ -711,9 +839,13 @@ enum div_select_defs { * @gen_signal_cb: Generate a signal CB. * @gen_wait_cb: Generate a wait CB. * @reset_sob: Reset a SOB. 
+ * @reset_sob_group: Reset SOB group * @set_dma_mask_from_fw: set the DMA mask in the driver according to the * firmware configuration * @get_device_time: Get the device time. + * @collective_wait_init_cs: Generate collective master/slave packets + * and place them in the relevant cs jobs + * @collective_wait_create_jobs: allocate collective wait cs jobs */ struct hl_asic_funcs { int (*early_init)(struct hl_device *hdev); @@ -736,6 +868,7 @@ struct hl_asic_funcs { dma_addr_t *dma_handle, gfp_t flag); void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size, void *cpu_addr, dma_addr_t dma_handle); + int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size); void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len); int (*test_queues)(struct hl_device *hdev); @@ -794,28 +927,34 @@ struct hl_asic_funcs { int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size); int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, - u16 len, u32 timeout, long *result); - enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev); + u16 len, u32 timeout, u64 *result); int (*pci_bars_map)(struct hl_device *hdev); int (*init_iatu)(struct hl_device *hdev); u32 (*rreg)(struct hl_device *hdev, u32 reg); void (*wreg)(struct hl_device *hdev, u32 reg, u32 val); void (*halt_coresight)(struct hl_device *hdev); int (*ctx_init)(struct hl_ctx *ctx); + void (*ctx_fini)(struct hl_ctx *ctx); int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk); u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx); - void (*read_device_fw_version)(struct hl_device *hdev, + int (*read_device_fw_version)(struct hl_device *hdev, enum hl_fw_component fwc); int (*load_firmware_to_device)(struct hl_device *hdev); int (*load_boot_fit_to_device)(struct hl_device *hdev); u32 (*get_signal_cb_size)(struct hl_device *hdev); u32 (*get_wait_cb_size)(struct hl_device *hdev); - void (*gen_signal_cb)(struct 
hl_device *hdev, void *data, u16 sob_id); - void (*gen_wait_cb)(struct hl_device *hdev, void *data, u16 sob_id, - u16 sob_val, u16 mon_id, u32 q_idx); + u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id, + u32 size); + u32 (*gen_wait_cb)(struct hl_device *hdev, + struct hl_gen_wait_properties *prop); void (*reset_sob)(struct hl_device *hdev, void *data); + void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group); void (*set_dma_mask_from_fw)(struct hl_device *hdev); u64 (*get_device_time)(struct hl_device *hdev); + void (*collective_wait_init_cs)(struct hl_cs *cs); + int (*collective_wait_create_jobs)(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, + u32 collective_engine_id); }; @@ -826,17 +965,48 @@ struct hl_asic_funcs { #define HL_KERNEL_ASID_ID 0 /** + * enum hl_va_range_type - virtual address range type. + * @HL_VA_RANGE_TYPE_HOST: range type of host pages + * @HL_VA_RANGE_TYPE_HOST_HUGE: range type of host huge pages + * @HL_VA_RANGE_TYPE_DRAM: range type of dram pages + */ +enum hl_va_range_type { + HL_VA_RANGE_TYPE_HOST, + HL_VA_RANGE_TYPE_HOST_HUGE, + HL_VA_RANGE_TYPE_DRAM, + HL_VA_RANGE_TYPE_MAX +}; + +/** * struct hl_va_range - virtual addresses range. * @lock: protects the virtual addresses list. * @list: list of virtual addresses blocks available for mappings. * @start_addr: range start address. * @end_addr: range end address. + * @page_size: page size of this va range. 
*/ struct hl_va_range { struct mutex lock; struct list_head list; u64 start_addr; u64 end_addr; + u32 page_size; +}; + +/** + * struct hl_cs_counters_atomic - command submission counters + * @out_of_mem_drop_cnt: dropped due to memory allocation issue + * @parsing_drop_cnt: dropped due to error in packet parsing + * @queue_full_drop_cnt: dropped due to queue full + * @device_in_reset_drop_cnt: dropped due to device in reset + * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight + */ +struct hl_cs_counters_atomic { + atomic64_t out_of_mem_drop_cnt; + atomic64_t parsing_drop_cnt; + atomic64_t queue_full_drop_cnt; + atomic64_t device_in_reset_drop_cnt; + atomic64_t max_cs_in_flight_drop_cnt; }; /** @@ -849,14 +1019,12 @@ struct hl_va_range { * @refcount: reference counter for the context. Context is released only when * this hits 0l. It is incremented on CS and CS_WAIT. * @cs_pending: array of hl fence objects representing pending CS. - * @host_va_range: holds available virtual addresses for host mappings. - * @host_huge_va_range: holds available virtual addresses for host mappings - * with huge pages. - * @dram_va_range: holds available virtual addresses for DRAM mappings. + * @va_range: holds available virtual addresses for host and dram mappings. * @mem_hash_lock: protects the mem_hash. * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the * MMU hash or walking the PGT requires talking this lock. * @debugfs_list: node in debugfs list of contexts. + * @cs_counters: context command submission counters. * @cb_va_pool: device VA pool for command buffers which are mapped to the * device's MMU. * @cs_sequence: sequence number for CS. 
Value is assigned to a CS and passed @@ -879,26 +1047,24 @@ struct hl_va_range { struct hl_ctx { DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS); DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS); - struct hl_fpriv *hpriv; - struct hl_device *hdev; - struct kref refcount; - struct hl_fence **cs_pending; - struct hl_va_range *host_va_range; - struct hl_va_range *host_huge_va_range; - struct hl_va_range *dram_va_range; - struct mutex mem_hash_lock; - struct mutex mmu_lock; - struct list_head debugfs_list; - struct hl_cs_counters cs_counters; - struct gen_pool *cb_va_pool; - u64 cs_sequence; - u64 *dram_default_hops; - spinlock_t cs_lock; - atomic64_t dram_phys_mem; - atomic_t thread_ctx_switch_token; - u32 thread_ctx_switch_wait_token; - u32 asid; - u32 handle; + struct hl_fpriv *hpriv; + struct hl_device *hdev; + struct kref refcount; + struct hl_fence **cs_pending; + struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX]; + struct mutex mem_hash_lock; + struct mutex mmu_lock; + struct list_head debugfs_list; + struct hl_cs_counters_atomic cs_counters; + struct gen_pool *cb_va_pool; + u64 cs_sequence; + u64 *dram_default_hops; + spinlock_t cs_lock; + atomic64_t dram_phys_mem; + atomic_t thread_ctx_switch_token; + u32 thread_ctx_switch_wait_token; + u32 asid; + u32 handle; }; /** @@ -963,6 +1129,7 @@ struct hl_userptr { * @tdr_active: true if TDR was activated for this CS (to prevent * double TDR activation). * @aborted: true if CS was aborted due to some device error. + * @timestamp: true if a timestmap must be captured upon completion */ struct hl_cs { u16 *jobs_in_queue_cnt; @@ -983,6 +1150,7 @@ struct hl_cs { u8 timedout; u8 tdr_active; u8 aborted; + u8 timestamp; }; /** @@ -996,6 +1164,7 @@ struct hl_cs { * @userptr_list: linked-list of userptr mappings that belong to this job and * wait for completion. * @debugfs_list: node in debugfs list of command submission jobs. + * @refcount: reference counter for usage of the CS job. 
* @queue_type: the type of the H/W queue this job is submitted to. * @id: the id of this job inside a CS. * @hw_queue_id: the id of the H/W queue this job is submitted to. @@ -1019,6 +1188,7 @@ struct hl_cs_job { struct work_struct finish_work; struct list_head userptr_list; struct list_head debugfs_list; + struct kref refcount; enum hl_queue_type queue_type; u32 id; u32 hw_queue_id; @@ -1067,7 +1237,6 @@ struct hl_cs_parser { u8 contains_dma_pkt; }; - /* * MEMORY STRUCTURE */ @@ -1285,6 +1454,10 @@ struct hl_dbg_device_entry { * DEVICES */ +#define HL_STR_MAX 32 + +#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_NEEDS_RESET + 1) + /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards. */ @@ -1428,11 +1601,13 @@ struct hwmon_chip_info; /** * struct hl_device_reset_work - reset workqueue task wrapper. + * @wq: work queue for device reset procedure. * @reset_work: reset work to be done. * @hdev: habanalabs device structure. */ struct hl_device_reset_work { - struct work_struct reset_work; + struct workqueue_struct *wq; + struct delayed_work reset_work; struct hl_device *hdev; }; @@ -1446,18 +1621,78 @@ struct hl_device_idle_busy_ts { ktime_t busy_to_idle_ts; }; +/** + * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop + * information. + * @virt_addr: the virtual address of the hop. + * @phys-addr: the physical address of the hop (used by the device-mmu). + * @shadow_addr: The shadow of the hop used by the driver for walking the hops. + */ +struct hr_mmu_hop_addrs { + u64 virt_addr; + u64 phys_addr; + u64 shadow_addr; +}; /** - * struct hl_mmu_priv - used for holding per-device mmu internal information. + * struct hl_mmu_hr_pgt_priv - used for holding per-device mmu host-resident + * page-table internal information. * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops. * @mmu_shadow_hop0: shadow array of hop0 tables. 
*/ -struct hl_mmu_priv { +struct hl_mmu_hr_priv { + struct gen_pool *mmu_pgt_pool; + struct hr_mmu_hop_addrs *mmu_shadow_hop0; +}; + +/** + * struct hl_mmu_dr_pgt_priv - used for holding per-device mmu device-resident + * page-table internal information. + * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops. + * @mmu_shadow_hop0: shadow array of hop0 tables. + */ +struct hl_mmu_dr_priv { struct gen_pool *mmu_pgt_pool; void *mmu_shadow_hop0; }; /** + * struct hl_mmu_priv - used for holding per-device mmu internal information. + * @dr: information on the device-resident MMU, when exists. + * @hr: information on the host-resident MMU, when exists. + */ +struct hl_mmu_priv { + struct hl_mmu_dr_priv dr; + struct hl_mmu_hr_priv hr; +}; + +/** + * struct hl_mmu_per_hop_info - A structure describing one TLB HOP and its entry + * that was created in order to translate a virtual address to a + * physical one. + * @hop_addr: The address of the hop. + * @hop_pte_addr: The address of the hop entry. + * @hop_pte_val: The value in the hop entry. + */ +struct hl_mmu_per_hop_info { + u64 hop_addr; + u64 hop_pte_addr; + u64 hop_pte_val; +}; + +/** + * struct hl_mmu_hop_info - A structure describing the TLB hops and their + * hop-entries that were created in order to translate a virtual address to a + * physical one. + * @hop_info: Array holding the per-hop information used for the translation. + * @used_hops: The number of hops used for the translation. + */ +struct hl_mmu_hop_info { + struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS]; + u32 used_hops; +}; + +/** * struct hl_mmu_funcs - Device related MMU functions. * @init: initialize the MMU module. * @fini: release the MMU module. @@ -1468,6 +1703,9 @@ struct hl_mmu_priv { * @flush: flush all writes from all cores to reach device MMU. * @swap_out: marks all mapping of the given context as swapped out. * @swap_in: marks all mapping of the given context as swapped in. 
+ * @get_tlb_info: returns the list of hops and hop-entries used that were + * created in order to translate the giver virtual address to a + * physical one. */ struct hl_mmu_funcs { int (*init)(struct hl_device *hdev); @@ -1482,6 +1720,8 @@ struct hl_mmu_funcs { void (*flush)(struct hl_ctx *ctx); void (*swap_out)(struct hl_ctx *ctx); void (*swap_in)(struct hl_ctx *ctx); + int (*get_tlb_info)(struct hl_ctx *ctx, + u64 virt_addr, struct hl_mmu_hop_info *hops); }; /** @@ -1497,6 +1737,7 @@ struct hl_mmu_funcs { * @dev_ctrl: related kernel device structure for the control device * @work_freq: delayed work to lower device frequency if possible. * @work_heartbeat: delayed work for CPU-CP is-alive check. + * @device_reset_work: delayed work which performs hard reset * @asic_name: ASIC specific name. * @asic_type: ASIC specific type. * @completion_queue: array of hl_cq. @@ -1505,8 +1746,8 @@ struct hl_mmu_funcs { * @eq_wq: work queue of event queue for executing work in process context. * @kernel_ctx: Kernel driver context structure. * @kernel_queues: array of hl_hw_queue. - * @hw_queues_mirror_list: CS mirror list for TDR. - * @hw_queues_mirror_lock: protects hw_queues_mirror_list. + * @cs_mirror_list: CS mirror list for TDR. + * @cs_mirror_lock: protects cs_mirror_list. * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs. * @event_queue: event queue for IRQ from CPU-CP. * @dma_pool: DMA pool for small allocations. @@ -1525,6 +1766,7 @@ struct hl_mmu_funcs { * @hwmon_dev: H/W monitor device. * @pm_mng_profile: current power management profile. * @hl_chip_info: ASIC's sensors information. + * @device_status_description: device status description. * @hl_debugfs: device's debugfs manager. * @cb_pool: list of preallocated CBs. * @cb_pool_lock: protects the CB pool. @@ -1572,13 +1814,12 @@ struct hl_mmu_funcs { * @heartbeat: is heartbeat sanity check towards CPU-CP enabled. 
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false * otherwise. - * @dram_supports_virtual_memory: is MMU enabled towards DRAM. * @dram_default_page_mapping: is DRAM default page mapping enabled. + * @memory_scrub: true to perform device memory scrub in various locations, + * such as context-switch, context close, page free, etc. * @pmmu_huge_range: is a different virtual addresses range used for PMMU with * huge pages. * @init_done: is the initialization of the device done. - * @mmu_enable: is MMU enabled. - * @mmu_huge_page_opt: is MMU huge pages optimization enabled. * @device_cpu_disabled: is the device CPU disabled (due to timeouts) * @dma_mask: the dma mask that was set for this device * @in_debug: is device under debug. This, together with fpriv_list, enforces @@ -1589,9 +1830,16 @@ struct hl_mmu_funcs { * @stop_on_err: true if engines should stop on error. * @supports_sync_stream: is sync stream supported. * @sync_stream_queue_idx: helper index for sync stream queues initialization. + * @collective_mon_idx: helper index for collective initialization * @supports_coresight: is CoreSight supported. * @supports_soft_reset: is soft reset supported. * @supports_cb_mapping: is mapping a CB to the device's MMU supported. + * @needs_reset: true if reset_on_lockup is false and device should be reset + * due to lockup. 
+ * @process_kill_trial_cnt: number of trials reset thread tried killing + * user processes + * @device_fini_pending: true if device_fini was called and might be + * waiting for the reset thread to finish */ struct hl_device { struct pci_dev *pdev; @@ -1604,15 +1852,17 @@ struct hl_device { struct device *dev_ctrl; struct delayed_work work_freq; struct delayed_work work_heartbeat; - char asic_name[32]; + struct hl_device_reset_work device_reset_work; + char asic_name[HL_STR_MAX]; + char status[HL_DEV_STS_MAX][HL_STR_MAX]; enum hl_asic_type asic_type; struct hl_cq *completion_queue; struct workqueue_struct **cq_wq; struct workqueue_struct *eq_wq; struct hl_ctx *kernel_ctx; struct hl_hw_queue *kernel_queues; - struct list_head hw_queues_mirror_list; - spinlock_t hw_queues_mirror_lock; + struct list_head cs_mirror_list; + spinlock_t cs_mirror_lock; struct hl_cb_mgr kernel_cb_mgr; struct hl_eq event_queue; struct dma_pool *dma_pool; @@ -1649,10 +1899,10 @@ struct hl_device { struct hl_device_idle_busy_ts *idle_busy_ts_arr; - struct hl_cs_counters aggregated_cs_counters; + struct hl_cs_counters_atomic aggregated_cs_counters; struct hl_mmu_priv mmu_priv; - struct hl_mmu_funcs mmu_func; + struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS]; atomic64_t dram_used_mem; u64 timeout_jiffies; @@ -1677,8 +1927,8 @@ struct hl_device { u8 hard_reset_pending; u8 heartbeat; u8 reset_on_lockup; - u8 dram_supports_virtual_memory; u8 dram_default_page_mapping; + u8 memory_scrub; u8 pmmu_huge_range; u8 init_done; u8 device_cpu_disabled; @@ -1689,17 +1939,22 @@ struct hl_device { u8 stop_on_err; u8 supports_sync_stream; u8 sync_stream_queue_idx; + u8 collective_mon_idx; u8 supports_coresight; u8 supports_soft_reset; u8 supports_cb_mapping; + u8 needs_reset; + u8 process_kill_trial_cnt; + u8 device_fini_pending; /* Parameters for bring-up */ + u64 nic_ports_mask; + u64 fw_loading; u8 mmu_enable; u8 mmu_huge_page_opt; u8 cpu_enable; u8 reset_pcilink; u8 cpu_queues_enable; - u8 fw_loading; 
u8 pldm; u8 axi_drain; u8 sram_scrambler_enable; @@ -1707,6 +1962,7 @@ struct hl_device { u8 hard_reset_on_fw_events; u8 bmc_enable; u8 rl_enable; + u8 reset_on_preboot_fail; }; @@ -1793,7 +2049,8 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size, int hl_device_open(struct inode *inode, struct file *filp); int hl_device_open_ctrl(struct inode *inode, struct file *filp); -bool hl_device_disabled_or_in_reset(struct hl_device *hdev); +bool hl_device_operational(struct hl_device *hdev, + enum hl_device_status *status); enum hl_device_status hl_device_status(struct hl_device *hdev); int hl_device_set_debug_mode(struct hl_device *hdev, bool enable); int create_hdev(struct hl_device **dev, struct pci_dev *pdev, @@ -1878,8 +2135,10 @@ void hl_cs_rollback_all(struct hl_device *hdev); struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, enum hl_queue_type queue_type, bool is_kernel_allocated_cb); void hl_sob_reset_error(struct kref *ref); +int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask); void hl_fence_put(struct hl_fence *fence); void hl_fence_get(struct hl_fence *fence); +void cs_get(struct hl_cs *cs); void goya_set_asic_funcs(struct hl_device *hdev); void gaudi_set_asic_funcs(struct hl_device *hdev); @@ -1890,6 +2149,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx); int hl_vm_init(struct hl_device *hdev); void hl_vm_fini(struct hl_device *hdev); +u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, + enum hl_va_range_type type, u32 size, u32 alignment); +int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, + u64 start_addr, u64 size); int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, struct hl_userptr *userptr); void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr); @@ -1903,20 +2166,26 @@ int hl_mmu_init(struct hl_device *hdev); void hl_mmu_fini(struct hl_device *hdev); int hl_mmu_ctx_init(struct hl_ctx *ctx); void hl_mmu_ctx_fini(struct hl_ctx *ctx); -int 
hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, +int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte); -int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, +int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte); +int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr, + u64 phys_addr, u32 size); +int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size); void hl_mmu_swap_out(struct hl_ctx *ctx); void hl_mmu_swap_in(struct hl_ctx *ctx); int hl_mmu_if_set_funcs(struct hl_device *hdev); -void hl_mmu_v1_set_funcs(struct hl_device *hdev); +void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu); +int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr); +int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops); int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, - void __iomem *dst); + void __iomem *dst, u32 src_offset, u32 size); int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode); int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, - u16 len, u32 timeout, long *result); + u16 len, u32 timeout, u64 *result); int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type); int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, size_t irq_arr_size); @@ -1926,18 +2195,22 @@ void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr); int hl_fw_send_heartbeat(struct hl_device *hdev); -int hl_fw_cpucp_info_get(struct hl_device *hdev); +int hl_fw_cpucp_info_get(struct hl_device *hdev, + u32 cpu_security_boot_status_reg); int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size); int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, struct hl_info_pci_counters *counters); int 
hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy); +int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index, + u16 *pll_freq_arr); int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, u32 msg_to_cpu_reg, u32 cpu_msg_status_reg, - u32 boot_err0_reg, bool skip_bmc, - u32 cpu_timeout, u32 boot_fit_timeout); -int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 boot_err0_reg, u32 timeout); + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout); +int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + u32 timeout); int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3], bool is_wc[3]); @@ -1946,8 +2219,7 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region, struct hl_inbound_pci_region *pci_region); int hl_pci_set_outbound_region(struct hl_device *hdev, struct hl_outbound_pci_region *pci_region); -int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 boot_err0_reg, u32 preboot_ver_timeout); +int hl_pci_init(struct hl_device *hdev); void hl_pci_fini(struct hl_device *hdev); long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr); diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index f9067d3ef437..6bbb6bca6860 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -29,6 +29,7 @@ static DEFINE_MUTEX(hl_devs_idr_lock); static int timeout_locked = 5; static int reset_on_lockup = 1; +static int memory_scrub = 1; module_param(timeout_locked, int, 0444); MODULE_PARM_DESC(timeout_locked, @@ -38,6 +39,10 @@ module_param(reset_on_lockup, int, 0444); MODULE_PARM_DESC(reset_on_lockup, "Do device reset on lockup (0 = no, 1 = yes, default yes)"); +module_param(memory_scrub, int, 0444); 
+MODULE_PARM_DESC(memory_scrub, + "Scrub device memory in various states (0 = no, 1 = yes, default yes)"); + #define PCI_VENDOR_ID_HABANALABS 0x1da3 #define PCI_IDS_GOYA 0x0001 @@ -87,6 +92,7 @@ static enum hl_asic_type get_asic_type(u16 device) */ int hl_device_open(struct inode *inode, struct file *filp) { + enum hl_device_status status; struct hl_device *hdev; struct hl_fpriv *hpriv; int rc; @@ -119,10 +125,10 @@ int hl_device_open(struct inode *inode, struct file *filp) mutex_lock(&hdev->fpriv_list_lock); - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_err_ratelimited(hdev->dev, - "Can't open %s because it is disabled or in reset\n", - dev_name(hdev->dev)); + "Can't open %s because it is %s\n", + dev_name(hdev->dev), hdev->status[status]); rc = -EPERM; goto out_err; } @@ -199,7 +205,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp) mutex_lock(&hdev->fpriv_list_lock); - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { dev_err_ratelimited(hdev->dev_ctrl, "Can't open %s because it is disabled or in reset\n", dev_name(hdev->dev_ctrl)); @@ -228,19 +234,20 @@ out_err: static void set_driver_behavior_per_device(struct hl_device *hdev) { - hdev->mmu_enable = 1; hdev->cpu_enable = 1; - hdev->fw_loading = 1; + hdev->fw_loading = FW_TYPE_ALL_TYPES; hdev->cpu_queues_enable = 1; hdev->heartbeat = 1; + hdev->mmu_enable = 1; hdev->clock_gating_mask = ULONG_MAX; - - hdev->reset_pcilink = 0; - hdev->axi_drain = 0; hdev->sram_scrambler_enable = 1; hdev->dram_scrambler_enable = 1; hdev->bmc_enable = 1; hdev->hard_reset_on_fw_events = 1; + hdev->reset_on_preboot_fail = 1; + + hdev->reset_pcilink = 0; + hdev->axi_drain = 0; } /* @@ -281,8 +288,17 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, hdev->asic_type = asic_type; } + /* Assign status description string */ + strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], + "disabled", HL_STR_MAX); + 
strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], + "in reset", HL_STR_MAX); + strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], + "needs reset", HL_STR_MAX); + hdev->major = hl_major; hdev->reset_on_lockup = reset_on_lockup; + hdev->memory_scrub = memory_scrub; hdev->pldm = 0; set_driver_behavior_per_device(hdev); diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 07317ea49129..32e6af1db4e3 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -314,20 +314,45 @@ static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args) static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct hl_info_cs_counters cs_counters = {0}; struct hl_device *hdev = hpriv->hdev; - struct hl_info_cs_counters cs_counters = { {0} }; + struct hl_cs_counters_atomic *cntr; u32 max_size = args->return_size; - void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + cntr = &hdev->aggregated_cs_counters; if ((!max_size) || (!out)) return -EINVAL; - memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters, - sizeof(struct hl_cs_counters)); - - if (hpriv->ctx) - memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters, - sizeof(struct hl_cs_counters)); + cs_counters.total_out_of_mem_drop_cnt = + atomic64_read(&cntr->out_of_mem_drop_cnt); + cs_counters.total_parsing_drop_cnt = + atomic64_read(&cntr->parsing_drop_cnt); + cs_counters.total_queue_full_drop_cnt = + atomic64_read(&cntr->queue_full_drop_cnt); + cs_counters.total_device_in_reset_drop_cnt = + atomic64_read(&cntr->device_in_reset_drop_cnt); + cs_counters.total_max_cs_in_flight_drop_cnt = + atomic64_read(&cntr->max_cs_in_flight_drop_cnt); + + if (hpriv->ctx) { + cs_counters.ctx_out_of_mem_drop_cnt = + atomic64_read( + 
&hpriv->ctx->cs_counters.out_of_mem_drop_cnt); + cs_counters.ctx_parsing_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.parsing_drop_cnt); + cs_counters.ctx_queue_full_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.queue_full_drop_cnt); + cs_counters.ctx_device_in_reset_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.device_in_reset_drop_cnt); + cs_counters.ctx_max_cs_in_flight_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt); + } return copy_to_user(out, &cs_counters, min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0; @@ -378,11 +403,32 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv, min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0; } +static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_pll_frequency_info freq_info = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + int rc; + + if ((!max_size) || (!out)) + return -EINVAL; + + rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output); + if (rc) + return rc; + + return copy_to_user(out, &freq_info, + min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0; +} + static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { + enum hl_device_status status; struct hl_info_args *args = data; struct hl_device *hdev = hpriv->hdev; + int rc; /* @@ -403,10 +449,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, break; } - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(dev, "Device is %s. Can't execute INFO IOCTL\n", - atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } @@ -453,6 +499,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_TOTAL_ENERGY: return total_energy_consumption_info(hpriv, args); + case HL_INFO_PLL_FREQUENCY: + return pll_frequency_info(hpriv, args); + default: dev_err(dev, "Invalid request %d\n", args->op); rc = -ENOTTY; @@ -476,12 +525,14 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data) { struct hl_debug_args *args = data; struct hl_device *hdev = hpriv->hdev; + enum hl_device_status status; + int rc = 0; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(hdev->dev, "Device is %s. Can't execute DEBUG IOCTL\n", - atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } @@ -544,7 +595,7 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg, int retcode; if (hdev->hard_reset_pending) { - dev_crit_ratelimited(hdev->dev_ctrl, + dev_crit_ratelimited(dev, "Device HARD reset pending! 
Please close FD\n"); return -ENODEV; } diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index 250cf9cefc06..7caf868d1585 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -48,6 +48,11 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs) return; q = &hdev->kernel_queues[0]; + + /* There are no internal queues if H/W queues are being used */ + if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW) + return; + for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) { if (q->queue_type == QUEUE_TYPE_INT) atomic_add(cs->jobs_in_queue_cnt[i], &q->ci); @@ -333,7 +338,14 @@ static void int_queue_schedule_job(struct hl_cs_job *job) bd.ctl = 0; bd.len = cpu_to_le32(job->job_cb_size); - bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); + + if (job->is_kernel_allocated_cb) + /* bus_address is actually a mmu mapped address + * allocated from an internal pool + */ + bd.ptr = cpu_to_le64(job->user_cb->bus_address); + else + bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd); @@ -388,6 +400,91 @@ static void hw_queue_schedule_job(struct hl_cs_job *job) ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr); } +static void init_signal_cs(struct hl_device *hdev, + struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl) +{ + struct hl_sync_stream_properties *prop; + struct hl_hw_sob *hw_sob; + u32 q_idx; + + q_idx = job->hw_queue_id; + prop = &hdev->kernel_queues[q_idx].sync_stream_prop; + hw_sob = &prop->hw_sob[prop->curr_sob_offset]; + + cs_cmpl->hw_sob = hw_sob; + cs_cmpl->sob_val = prop->next_sob_val++; + + dev_dbg(hdev->dev, + "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n", + cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx); + + hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb, + cs_cmpl->hw_sob->sob_id, 0); + + kref_get(&hw_sob->kref); + + /* check for wraparound */ + if 
(prop->next_sob_val == HL_MAX_SOB_VAL) { + /* + * Decrement as we reached the max value. + * The release function won't be called here as we've + * just incremented the refcount. + */ + kref_put(&hw_sob->kref, hl_sob_reset_error); + prop->next_sob_val = 1; + /* only two SOBs are currently in use */ + prop->curr_sob_offset = + (prop->curr_sob_offset + 1) % HL_RSVD_SOBS; + + dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n", + prop->curr_sob_offset, q_idx); + } +} + +static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs, + struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl) +{ + struct hl_cs_compl *signal_cs_cmpl; + struct hl_sync_stream_properties *prop; + struct hl_gen_wait_properties wait_prop; + u32 q_idx; + + q_idx = job->hw_queue_id; + prop = &hdev->kernel_queues[q_idx].sync_stream_prop; + + signal_cs_cmpl = container_of(cs->signal_fence, + struct hl_cs_compl, + base_fence); + + /* copy the SOB id and value of the signal CS */ + cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; + cs_cmpl->sob_val = signal_cs_cmpl->sob_val; + + dev_dbg(hdev->dev, + "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n", + cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, + prop->base_mon_id, q_idx); + + wait_prop.data = (void *) job->patched_cb; + wait_prop.sob_base = cs_cmpl->hw_sob->sob_id; + wait_prop.sob_mask = 0x1; + wait_prop.sob_val = cs_cmpl->sob_val; + wait_prop.mon_id = prop->base_mon_id; + wait_prop.q_idx = q_idx; + wait_prop.size = 0; + hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop); + + kref_get(&cs_cmpl->hw_sob->kref); + /* + * Must put the signal fence after the SOB refcnt increment so + * the SOB refcnt won't turn 0 and reset the SOB before the + * wait CS was submitted. 
+ */ + mb(); + hl_fence_put(cs->signal_fence); + cs->signal_fence = NULL; +} + /* * init_signal_wait_cs - initialize a signal/wait CS * @cs: pointer to the signal/wait CS @@ -398,84 +495,18 @@ static void init_signal_wait_cs(struct hl_cs *cs) { struct hl_ctx *ctx = cs->ctx; struct hl_device *hdev = ctx->hdev; - struct hl_hw_queue *hw_queue; + struct hl_cs_job *job; struct hl_cs_compl *cs_cmpl = container_of(cs->fence, struct hl_cs_compl, base_fence); - struct hl_hw_sob *hw_sob; - struct hl_cs_job *job; - u32 q_idx; - /* There is only one job in a signal/wait CS */ job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node); - q_idx = job->hw_queue_id; - hw_queue = &hdev->kernel_queues[q_idx]; - - if (cs->type & CS_TYPE_SIGNAL) { - hw_sob = &hw_queue->hw_sob[hw_queue->curr_sob_offset]; - - cs_cmpl->hw_sob = hw_sob; - cs_cmpl->sob_val = hw_queue->next_sob_val++; - - dev_dbg(hdev->dev, - "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n", - cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx); - - hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb, - cs_cmpl->hw_sob->sob_id); - - kref_get(&hw_sob->kref); - - /* check for wraparound */ - if (hw_queue->next_sob_val == HL_MAX_SOB_VAL) { - /* - * Decrement as we reached the max value. - * The release function won't be called here as we've - * just incremented the refcount. 
- */ - kref_put(&hw_sob->kref, hl_sob_reset_error); - hw_queue->next_sob_val = 1; - /* only two SOBs are currently in use */ - hw_queue->curr_sob_offset = - (hw_queue->curr_sob_offset + 1) % - HL_RSVD_SOBS_IN_USE; - - dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n", - hw_queue->curr_sob_offset, q_idx); - } - } else if (cs->type & CS_TYPE_WAIT) { - struct hl_cs_compl *signal_cs_cmpl; - - signal_cs_cmpl = container_of(cs->signal_fence, - struct hl_cs_compl, - base_fence); - - /* copy the the SOB id and value of the signal CS */ - cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; - cs_cmpl->sob_val = signal_cs_cmpl->sob_val; - - dev_dbg(hdev->dev, - "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n", - cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, - hw_queue->base_mon_id, q_idx); - hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb, - cs_cmpl->hw_sob->sob_id, - cs_cmpl->sob_val, - hw_queue->base_mon_id, - q_idx); - - kref_get(&cs_cmpl->hw_sob->kref); - /* - * Must put the signal fence after the SOB refcnt increment so - * the SOB refcnt won't turn 0 and reset the SOB before the - * wait CS was submitted. 
- */ - mb(); - hl_fence_put(cs->signal_fence); - cs->signal_fence = NULL; - } + if (cs->type & CS_TYPE_SIGNAL) + init_signal_cs(hdev, job, cs_cmpl); + else if (cs->type & CS_TYPE_WAIT) + init_wait_cs(hdev, cs, job, cs_cmpl); } /* @@ -484,19 +515,24 @@ static void init_signal_wait_cs(struct hl_cs *cs) */ int hl_hw_queue_schedule_cs(struct hl_cs *cs) { + enum hl_device_status status; + struct hl_cs_counters_atomic *cntr; struct hl_ctx *ctx = cs->ctx; struct hl_device *hdev = ctx->hdev; struct hl_cs_job *job, *tmp; struct hl_hw_queue *q; - u32 max_queues; int rc = 0, i, cq_cnt; + u32 max_queues; + + cntr = &hdev->aggregated_cs_counters; hdev->asic_funcs->hw_queues_lock(hdev); - if (hl_device_disabled_or_in_reset(hdev)) { - ctx->cs_counters.device_in_reset_drop_cnt++; + if (!hl_device_operational(hdev, &status)) { + atomic64_inc(&cntr->device_in_reset_drop_cnt); + atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt); dev_err(hdev->dev, - "device is disabled or in reset, CS rejected!\n"); + "device is %s, CS rejected!\n", hdev->status[status]); rc = -EPERM; goto out; } @@ -527,7 +563,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) } if (rc) { - ctx->cs_counters.queue_full_drop_cnt++; + atomic64_inc( + &ctx->cs_counters.queue_full_drop_cnt); + atomic64_inc(&cntr->queue_full_drop_cnt); goto unroll_cq_resv; } @@ -538,21 +576,23 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) init_signal_wait_cs(cs); + else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) + hdev->asic_funcs->collective_wait_init_cs(cs); - spin_lock(&hdev->hw_queues_mirror_lock); - list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list); + spin_lock(&hdev->cs_mirror_lock); + list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list); /* Queue TDR if the CS is the first entry and if timeout is wanted */ if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) && - (list_first_entry(&hdev->hw_queues_mirror_list, + 
(list_first_entry(&hdev->cs_mirror_list, struct hl_cs, mirror_node) == cs)) { cs->tdr_active = true; schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies); - spin_unlock(&hdev->hw_queues_mirror_lock); - } else { - spin_unlock(&hdev->hw_queues_mirror_lock); + } + spin_unlock(&hdev->cs_mirror_lock); + if (!hdev->cs_active_cnt++) { struct hl_device_idle_busy_ts *ts; @@ -714,22 +754,56 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx) { - struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx]; + struct hl_sync_stream_properties *sync_stream_prop; struct asic_fixed_properties *prop = &hdev->asic_prop; struct hl_hw_sob *hw_sob; - int sob, queue_idx = hdev->sync_stream_queue_idx++; + int sob, reserved_mon_idx, queue_idx; + + sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop; + + /* We use 'collective_mon_idx' as a running index in order to reserve + * monitors for collective master/slave queues. 
+ * collective master queue gets 2 reserved monitors + * collective slave queue gets 1 reserved monitor + */ + if (hdev->kernel_queues[q_idx].collective_mode == + HL_COLLECTIVE_MASTER) { + reserved_mon_idx = hdev->collective_mon_idx; + + /* reserve the first monitor for collective master queue */ + sync_stream_prop->collective_mstr_mon_id[0] = + prop->collective_first_mon + reserved_mon_idx; + + /* reserve the second monitor for collective master queue */ + sync_stream_prop->collective_mstr_mon_id[1] = + prop->collective_first_mon + reserved_mon_idx + 1; + + hdev->collective_mon_idx += HL_COLLECTIVE_RSVD_MSTR_MONS; + } else if (hdev->kernel_queues[q_idx].collective_mode == + HL_COLLECTIVE_SLAVE) { + reserved_mon_idx = hdev->collective_mon_idx++; + + /* reserve a monitor for collective slave queue */ + sync_stream_prop->collective_slave_mon_id = + prop->collective_first_mon + reserved_mon_idx; + } + + if (!hdev->kernel_queues[q_idx].supports_sync_stream) + return; + + queue_idx = hdev->sync_stream_queue_idx++; - hw_queue->base_sob_id = - prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS; - hw_queue->base_mon_id = - prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS; - hw_queue->next_sob_val = 1; - hw_queue->curr_sob_offset = 0; + sync_stream_prop->base_sob_id = prop->sync_stream_first_sob + + (queue_idx * HL_RSVD_SOBS); + sync_stream_prop->base_mon_id = prop->sync_stream_first_mon + + (queue_idx * HL_RSVD_MONS); + sync_stream_prop->next_sob_val = 1; + sync_stream_prop->curr_sob_offset = 0; for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) { - hw_sob = &hw_queue->hw_sob[sob]; + hw_sob = &sync_stream_prop->hw_sob[sob]; hw_sob->hdev = hdev; - hw_sob->sob_id = hw_queue->base_sob_id + sob; + hw_sob->sob_id = sync_stream_prop->base_sob_id + sob; hw_sob->q_idx = q_idx; kref_init(&hw_sob->kref); } @@ -737,15 +811,16 @@ static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx) static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx) { - struct 
hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx]; + struct hl_sync_stream_properties *prop = + &hdev->kernel_queues[q_idx].sync_stream_prop; /* * In case we got here due to a stuck CS, the refcnt might be bigger * than 1 and therefore we reset it. */ - kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref); - hw_queue->curr_sob_offset = 0; - hw_queue->next_sob_val = 1; + kref_init(&prop->hw_sob[prop->curr_sob_offset].kref); + prop->curr_sob_offset = 0; + prop->next_sob_val = 1; } /* @@ -788,8 +863,7 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q, break; } - if (q->supports_sync_stream) - sync_stream_queue_init(hdev, q->hw_queue_id); + sync_stream_queue_init(hdev, q->hw_queue_id); if (rc) return rc; @@ -867,6 +941,7 @@ int hl_hw_queues_create(struct hl_device *hdev) q->queue_type = asic->hw_queues_props[i].type; q->supports_sync_stream = asic->hw_queues_props[i].supports_sync_stream; + q->collective_mode = asic->hw_queues_props[i].collective_mode; rc = queue_init(hdev, q, i); if (rc) { dev_err(hdev->dev, diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c index 2ac29cb2fe61..6b421d76b311 100644 --- a/drivers/misc/habanalabs/common/hwmon.c +++ b/drivers/misc/habanalabs/common/hwmon.c @@ -114,7 +114,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, struct hl_device *hdev = dev_get_drvdata(dev); int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; switch (type) { @@ -192,7 +192,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type, { struct hl_device *hdev = dev_get_drvdata(dev); - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; switch (type) { @@ -312,6 +312,7 @@ int hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -322,7 +323,9 
@@ int hl_get_temperature(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -363,6 +366,7 @@ int hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -373,7 +377,9 @@ int hl_get_voltage(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -389,6 +395,7 @@ int hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -399,7 +406,9 @@ int hl_get_current(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -415,6 +424,7 @@ int hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -425,7 +435,9 @@ int hl_get_fan_speed(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -441,6 +453,7 @@ int hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -451,7 +464,9 @@ int hl_get_pwm_info(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -542,7 
+557,7 @@ int hl_hwmon_init(struct hl_device *hdev) struct asic_fixed_properties *prop = &hdev->asic_prop; int rc; - if ((hdev->hwmon_initialized) || !(hdev->fw_loading)) + if ((hdev->hwmon_initialized) || !(hdev->cpu_queues_enable)) return 0; if (hdev->hl_chip_info->info) { diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index bfe223abf142..cbe9da4e0211 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -11,7 +11,6 @@ #include <linux/uaccess.h> #include <linux/slab.h> -#include <linux/genalloc.h> #define HL_MMU_DEBUG 0 @@ -46,7 +45,7 @@ * @ret_handle : result handle * * This function does the following: - * - Allocate the requested size rounded up to 2MB pages + * - Allocate the requested size rounded up to 'dram_page_size' pages * - Return unique handle */ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, @@ -81,6 +80,16 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, num_pgs, total_size); return -ENOMEM; } + + if (hdev->memory_scrub) { + rc = hdev->asic_funcs->scrub_device_mem(hdev, paddr, + total_size); + if (rc) { + dev_err(hdev->dev, + "Failed to scrub contiguous device memory\n"); + goto pages_pack_err; + } + } } phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); @@ -118,6 +127,17 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, goto page_err; } + if (hdev->memory_scrub) { + rc = hdev->asic_funcs->scrub_device_mem(hdev, + phys_pg_pack->pages[i], + page_size); + if (rc) { + dev_err(hdev->dev, + "Failed to scrub device memory\n"); + goto page_err; + } + } + num_curr_pgs++; } } @@ -601,6 +621,87 @@ out: } /* + * hl_reserve_va_block() - reserve a virtual block of a given size. + * @hdev: pointer to the habanalabs device structure. + * @ctx: current context + * @type: virtual addresses range type. + * @size: requested block size. 
+ * @alignment: required alignment in bytes of the virtual block start address, + * 0 means no alignment. + * + * This function does the following: + * - Iterate on the virtual block list to find a suitable virtual block for the + * given size and alignment. + * - Reserve the requested block and update the list. + * - Return the start address of the virtual block. + */ +u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, + enum hl_va_range_type type, u32 size, u32 alignment) +{ + return get_va_block(hdev, ctx->va_range[type], size, 0, + max(alignment, ctx->va_range[type]->page_size)); +} + +/** + * hl_get_va_range_type() - get va_range type for the given address and size. + * @address: The start address of the area we want to validate. + * @size: The size in bytes of the area we want to validate. + * @type: returned va_range type + * + * Return: true if the area is inside a valid range, false otherwise. + */ +static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size, + enum hl_va_range_type *type) +{ + int i; + + for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) { + if (hl_mem_area_inside_range(address, size, + ctx->va_range[i]->start_addr, + ctx->va_range[i]->end_addr)) { + *type = i; + return 0; + } + } + + return -EINVAL; +} + +/* + * hl_unreserve_va_block - wrapper for add_va_block for unreserving a va block + * + * @hdev: pointer to the habanalabs device structure + * @ctx: current context + * @start: start virtual address + * @end: end virtual address + * + * This function does the following: + * - Takes the list lock and calls add_va_block_locked + */ +int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, + u64 start_addr, u64 size) +{ + enum hl_va_range_type type; + int rc; + + rc = hl_get_va_range_type(ctx, start_addr, size, &type); + if (rc) { + dev_err(hdev->dev, + "cannot find va_range for va %#llx size %llu", + start_addr, size); + return rc; + } + + rc = add_va_block(hdev, ctx->va_range[type], start_addr, + 
start_addr + size - 1); + if (rc) + dev_warn(hdev->dev, + "add va block failed for vaddr: 0x%llx\n", start_addr); + + return rc; +} + +/* * get_sg_info - get number of pages and the DMA address from SG list * * @sg : the SG list @@ -742,7 +843,7 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, for (i = 0 ; i < phys_pg_pack->npages ; i++) { paddr = phys_pg_pack->pages[i]; - rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size, + rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size, (i + 1) == phys_pg_pack->npages); if (rc) { dev_err(hdev->dev, @@ -761,7 +862,7 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, err: next_vaddr = vaddr; for (i = 0 ; i < mapped_pg_cnt ; i++) { - if (hl_mmu_unmap(ctx, next_vaddr, page_size, + if (hl_mmu_unmap_page(ctx, next_vaddr, page_size, (i + 1) == mapped_pg_cnt)) dev_warn_ratelimited(hdev->dev, "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n", @@ -791,7 +892,7 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, next_vaddr = vaddr; for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) { - if (hl_mmu_unmap(ctx, next_vaddr, page_size, + if (hl_mmu_unmap_page(ctx, next_vaddr, page_size, (i + 1) == phys_pg_pack->npages)) dev_warn_ratelimited(hdev->dev, "unmap failed for vaddr: 0x%llx\n", next_vaddr); @@ -888,7 +989,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, /* get required alignment */ if (phys_pg_pack->page_size == page_size) { - va_range = ctx->host_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST]; /* * huge page alignment may be needed in case of regular @@ -903,7 +1004,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, * huge page alignment is needed in case of huge page * mapping */ - va_range = ctx->host_huge_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]; va_block_align = huge_page_size; } } else { @@ -928,7 +1029,7 @@ static int map_device_va(struct hl_ctx *ctx, struct 
hl_mem_in *args, hint_addr = args->map_device.hint_addr; /* DRAM VA alignment is the same as the DRAM page size */ - va_range = ctx->dram_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM]; va_block_align = hdev->asic_prop.dmmu.page_size; } @@ -1073,12 +1174,12 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free) if (phys_pg_pack->page_size == hdev->asic_prop.pmmu.page_size) - va_range = ctx->host_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST]; else - va_range = ctx->host_huge_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]; } else if (*vm_type == VM_TYPE_PHYS_PACK) { is_userptr = false; - va_range = ctx->dram_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM]; phys_pg_pack = hnode->ptr; } else { dev_warn(hdev->dev, @@ -1217,6 +1318,7 @@ out: int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) { + enum hl_device_status status; union hl_mem_args *args = data; struct hl_device *hdev = hpriv->hdev; struct hl_ctx *ctx = hpriv->ctx; @@ -1224,10 +1326,10 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) u32 handle = 0; int rc; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(hdev->dev, "Device is %s. Can't execute MEMORY IOCTL\n", - atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } @@ -1236,18 +1338,35 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) switch (args->in.op) { case HL_MEM_OP_ALLOC: - if (!hdev->dram_supports_virtual_memory) { - dev_err(hdev->dev, "DRAM alloc is not supported\n"); - rc = -EINVAL; - goto out; - } - if (args->in.alloc.mem_size == 0) { dev_err(hdev->dev, "alloc size must be larger than 0\n"); rc = -EINVAL; goto out; } + + /* If DRAM does not support virtual memory the driver won't + * handle the allocation/freeing of that memory. 
However, for + * system administration/monitoring purposes, the driver will + * keep track of the amount of DRAM memory that is allocated + * and freed by the user. Because this code totally relies on + * the user's input, the driver can't ensure the validity + * of this accounting. + */ + if (!hdev->asic_prop.dram_supports_virtual_memory) { + atomic64_add(args->in.alloc.mem_size, + &ctx->dram_phys_mem); + atomic64_add(args->in.alloc.mem_size, + &hdev->dram_used_mem); + + dev_dbg(hdev->dev, "DRAM alloc is not supported\n"); + rc = 0; + + memset(args, 0, sizeof(*args)); + args->out.handle = 0; + goto out; + } + rc = alloc_device_memory(ctx, &args->in, &handle); memset(args, 0, sizeof(*args)); @@ -1255,6 +1374,26 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) break; case HL_MEM_OP_FREE: + /* If DRAM does not support virtual memory the driver won't + * handle the allocation/freeing of that memory. However, for + * system administration/monitoring purposes, the driver will + * keep track of the amount of DRAM memory that is allocated + * and freed by the user. Because this code totally relies on + * the user's input, the driver can't ensure the validity + * of this accounting. + */ + if (!hdev->asic_prop.dram_supports_virtual_memory) { + atomic64_sub(args->in.alloc.mem_size, + &ctx->dram_phys_mem); + atomic64_sub(args->in.alloc.mem_size, + &hdev->dram_used_mem); + + dev_dbg(hdev->dev, "DRAM alloc is not supported\n"); + rc = 0; + + goto out; + } + rc = free_device_memory(ctx, args->in.free.handle); break; @@ -1498,7 +1637,7 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, * addresses. 
*/ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, - u64 start, u64 end) + u64 start, u64 end, u32 page_size) { int rc; @@ -1528,6 +1667,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, va_range->start_addr = start; va_range->end_addr = end; + va_range->page_size = page_size; return 0; } @@ -1540,8 +1680,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, * This function does the following: * - Frees the virtual addresses block list and its lock */ -static void va_range_fini(struct hl_device *hdev, - struct hl_va_range *va_range) +static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range) { mutex_lock(&va_range->lock); clear_va_list_locked(hdev, &va_range->list); @@ -1571,102 +1710,97 @@ static void va_range_fini(struct hl_device *hdev, static int vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start, u64 host_range_end, + u32 host_page_size, u64 host_huge_range_start, u64 host_huge_range_end, + u32 host_huge_page_size, u64 dram_range_start, - u64 dram_range_end) + u64 dram_range_end, + u32 dram_page_size) { struct hl_device *hdev = ctx->hdev; - int rc; - - ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL); - if (!ctx->host_va_range) - return -ENOMEM; - - ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range), - GFP_KERNEL); - if (!ctx->host_huge_va_range) { - rc = -ENOMEM; - goto host_huge_va_range_err; - } - - ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL); - if (!ctx->dram_va_range) { - rc = -ENOMEM; - goto dram_va_range_err; + int i, rc; + + for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) { + ctx->va_range[i] = + kzalloc(sizeof(struct hl_va_range), GFP_KERNEL); + if (!ctx->va_range[i]) { + rc = -ENOMEM; + goto free_va_range; + } } rc = hl_mmu_ctx_init(ctx); if (rc) { dev_err(hdev->dev, "failed to init context %d\n", ctx->asid); - goto mmu_ctx_err; + goto free_va_range; } 
mutex_init(&ctx->mem_hash_lock); hash_init(ctx->mem_hash); - mutex_init(&ctx->host_va_range->lock); + mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); - rc = va_range_init(hdev, ctx->host_va_range, host_range_start, - host_range_end); + rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST], + host_range_start, host_range_end, host_page_size); if (rc) { dev_err(hdev->dev, "failed to init host vm range\n"); - goto host_page_range_err; + goto mmu_ctx_fini; } if (hdev->pmmu_huge_range) { - mutex_init(&ctx->host_huge_va_range->lock); + mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); - rc = va_range_init(hdev, ctx->host_huge_va_range, - host_huge_range_start, - host_huge_range_end); + rc = va_range_init(hdev, + ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE], + host_huge_range_start, host_huge_range_end, + host_huge_page_size); if (rc) { dev_err(hdev->dev, "failed to init host huge vm range\n"); - goto host_hpage_range_err; + goto clear_host_va_range; } } else { - kfree(ctx->host_huge_va_range); - ctx->host_huge_va_range = ctx->host_va_range; + kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]); + ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] = + ctx->va_range[HL_VA_RANGE_TYPE_HOST]; } - mutex_init(&ctx->dram_va_range->lock); + mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock); - rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start, - dram_range_end); + rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM], + dram_range_start, dram_range_end, dram_page_size); if (rc) { dev_err(hdev->dev, "failed to init dram vm range\n"); - goto dram_vm_err; + goto clear_host_huge_va_range; } hl_debugfs_add_ctx_mem_hash(hdev, ctx); return 0; -dram_vm_err: - mutex_destroy(&ctx->dram_va_range->lock); +clear_host_huge_va_range: + mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock); if (hdev->pmmu_huge_range) { - mutex_lock(&ctx->host_huge_va_range->lock); - clear_va_list_locked(hdev, &ctx->host_huge_va_range->list); - 
mutex_unlock(&ctx->host_huge_va_range->lock); + mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); + clear_va_list_locked(hdev, + &ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list); + mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); } -host_hpage_range_err: +clear_host_va_range: if (hdev->pmmu_huge_range) - mutex_destroy(&ctx->host_huge_va_range->lock); - mutex_lock(&ctx->host_va_range->lock); - clear_va_list_locked(hdev, &ctx->host_va_range->list); - mutex_unlock(&ctx->host_va_range->lock); -host_page_range_err: - mutex_destroy(&ctx->host_va_range->lock); + mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); + mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); + clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list); + mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); +mmu_ctx_fini: + mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); mutex_destroy(&ctx->mem_hash_lock); hl_mmu_ctx_fini(ctx); -mmu_ctx_err: - kfree(ctx->dram_va_range); -dram_va_range_err: - kfree(ctx->host_huge_va_range); -host_huge_va_range_err: - kfree(ctx->host_va_range); +free_va_range: + for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) + kfree(ctx->va_range[i]); return rc; } @@ -1676,6 +1810,7 @@ int hl_vm_ctx_init(struct hl_ctx *ctx) struct asic_fixed_properties *prop = &ctx->hdev->asic_prop; u64 host_range_start, host_range_end, host_huge_range_start, host_huge_range_end, dram_range_start, dram_range_end; + u32 host_page_size, host_huge_page_size, dram_page_size; atomic64_set(&ctx->dram_phys_mem, 0); @@ -1686,27 +1821,23 @@ int hl_vm_ctx_init(struct hl_ctx *ctx) * In case of DRAM mapping, the returned address is the physical * address of the memory related to the given handle. 
*/ - if (ctx->hdev->mmu_enable) { - dram_range_start = prop->dmmu.start_addr; - dram_range_end = prop->dmmu.end_addr; - host_range_start = prop->pmmu.start_addr; - host_range_end = prop->pmmu.end_addr; - host_huge_range_start = prop->pmmu_huge.start_addr; - host_huge_range_end = prop->pmmu_huge.end_addr; - } else { - dram_range_start = prop->dram_user_base_address; - dram_range_end = prop->dram_end_address; - host_range_start = prop->dram_user_base_address; - host_range_end = prop->dram_end_address; - host_huge_range_start = prop->dram_user_base_address; - host_huge_range_end = prop->dram_end_address; - } + if (!ctx->hdev->mmu_enable) + return 0; + + dram_range_start = prop->dmmu.start_addr; + dram_range_end = prop->dmmu.end_addr; + dram_page_size = prop->dmmu.page_size; + host_range_start = prop->pmmu.start_addr; + host_range_end = prop->pmmu.end_addr; + host_page_size = prop->pmmu.page_size; + host_huge_range_start = prop->pmmu_huge.start_addr; + host_huge_range_end = prop->pmmu_huge.end_addr; + host_huge_page_size = prop->pmmu_huge.page_size; return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, - host_huge_range_start, - host_huge_range_end, - dram_range_start, - dram_range_end); + host_page_size, host_huge_range_start, + host_huge_range_end, host_huge_page_size, + dram_range_start, dram_range_end, dram_page_size); } /* @@ -1738,6 +1869,9 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) struct hlist_node *tmp_node; int i; + if (!ctx->hdev->mmu_enable) + return; + hl_debugfs_remove_ctx_mem_hash(hdev, ctx); /* @@ -1772,13 +1906,21 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) } spin_unlock(&vm->idr_lock); - va_range_fini(hdev, ctx->dram_va_range); + va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]); + va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]); + if (hdev->pmmu_huge_range) - va_range_fini(hdev, ctx->host_huge_va_range); - va_range_fini(hdev, ctx->host_va_range); + va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]); 
mutex_destroy(&ctx->mem_hash_lock); hl_mmu_ctx_fini(ctx); + + /* In this case we need to clear the global accounting of DRAM usage + * because the user notifies us on allocations. If the user is no more, + * all DRAM is available + */ + if (!ctx->hdev->asic_prop.dram_supports_virtual_memory) + atomic64_set(&ctx->hdev->dram_used_mem, 0); } /* diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index b5058798aeb9..33ae953d3a36 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -22,18 +22,25 @@ static bool is_dram_va(struct hl_device *hdev, u64 virt_addr) * hl_mmu_init() - initialize the MMU module. * @hdev: habanalabs device structure. * - * This function does the following: - * - Create a pool of pages for pgt_infos. - * - Create a shadow table for pgt - * * Return: 0 for success, non-zero for failure. */ int hl_mmu_init(struct hl_device *hdev) { - if (hdev->mmu_enable) - return hdev->mmu_func.init(hdev); + int rc = -EOPNOTSUPP; - return 0; + if (!hdev->mmu_enable) + return 0; + + if (hdev->mmu_func[MMU_DR_PGT].init != NULL) { + rc = hdev->mmu_func[MMU_DR_PGT].init(hdev); + if (rc) + return rc; + } + + if (hdev->mmu_func[MMU_HR_PGT].init != NULL) + rc = hdev->mmu_func[MMU_HR_PGT].init(hdev); + + return rc; } /** @@ -48,8 +55,14 @@ int hl_mmu_init(struct hl_device *hdev) */ void hl_mmu_fini(struct hl_device *hdev) { - if (hdev->mmu_enable) - hdev->mmu_func.fini(hdev); + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].fini != NULL) + hdev->mmu_func[MMU_DR_PGT].fini(hdev); + + if (hdev->mmu_func[MMU_HR_PGT].fini != NULL) + hdev->mmu_func[MMU_HR_PGT].fini(hdev); } /** @@ -63,11 +76,23 @@ void hl_mmu_fini(struct hl_device *hdev) int hl_mmu_ctx_init(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; + int rc = -EOPNOTSUPP; - if (hdev->mmu_enable) - return hdev->mmu_func.ctx_init(ctx); + if (!hdev->mmu_enable) + return 0; - return 0; + mutex_init(&ctx->mmu_lock); + 
+ if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) { + rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx); + if (rc) + return rc; + } + + if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) + rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx); + + return rc; } /* @@ -84,12 +109,20 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; - if (hdev->mmu_enable) - hdev->mmu_func.ctx_fini(ctx); + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL) + hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL) + hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx); + + mutex_destroy(&ctx->mmu_lock); } /* - * hl_mmu_unmap - unmaps a virtual addr + * hl_mmu_unmap_page - unmaps a virtual addr * * @ctx: pointer to the context structure * @virt_addr: virt addr to map from @@ -109,7 +142,7 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx) * For optimization reasons PCI flush may be requested once after unmapping of * large area. */ -int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, +int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte) { struct hl_device *hdev = ctx->hdev; @@ -117,7 +150,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, struct hl_mmu_properties *mmu_prop; u64 real_virt_addr; u32 real_page_size, npages; - int i, rc = 0; + int i, rc = 0, pgt_residency; bool is_dram_addr; if (!hdev->mmu_enable) @@ -132,6 +165,8 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, else mmu_prop = &prop->pmmu; + pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; + /* * The H/W handles mapping of specific page sizes. Hence if the page * size is bigger, we break it to sub-pages and unmap them separately. 
@@ -150,7 +185,8 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, real_virt_addr = virt_addr; for (i = 0 ; i < npages ; i++) { - rc = hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr); + rc = hdev->mmu_func[pgt_residency].unmap(ctx, + real_virt_addr, is_dram_addr); if (rc) break; @@ -158,13 +194,13 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, } if (flush_pte) - hdev->mmu_func.flush(ctx); + hdev->mmu_func[pgt_residency].flush(ctx); return rc; } /* - * hl_mmu_map - maps a virtual addr to physical addr + * hl_mmu_map_page - maps a virtual addr to physical addr * * @ctx: pointer to the context structure * @virt_addr: virt addr to map from @@ -185,17 +221,18 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, * For optimization reasons PCI flush may be requested once after mapping of * large area. */ -int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, - bool flush_pte) +int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, + u32 page_size, bool flush_pte) { struct hl_device *hdev = ctx->hdev; struct asic_fixed_properties *prop = &hdev->asic_prop; struct hl_mmu_properties *mmu_prop; u64 real_virt_addr, real_phys_addr; u32 real_page_size, npages; - int i, rc, mapped_cnt = 0; + int i, rc, pgt_residency, mapped_cnt = 0; bool is_dram_addr; + if (!hdev->mmu_enable) return 0; @@ -208,6 +245,8 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, else mmu_prop = &prop->pmmu; + pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; + /* * The H/W handles mapping of specific page sizes. Hence if the page * size is bigger, we break it to sub-pages and map them separately. 
@@ -216,7 +255,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, real_page_size = mmu_prop->page_size; } else { dev_err(hdev->dev, - "page size of %u is not %uKB aligned, can't unmap\n", + "page size of %u is not %uKB aligned, can't map\n", page_size, mmu_prop->page_size >> 10); return -EFAULT; @@ -231,8 +270,9 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, real_phys_addr = phys_addr; for (i = 0 ; i < npages ; i++) { - rc = hdev->mmu_func.map(ctx, real_virt_addr, real_phys_addr, - real_page_size, is_dram_addr); + rc = hdev->mmu_func[pgt_residency].map(ctx, + real_virt_addr, real_phys_addr, + real_page_size, is_dram_addr); if (rc) goto err; @@ -242,21 +282,124 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, } if (flush_pte) - hdev->mmu_func.flush(ctx); + hdev->mmu_func[pgt_residency].flush(ctx); return 0; err: real_virt_addr = virt_addr; for (i = 0 ; i < mapped_cnt ; i++) { - if (hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr)) + if (hdev->mmu_func[pgt_residency].unmap(ctx, + real_virt_addr, is_dram_addr)) dev_warn_ratelimited(hdev->dev, "failed to unmap va: 0x%llx\n", real_virt_addr); real_virt_addr += real_page_size; } - hdev->mmu_func.flush(ctx); + hdev->mmu_func[pgt_residency].flush(ctx); + + return rc; +} + +/* + * hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page + * for mapping contiguous physical memory + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to map from + * @phys_addr: phys addr to map to + * @size: size to map + * + */ +int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr, + u64 phys_addr, u32 size) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 curr_va, curr_pa; + u32 page_size; + bool flush_pte; + int rc = 0, off; + + if (hl_mem_area_inside_range(virt_addr, size, + prop->dmmu.start_addr, prop->dmmu.end_addr)) + page_size = 
prop->dmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu.start_addr, prop->pmmu.end_addr)) + page_size = prop->pmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr)) + page_size = prop->pmmu_huge.page_size; + else + return -EINVAL; + + for (off = 0 ; off < size ; off += page_size) { + curr_va = virt_addr + off; + curr_pa = phys_addr + off; + flush_pte = (off + page_size) >= size; + rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size, + flush_pte); + if (rc) { + dev_err(hdev->dev, + "Map failed for va 0x%llx to pa 0x%llx\n", + curr_va, curr_pa); + goto unmap; + } + } + + return rc; + +unmap: + for (; off >= 0 ; off -= page_size) { + curr_va = virt_addr + off; + flush_pte = (off - (s32) page_size) < 0; + if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap va 0x%llx\n", curr_va); + } + + return rc; +} + +/* + * hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page + * for unmapping contiguous physical memory + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to unmap + * @size: size to unmap + * + */ +int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 curr_va; + u32 page_size; + bool flush_pte; + int rc = 0, off; + + if (hl_mem_area_inside_range(virt_addr, size, + prop->dmmu.start_addr, prop->dmmu.end_addr)) + page_size = prop->dmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu.start_addr, prop->pmmu.end_addr)) + page_size = prop->pmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr)) + page_size = prop->pmmu_huge.page_size; + else + return -EINVAL; + + for (off = 0 ; off < size ; off += page_size) { + curr_va = virt_addr + off; + flush_pte = (off 
+ page_size) >= size; + rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte); + if (rc) + dev_warn_ratelimited(hdev->dev, + "Unmap failed for va 0x%llx\n", curr_va); + } return rc; } @@ -271,8 +414,14 @@ void hl_mmu_swap_out(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; - if (hdev->mmu_enable) - hdev->mmu_func.swap_out(ctx); + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL) + hdev->mmu_func[MMU_DR_PGT].swap_out(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL) + hdev->mmu_func[MMU_HR_PGT].swap_out(ctx); } /* @@ -285,8 +434,64 @@ void hl_mmu_swap_in(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; - if (hdev->mmu_enable) - hdev->mmu_func.swap_in(ctx); + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL) + hdev->mmu_func[MMU_DR_PGT].swap_in(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL) + hdev->mmu_func[MMU_HR_PGT].swap_in(ctx); +} + +int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr) +{ + struct hl_mmu_hop_info hops; + u64 tmp_addr; + int rc; + + rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops); + if (rc) + return rc; + + /* last hop holds the phys address and flags */ + tmp_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val; + *phys_addr = (tmp_addr & HOP_PHYS_ADDR_MASK) | (virt_addr & FLAGS_MASK); + + return 0; +} + +int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + int rc; + bool is_dram_addr; + + if (!hdev->mmu_enable) + return -EOPNOTSUPP; + + is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); + + /* host-residency is the same in PMMU and HPMMU, use one of them */ + mmu_prop = is_dram_addr ? 
&prop->dmmu : &prop->pmmu; + + mutex_lock(&ctx->mmu_lock); + + if (mmu_prop->host_resident) + rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx, + virt_addr, hops); + else + rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx, + virt_addr, hops); + + mutex_unlock(&ctx->mmu_lock); + + return rc; } int hl_mmu_if_set_funcs(struct hl_device *hdev) @@ -297,7 +502,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev) switch (hdev->asic_type) { case ASIC_GOYA: case ASIC_GAUDI: - hl_mmu_v1_set_funcs(hdev); + hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]); break; default: dev_err(hdev->dev, "Unrecognized ASIC type %d\n", diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c index 8d1eb5265419..2ce6ea89d4fa 100644 --- a/drivers/misc/habanalabs/common/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu_v1.c @@ -8,7 +8,6 @@ #include "habanalabs.h" #include "../include/hw_ip/mmu/mmu_general.h" -#include <linux/genalloc.h> #include <linux/slab.h> static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr); @@ -29,7 +28,7 @@ static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info) { struct hl_device *hdev = ctx->hdev; - gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, pgt_info->phys_addr, + gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr, hdev->asic_prop.mmu_hop_table_size); hash_del(&pgt_info->node); kfree((u64 *) (uintptr_t) pgt_info->shadow_addr); @@ -54,7 +53,7 @@ static u64 alloc_hop(struct hl_ctx *ctx) if (!pgt_info) return ULLONG_MAX; - phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.mmu_pgt_pool, + phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_hop_table_size); if (!phys_addr) { dev_err(hdev->dev, "failed to allocate page\n"); @@ -75,7 +74,7 @@ static u64 alloc_hop(struct hl_ctx *ctx) return shadow_addr; shadow_err: - gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, phys_addr, + gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size); pool_add_err: 
kfree(pgt_info); @@ -91,7 +90,7 @@ static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx) static inline u64 get_hop0_addr(struct hl_ctx *ctx) { - return (u64) (uintptr_t) ctx->hdev->mmu_priv.mmu_shadow_hop0 + + return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); } @@ -263,7 +262,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx) hop2_pte_addr, hop3_pte_addr, pte_val; int rc, i, j, hop3_allocated = 0; - if ((!hdev->dram_supports_virtual_memory) || + if ((!prop->dram_supports_virtual_memory) || (!hdev->dram_default_page_mapping) || (ctx->asid == HL_KERNEL_ASID_ID)) return 0; @@ -363,7 +362,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx) hop2_pte_addr, hop3_pte_addr; int i, j; - if ((!hdev->dram_supports_virtual_memory) || + if ((!prop->dram_supports_virtual_memory) || (!hdev->dram_default_page_mapping) || (ctx->asid == HL_KERNEL_ASID_ID)) return; @@ -419,15 +418,15 @@ static int hl_mmu_v1_init(struct hl_device *hdev) struct asic_fixed_properties *prop = &hdev->asic_prop; int rc; - hdev->mmu_priv.mmu_pgt_pool = + hdev->mmu_priv.dr.mmu_pgt_pool = gen_pool_create(__ffs(prop->mmu_hop_table_size), -1); - if (!hdev->mmu_priv.mmu_pgt_pool) { + if (!hdev->mmu_priv.dr.mmu_pgt_pool) { dev_err(hdev->dev, "Failed to create page gen pool\n"); return -ENOMEM; } - rc = gen_pool_add(hdev->mmu_priv.mmu_pgt_pool, prop->mmu_pgt_addr + + rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr + prop->mmu_hop0_tables_total_size, prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size, -1); @@ -436,10 +435,10 @@ static int hl_mmu_v1_init(struct hl_device *hdev) goto err_pool_add; } - hdev->mmu_priv.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid, + hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid, prop->mmu_hop_table_size, GFP_KERNEL | __GFP_ZERO); - if (ZERO_OR_NULL_PTR(hdev->mmu_priv.mmu_shadow_hop0)) { + if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) { 
rc = -ENOMEM; goto err_pool_add; } @@ -449,7 +448,7 @@ static int hl_mmu_v1_init(struct hl_device *hdev) return 0; err_pool_add: - gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool); + gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); return rc; } @@ -468,8 +467,8 @@ static void hl_mmu_v1_fini(struct hl_device *hdev) { /* MMU H/W fini was already done in device hw_fini() */ - kvfree(hdev->mmu_priv.mmu_shadow_hop0); - gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool); + kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0); + gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); } /** @@ -482,9 +481,7 @@ static void hl_mmu_v1_fini(struct hl_device *hdev) */ static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx) { - mutex_init(&ctx->mmu_lock); hash_init(ctx->mmu_shadow_hash); - return dram_default_mapping_init(ctx); } @@ -517,8 +514,6 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx) pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes); _free_hop(ctx, pgt_info); } - - mutex_destroy(&ctx->mmu_lock); } static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, @@ -842,15 +837,114 @@ static void hl_mmu_v1_swap_in(struct hl_ctx *ctx) } +static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + int hop_num, u64 hop_addr, u64 virt_addr) +{ + switch (hop_num) { + case 0: + return get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 1: + return get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 2: + return get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 3: + return get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 4: + return get_hop4_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + default: + break; + } + return U64_MAX; +} + +static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge; + int i, 
used_hops; + + is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); + is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size, + prop->pmmu.start_addr, + prop->pmmu.end_addr); + is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr, + prop->pmmu_huge.page_size, + prop->pmmu_huge.start_addr, + prop->pmmu_huge.end_addr); + if (is_dram_addr) { + mmu_prop = &prop->dmmu; + is_huge = true; + } else if (is_pmmu_addr) { + mmu_prop = &prop->pmmu; + is_huge = false; + } else if (is_pmmu_h_addr) { + mmu_prop = &prop->pmmu_huge; + is_huge = true; + } else { + return -EINVAL; + } + + used_hops = mmu_prop->num_hops; + + /* huge pages use lesser hops */ + if (is_huge) + used_hops--; + + hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx); + hops->hop_info[0].hop_pte_addr = + get_hop_pte_addr(ctx, mmu_prop, 0, + hops->hop_info[0].hop_addr, virt_addr); + hops->hop_info[0].hop_pte_val = + hdev->asic_funcs->read_pte(hdev, + hops->hop_info[0].hop_pte_addr); + + for (i = 1 ; i < used_hops ; i++) { + hops->hop_info[i].hop_addr = + get_next_hop_addr(ctx, + hops->hop_info[i - 1].hop_pte_val); + if (hops->hop_info[i].hop_addr == ULLONG_MAX) + return -EFAULT; + + hops->hop_info[i].hop_pte_addr = + get_hop_pte_addr(ctx, mmu_prop, i, + hops->hop_info[i].hop_addr, + virt_addr); + hops->hop_info[i].hop_pte_val = + hdev->asic_funcs->read_pte(hdev, + hops->hop_info[i].hop_pte_addr); + + if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) + return -EFAULT; + + if (hops->hop_info[i].hop_pte_val & LAST_MASK) + break; + } + + /* if passed over all hops then no last hop was found */ + if (i == mmu_prop->num_hops) + return -EFAULT; + + if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) + return -EFAULT; + + hops->used_hops = i + 1; + + return 0; +} + /* * hl_mmu_v1_prepare - prepare mmu for working with mmu v1 * * @hdev: pointer to the device structure */ -void hl_mmu_v1_set_funcs(struct hl_device *hdev) 
+void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu) { - struct hl_mmu_funcs *mmu = &hdev->mmu_func; - mmu->init = hl_mmu_v1_init; mmu->fini = hl_mmu_v1_fini; mmu->ctx_init = hl_mmu_v1_ctx_init; @@ -860,4 +954,5 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev) mmu->flush = flush; mmu->swap_out = hl_mmu_v1_swap_out; mmu->swap_in = hl_mmu_v1_swap_in; + mmu->get_tlb_info = hl_mmu_v1_get_tlb_info; } diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c index 4327e5704ebb..923b2606e29f 100644 --- a/drivers/misc/habanalabs/common/pci.c +++ b/drivers/misc/habanalabs/common/pci.c @@ -338,17 +338,12 @@ static int hl_pci_set_dma_mask(struct hl_device *hdev) /** * hl_pci_init() - PCI initialization code. * @hdev: Pointer to hl_device structure. - * @cpu_boot_status_reg: status register of the device's CPU - * @boot_err0_reg: boot error register of the device's CPU - * @preboot_ver_timeout: how much to wait before bailing out on reading - * the preboot version * * Set DMA masks, initialize the PCI controller and map the PCI BARs. * * Return: 0 on success, non-zero for failure. 
*/ -int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 boot_err0_reg, u32 preboot_ver_timeout) +int hl_pci_init(struct hl_device *hdev) { struct pci_dev *pdev = hdev->pdev; int rc; @@ -380,15 +375,6 @@ int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, if (rc) goto unmap_pci_bars; - /* Before continuing in the initialization, we need to read the preboot - * version to determine whether we run with a security-enabled firmware - * The check will be done in each ASIC's specific code - */ - rc = hl_fw_read_preboot_ver(hdev, cpu_boot_status_reg, boot_err0_reg, - preboot_ver_timeout); - if (rc) - goto unmap_pci_bars; - return 0; unmap_pci_bars: diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c index 3ceae87016b1..4366d8f93842 100644 --- a/drivers/misc/habanalabs/common/sysfs.c +++ b/drivers/misc/habanalabs/common/sysfs.c @@ -12,7 +12,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -32,10 +32,10 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n", pll_index, rc); - result = rc; + return rc; } - return result; + return (long) result; } void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) @@ -62,7 +62,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) u64 hl_get_max_power(struct hl_device *hdev) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -75,7 +75,7 @@ u64 hl_get_max_power(struct hl_device *hdev) if (rc) { dev_err(hdev->dev, "Failed to get max power, error %d\n", rc); - result = rc; + return (u64) rc; } return result; @@ -276,6 +276,8 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr, str = "In reset"; else if (hdev->disabled) str = "Malfunction"; + else if 
(hdev->needs_reset) + str = "Needs Reset"; else str = "Operational"; @@ -304,7 +306,7 @@ static ssize_t max_power_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); long val; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; val = hl_get_max_power(hdev); @@ -319,7 +321,7 @@ static ssize_t max_power_store(struct device *dev, unsigned long value; int rc; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto out; } @@ -347,7 +349,7 @@ static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj, char *data; int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; if (!max_size) diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 7ea6b4368a91..1f1926607c5e 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -17,8 +17,6 @@ #include <linux/pci.h> #include <linux/firmware.h> #include <linux/hwmon.h> -#include <linux/genalloc.h> -#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/iommu.h> #include <linux/seq_file.h> @@ -38,7 +36,7 @@ * * MMU is always enabled. * - * QMAN DMA channels 0,1,5 (PCI DMAN): + * QMAN DMA channels 0,1 (PCI DMAN): * - DMA is not secured. * - PQ and CQ are secured. 
* - CP is secured: The driver needs to parse CB but WREG should be allowed @@ -55,7 +53,7 @@ * idle) * - MMU page tables area clear (happens on init) * - * QMAN DMA 2-4,6,7, TPC, MME, NIC: + * QMAN DMA 2-7, TPC, MME, NIC: * PQ is secured and is located on the Host (HBM CON TPC3 bug) * CQ, CP and the engine are not secured * @@ -67,7 +65,7 @@ #define GAUDI_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */ -#define GAUDI_RESET_TIMEOUT_MSEC 1000 /* 1000ms */ +#define GAUDI_RESET_TIMEOUT_MSEC 2000 /* 2000ms */ #define GAUDI_RESET_WAIT_MSEC 1 /* 1ms */ #define GAUDI_CPU_RESET_WAIT_MSEC 200 /* 200ms */ #define GAUDI_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */ @@ -103,6 +101,10 @@ BIT(GAUDI_ENGINE_ID_MME_2) |\ GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0)) +#define HBM_SCRUBBING_TIMEOUT_US 1000000 /* 1s */ + +#define GAUDI_PLL_MAX 10 + static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", @@ -113,12 +115,12 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = { [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0, [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1, - [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5, [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2, [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3, [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4, - [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6, - [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7 + [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_5, + [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_6, + [GAUDI_HBM_DMA_6] = GAUDI_ENGINE_ID_DMA_7 }; static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { @@ -130,10 +132,6 @@ static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { [5] = GAUDI_QUEUE_ID_DMA_1_1, [6] = GAUDI_QUEUE_ID_DMA_1_2, [7] = GAUDI_QUEUE_ID_DMA_1_3, - [8] = GAUDI_QUEUE_ID_DMA_5_0, - [9] = GAUDI_QUEUE_ID_DMA_5_1, - [10] = 
GAUDI_QUEUE_ID_DMA_5_2, - [11] = GAUDI_QUEUE_ID_DMA_5_3 }; static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = { @@ -153,6 +151,19 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = { [PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe) }; +static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = { + [CPU_PLL] = mmPSOC_CPU_PLL_NR, + [PCI_PLL] = mmPSOC_PCI_PLL_NR, + [SRAM_PLL] = mmSRAM_W_PLL_NR, + [HBM_PLL] = mmPSOC_HBM_PLL_NR, + [NIC_PLL] = mmNIC0_PLL_NR, + [DMA_PLL] = mmDMA_W_PLL_NR, + [MESH_PLL] = mmMESH_W_PLL_NR, + [MME_PLL] = mmPSOC_MME_PLL_NR, + [TPC_PLL] = mmPSOC_TPC_PLL_NR, + [IF_PLL] = mmIF_W_PLL_NR +}; + static inline bool validate_packet_id(enum packet_id id) { switch (id) { @@ -249,10 +260,10 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = { QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */ - QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_0 */ - QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_1 */ - QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_2 */ - QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_3 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */ @@ -301,46 +312,46 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = { QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_2 */ - 
QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_1 */ + 
QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_3 */ }; struct ecc_info_extract_params { @@ -362,6 +373,31 @@ static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev); static int gaudi_cpucp_info_get(struct hl_device *hdev); static void gaudi_disable_clock_gating(struct hl_device *hdev); static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid); +static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, + u32 size); +static u32 gaudi_gen_wait_cb(struct hl_device *hdev, + struct hl_gen_wait_properties *prop); + +static inline enum hl_collective_mode +get_collective_mode(struct hl_device *hdev, u32 queue_id) 
+{ + if (gaudi_queue_type[queue_id] == QUEUE_TYPE_EXT) + return HL_COLLECTIVE_MASTER; + + if (queue_id >= GAUDI_QUEUE_ID_DMA_5_0 && + queue_id <= GAUDI_QUEUE_ID_DMA_5_3) + return HL_COLLECTIVE_SLAVE; + + if (queue_id >= GAUDI_QUEUE_ID_TPC_7_0 && + queue_id <= GAUDI_QUEUE_ID_TPC_7_3) + return HL_COLLECTIVE_SLAVE; + + if (queue_id >= GAUDI_QUEUE_ID_NIC_0_0 && + queue_id <= GAUDI_QUEUE_ID_NIC_9_3) + return HL_COLLECTIVE_SLAVE; + + return HL_COLLECTIVE_NOT_SUPPORTED; +} static int gaudi_get_fixed_properties(struct hl_device *hdev) { @@ -381,29 +417,44 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) { prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 1; prop->hw_queues_props[i].supports_sync_stream = 1; + prop->hw_queues_props[i].cb_alloc_flags = + CB_ALLOC_KERNEL; num_sync_stream_queues++; } else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) { prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; prop->hw_queues_props[i].driver_only = 1; - prop->hw_queues_props[i].requires_kernel_cb = 0; prop->hw_queues_props[i].supports_sync_stream = 0; + prop->hw_queues_props[i].cb_alloc_flags = + CB_ALLOC_KERNEL; } else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) { prop->hw_queues_props[i].type = QUEUE_TYPE_INT; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 0; - } else if (gaudi_queue_type[i] == QUEUE_TYPE_NA) { - prop->hw_queues_props[i].type = QUEUE_TYPE_NA; - prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 0; prop->hw_queues_props[i].supports_sync_stream = 0; + prop->hw_queues_props[i].cb_alloc_flags = + CB_ALLOC_USER; + } + prop->hw_queues_props[i].collective_mode = + get_collective_mode(hdev, i); } prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; - prop->sync_stream_first_sob = 0; - prop->sync_stream_first_mon = 0; + prop->collective_first_sob = 0; + 
prop->collective_first_mon = 0; + + /* 2 SOBs per internal queue stream are reserved for collective */ + prop->sync_stream_first_sob = + ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR) + * QMAN_STREAMS * HL_RSVD_SOBS; + + /* 1 monitor per internal queue stream are reserved for collective + * 2 monitors per external queue stream are reserved for collective + */ + prop->sync_stream_first_mon = + (NUMBER_OF_COLLECTIVE_QUEUES * QMAN_STREAMS) + + (NUMBER_OF_EXT_HW_QUEUES * 2); + prop->dram_base_address = DRAM_PHYS_BASE; prop->dram_size = GAUDI_HBM_SIZE_32GB; prop->dram_end_address = prop->dram_base_address + @@ -426,6 +477,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->mmu_hop_table_size = HOP_TABLE_SIZE; prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; prop->dram_page_size = PAGE_SIZE_2MB; + prop->dram_supports_virtual_memory = false; prop->pmmu.hop0_shift = HOP0_SHIFT; prop->pmmu.hop1_shift = HOP1_SHIFT; @@ -472,9 +524,16 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->max_pending_cs = GAUDI_MAX_PENDING_CS; prop->first_available_user_sob[HL_GAUDI_WS_DCORE] = - num_sync_stream_queues * HL_RSVD_SOBS; + prop->sync_stream_first_sob + + (num_sync_stream_queues * HL_RSVD_SOBS); prop->first_available_user_mon[HL_GAUDI_WS_DCORE] = - num_sync_stream_queues * HL_RSVD_MONS; + prop->sync_stream_first_mon + + (num_sync_stream_queues * HL_RSVD_MONS); + + /* disable fw security for now, set it in a later stage */ + prop->fw_security_disabled = true; + prop->fw_security_status_valid = false; + prop->hard_reset_done_by_fw = false; return 0; } @@ -562,6 +621,11 @@ done: return rc; } +static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev) +{ + return RREG32(mmHW_STATE); +} + static int gaudi_early_init(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; @@ -599,17 +663,32 @@ static int gaudi_early_init(struct hl_device *hdev) prop->dram_pci_bar_size = pci_resource_len(pdev, 
HBM_BAR_ID); - rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, - mmCPU_BOOT_ERR0, GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); + rc = hl_pci_init(hdev); if (rc) goto free_queue_props; - /* GAUDI Firmware does not yet support security */ - prop->fw_security_disabled = true; - dev_info(hdev->dev, "firmware-level security is disabled\n"); + if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { + dev_info(hdev->dev, + "H/W state is dirty, must reset before initializing\n"); + hdev->asic_funcs->hw_fini(hdev, true); + } + + /* Before continuing in the initialization, we need to read the preboot + * version to determine whether we run with a security-enabled firmware + */ + rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, + GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); + if (rc) { + if (hdev->reset_on_preboot_fail) + hdev->asic_funcs->hw_fini(hdev, true); + goto pci_fini; + } return 0; +pci_fini: + hl_pci_fini(hdev); free_queue_props: kfree(hdev->asic_prop.hw_queues_props); return rc; @@ -624,44 +703,95 @@ static int gaudi_early_fini(struct hl_device *hdev) } /** - * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values + * gaudi_fetch_pll_frequency - Fetch PLL frequency values * * @hdev: pointer to hl_device structure + * @pll_index: index of the pll to fetch frequency from + * @pll_freq: pointer to store the pll frequency in MHz in each of the available + * outputs. 
if a certain output is not available a 0 will be set * */ -static void gaudi_fetch_psoc_frequency(struct hl_device *hdev) +static int gaudi_fetch_pll_frequency(struct hl_device *hdev, + enum gaudi_pll_index pll_index, + u16 *pll_freq_arr) { - struct asic_fixed_properties *prop = &hdev->asic_prop; - u32 trace_freq = 0; - u32 pll_clk = 0; - u32 div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2); - u32 div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2); - u32 nr = RREG32(mmPSOC_CPU_PLL_NR); - u32 nf = RREG32(mmPSOC_CPU_PLL_NF); - u32 od = RREG32(mmPSOC_CPU_PLL_OD); - - if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) { - if (div_sel == DIV_SEL_REF_CLK) - trace_freq = PLL_REF_CLK; - else - trace_freq = PLL_REF_CLK / (div_fctr + 1); - } else if (div_sel == DIV_SEL_PLL_CLK || + u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel, + pll_base_addr = gaudi_pll_base_addresses[pll_index]; + u16 freq = 0; + int i, rc; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_app_security_map & + CPU_BOOT_DEV_STS0_PLL_INFO_EN)) { + rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr); + + if (rc) + return rc; + } else if (hdev->asic_prop.fw_security_disabled) { + /* Backward compatibility */ + nr = RREG32(pll_base_addr + PLL_NR_OFFSET); + nf = RREG32(pll_base_addr + PLL_NF_OFFSET); + od = RREG32(pll_base_addr + PLL_OD_OFFSET); + + for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) { + div_fctr = RREG32(pll_base_addr + + PLL_DIV_FACTOR_0_OFFSET + i * 4); + div_sel = RREG32(pll_base_addr + + PLL_DIV_SEL_0_OFFSET + i * 4); + + if (div_sel == DIV_SEL_REF_CLK || + div_sel == DIV_SEL_DIVIDED_REF) { + if (div_sel == DIV_SEL_REF_CLK) + freq = PLL_REF_CLK; + else + freq = PLL_REF_CLK / (div_fctr + 1); + } else if (div_sel == DIV_SEL_PLL_CLK || div_sel == DIV_SEL_DIVIDED_PLL) { - pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)); - if (div_sel == DIV_SEL_PLL_CLK) - trace_freq = pll_clk; - else - trace_freq = pll_clk / (div_fctr + 1); + pll_clk = 
PLL_REF_CLK * (nf + 1) / + ((nr + 1) * (od + 1)); + if (div_sel == DIV_SEL_PLL_CLK) + freq = pll_clk; + else + freq = pll_clk / (div_fctr + 1); + } else { + dev_warn(hdev->dev, + "Received invalid div select value: %d", + div_sel); + } + + pll_freq_arr[i] = freq; + } } else { - dev_warn(hdev->dev, - "Received invalid div select value: %d", div_sel); + dev_err(hdev->dev, "Failed to fetch PLL frequency values\n"); + return -EIO; } - prop->psoc_timestamp_frequency = trace_freq; - prop->psoc_pci_pll_nr = nr; - prop->psoc_pci_pll_nf = nf; - prop->psoc_pci_pll_od = od; - prop->psoc_pci_pll_div_factor = div_fctr; + return 0; +} + +/** + * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values + * + * @hdev: pointer to hl_device structure + * + */ +static int gaudi_fetch_psoc_frequency(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u16 pll_freq[HL_PLL_NUM_OUTPUTS]; + int rc; + + rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq); + if (rc) + return rc; + + prop->psoc_timestamp_frequency = pll_freq[2]; + prop->psoc_pci_pll_nr = 0; + prop->psoc_pci_pll_nf = 0; + prop->psoc_pci_pll_od = 0; + prop->psoc_pci_pll_div_factor = 0; + + return 0; } static int _gaudi_init_tpc_mem(struct hl_device *hdev, @@ -708,7 +838,7 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev, job->id = 0; job->user_cb = cb; - job->user_cb->cs_cnt++; + atomic_inc(&job->user_cb->cs_cnt); job->user_cb_size = cb_size; job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; job->patched_cb = job->user_cb; @@ -731,7 +861,7 @@ free_job: hl_userptr_delete_list(hdev, &job->userptr_list); hl_debugfs_remove_job(hdev, job); kfree(job); - cb->cs_cnt--; + atomic_dec(&cb->cs_cnt); release_cb: hl_cb_put(cb); @@ -786,6 +916,451 @@ out: return rc; } +static void gaudi_collective_map_sobs(struct hl_device *hdev, u32 stream) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_collective_properties *prop = &gaudi->collective_props; + struct hl_hw_queue *q; + u32 i, 
sob_id, sob_group_id, queue_id; + + /* Iterate through SOB groups and assign a SOB for each slave queue */ + sob_group_id = + stream * HL_RSVD_SOBS + prop->curr_sob_group_idx[stream]; + sob_id = prop->hw_sob_group[sob_group_id].base_sob_id; + + queue_id = GAUDI_QUEUE_ID_NIC_0_0 + stream; + for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) { + q = &hdev->kernel_queues[queue_id + (4 * i)]; + q->sync_stream_prop.collective_sob_id = sob_id + i; + } + + /* Both DMA5 and TPC7 use the same resources since only a single + * engine need to participate in the reduction process + */ + queue_id = GAUDI_QUEUE_ID_DMA_5_0 + stream; + q = &hdev->kernel_queues[queue_id]; + q->sync_stream_prop.collective_sob_id = + sob_id + NIC_NUMBER_OF_ENGINES; + + queue_id = GAUDI_QUEUE_ID_TPC_7_0 + stream; + q = &hdev->kernel_queues[queue_id]; + q->sync_stream_prop.collective_sob_id = + sob_id + NIC_NUMBER_OF_ENGINES; +} + +static void gaudi_sob_group_hw_reset(struct kref *ref) +{ + struct gaudi_hw_sob_group *hw_sob_group = + container_of(ref, struct gaudi_hw_sob_group, kref); + struct hl_device *hdev = hw_sob_group->hdev; + int i; + + for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++) + WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + + (hw_sob_group->base_sob_id + i) * 4, 0); + + kref_init(&hw_sob_group->kref); +} + +static void gaudi_sob_group_reset_error(struct kref *ref) +{ + struct gaudi_hw_sob_group *hw_sob_group = + container_of(ref, struct gaudi_hw_sob_group, kref); + struct hl_device *hdev = hw_sob_group->hdev; + + dev_crit(hdev->dev, + "SOB release shouldn't be called here, base_sob_id: %d\n", + hw_sob_group->base_sob_id); +} + +static int gaudi_collective_init(struct hl_device *hdev) +{ + u32 i, master_monitor_sobs, sob_id, reserved_sobs_per_group; + struct gaudi_collective_properties *prop; + struct gaudi_device *gaudi; + + gaudi = hdev->asic_specific; + prop = &gaudi->collective_props; + sob_id = hdev->asic_prop.collective_first_sob; + + /* First sob in group must be aligned to 
HL_MAX_SOBS_PER_MONITOR */ + reserved_sobs_per_group = + ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR); + + /* Init SOB groups */ + for (i = 0 ; i < NUM_SOB_GROUPS; i++) { + prop->hw_sob_group[i].hdev = hdev; + prop->hw_sob_group[i].base_sob_id = sob_id; + sob_id += reserved_sobs_per_group; + gaudi_sob_group_hw_reset(&prop->hw_sob_group[i].kref); + } + + for (i = 0 ; i < QMAN_STREAMS; i++) { + prop->next_sob_group_val[i] = 1; + prop->curr_sob_group_idx[i] = 0; + gaudi_collective_map_sobs(hdev, i); + } + + prop->mstr_sob_mask[0] = 0; + master_monitor_sobs = HL_MAX_SOBS_PER_MONITOR; + for (i = 0 ; i < master_monitor_sobs ; i++) + if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i)) + prop->mstr_sob_mask[0] |= BIT(i); + + prop->mstr_sob_mask[1] = 0; + master_monitor_sobs = + NIC_NUMBER_OF_ENGINES - HL_MAX_SOBS_PER_MONITOR; + for (i = 0 ; i < master_monitor_sobs; i++) { + if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i)) + prop->mstr_sob_mask[1] |= BIT(i); + } + + /* Set collective engine bit */ + prop->mstr_sob_mask[1] |= BIT(i); + + return 0; +} + +static void gaudi_reset_sob_group(struct hl_device *hdev, u16 sob_group) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_collective_properties *cprop = &gaudi->collective_props; + + kref_put(&cprop->hw_sob_group[sob_group].kref, + gaudi_sob_group_hw_reset); +} + +static void gaudi_collective_master_init_job(struct hl_device *hdev, + struct hl_cs_job *job, u32 stream, u32 sob_group_offset) +{ + u32 master_sob_base, master_monitor, queue_id, cb_size = 0; + struct gaudi_collective_properties *cprop; + struct hl_gen_wait_properties wait_prop; + struct hl_sync_stream_properties *prop; + struct gaudi_device *gaudi; + + gaudi = hdev->asic_specific; + cprop = &gaudi->collective_props; + queue_id = job->hw_queue_id; + prop = &hdev->kernel_queues[queue_id].sync_stream_prop; + + master_sob_base = + cprop->hw_sob_group[sob_group_offset].base_sob_id; + master_monitor = 
prop->collective_mstr_mon_id[0]; + + dev_dbg(hdev->dev, + "Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n", + master_sob_base, cprop->mstr_sob_mask[0], + cprop->next_sob_group_val[stream], + master_monitor, queue_id); + + wait_prop.data = (void *) job->patched_cb; + wait_prop.sob_base = master_sob_base; + wait_prop.sob_mask = cprop->mstr_sob_mask[0]; + wait_prop.sob_val = cprop->next_sob_group_val[stream]; + wait_prop.mon_id = master_monitor; + wait_prop.q_idx = queue_id; + wait_prop.size = cb_size; + cb_size += gaudi_gen_wait_cb(hdev, &wait_prop); + + master_sob_base += HL_MAX_SOBS_PER_MONITOR; + master_monitor = prop->collective_mstr_mon_id[1]; + + dev_dbg(hdev->dev, + "Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n", + master_sob_base, cprop->mstr_sob_mask[1], + cprop->next_sob_group_val[stream], + master_monitor, queue_id); + + wait_prop.sob_base = master_sob_base; + wait_prop.sob_mask = cprop->mstr_sob_mask[1]; + wait_prop.mon_id = master_monitor; + wait_prop.size = cb_size; + cb_size += gaudi_gen_wait_cb(hdev, &wait_prop); +} + +static void gaudi_collective_slave_init_job(struct hl_device *hdev, + struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl) +{ + struct hl_gen_wait_properties wait_prop; + struct hl_sync_stream_properties *prop; + u32 queue_id, cb_size = 0; + + queue_id = job->hw_queue_id; + prop = &hdev->kernel_queues[queue_id].sync_stream_prop; + + /* Add to wait CBs using slave monitor */ + wait_prop.data = (void *) job->user_cb; + wait_prop.sob_base = cs_cmpl->hw_sob->sob_id; + wait_prop.sob_mask = 0x1; + wait_prop.sob_val = cs_cmpl->sob_val; + wait_prop.mon_id = prop->collective_slave_mon_id; + wait_prop.q_idx = queue_id; + wait_prop.size = cb_size; + + dev_dbg(hdev->dev, + "Generate slave wait CB, sob %d, val:0x%x, mon %d, q %d\n", + cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, + prop->collective_slave_mon_id, queue_id); + + cb_size += gaudi_gen_wait_cb(hdev, &wait_prop); + + dev_dbg(hdev->dev, + 
"generate signal CB, sob_id: %d, sob val: 1, q_idx: %d\n", + prop->collective_sob_id, queue_id); + + cb_size += gaudi_gen_signal_cb(hdev, job->user_cb, + prop->collective_sob_id, cb_size); +} + +static void gaudi_collective_wait_init_cs(struct hl_cs *cs) +{ + struct hl_cs_compl *signal_cs_cmpl = + container_of(cs->signal_fence, struct hl_cs_compl, base_fence); + struct hl_cs_compl *cs_cmpl = + container_of(cs->fence, struct hl_cs_compl, base_fence); + struct gaudi_collective_properties *cprop; + u32 stream, queue_id, sob_group_offset; + struct gaudi_device *gaudi; + struct hl_device *hdev; + struct hl_cs_job *job; + struct hl_ctx *ctx; + + ctx = cs->ctx; + hdev = ctx->hdev; + gaudi = hdev->asic_specific; + cprop = &gaudi->collective_props; + + /* copy the SOB id and value of the signal CS */ + cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; + cs_cmpl->sob_val = signal_cs_cmpl->sob_val; + + /* Calculate the stream from collective master queue (1st job) */ + job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node); + stream = job->hw_queue_id % 4; + sob_group_offset = + stream * HL_RSVD_SOBS + cprop->curr_sob_group_idx[stream]; + + list_for_each_entry(job, &cs->job_list, cs_node) { + queue_id = job->hw_queue_id; + + if (hdev->kernel_queues[queue_id].collective_mode == + HL_COLLECTIVE_MASTER) + gaudi_collective_master_init_job(hdev, job, stream, + sob_group_offset); + else + gaudi_collective_slave_init_job(hdev, job, cs_cmpl); + } + + cs_cmpl->sob_group = sob_group_offset; + + /* Handle sob group kref and wraparound */ + kref_get(&cprop->hw_sob_group[sob_group_offset].kref); + cprop->next_sob_group_val[stream]++; + + if (cprop->next_sob_group_val[stream] == HL_MAX_SOB_VAL) { + /* + * Decrement as we reached the max value. + * The release function won't be called here as we've + * just incremented the refcount. 
+ */ + kref_put(&cprop->hw_sob_group[sob_group_offset].kref, + gaudi_sob_group_reset_error); + cprop->next_sob_group_val[stream] = 1; + /* only two SOBs are currently in use */ + cprop->curr_sob_group_idx[stream] = + (cprop->curr_sob_group_idx[stream] + 1) & + (HL_RSVD_SOBS - 1); + + gaudi_collective_map_sobs(hdev, stream); + + dev_dbg(hdev->dev, "switched to SOB group %d, stream: %d\n", + cprop->curr_sob_group_idx[stream], stream); + } + + /* Increment kref since all slave queues are now waiting on it */ + kref_get(&cs_cmpl->hw_sob->kref); + /* + * Must put the signal fence after the SOB refcnt increment so + * the SOB refcnt won't turn 0 and reset the SOB before the + * wait CS was submitted. + */ + mb(); + hl_fence_put(cs->signal_fence); + cs->signal_fence = NULL; +} + +static int gaudi_collective_wait_create_job(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, + enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id) +{ + struct hw_queue_properties *hw_queue_prop; + struct hl_cs_counters_atomic *cntr; + struct hl_cs_job *job; + struct hl_cb *cb; + u32 cb_size; + bool patched_cb; + + cntr = &hdev->aggregated_cs_counters; + + if (mode == HL_COLLECTIVE_MASTER) { + /* CB size of collective master queue contains + * 4 msg short packets for monitor 1 configuration + * 1 fence packet + * 4 msg short packets for monitor 2 configuration + * 1 fence packet + * 2 msg prot packets for completion and MSI-X + */ + cb_size = sizeof(struct packet_msg_short) * 8 + + sizeof(struct packet_fence) * 2 + + sizeof(struct packet_msg_prot) * 2; + patched_cb = true; + } else { + /* CB size of collective slave queues contains + * 4 msg short packets for monitor configuration + * 1 fence packet + * 1 additional msg short packet for sob signal + */ + cb_size = sizeof(struct packet_msg_short) * 5 + + sizeof(struct packet_fence); + patched_cb = false; + } + + hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id]; + job = hl_cs_allocate_job(hdev, 
hw_queue_prop->type, true); + if (!job) { + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); + atomic64_inc(&cntr->out_of_mem_drop_cnt); + dev_err(hdev->dev, "Failed to allocate a new job\n"); + return -ENOMEM; + } + + /* Allocate internal mapped CB for non patched CBs */ + cb = hl_cb_kernel_create(hdev, cb_size, + hdev->mmu_enable && !patched_cb); + if (!cb) { + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); + atomic64_inc(&cntr->out_of_mem_drop_cnt); + kfree(job); + return -EFAULT; + } + + job->id = 0; + job->cs = cs; + job->user_cb = cb; + atomic_inc(&job->user_cb->cs_cnt); + job->user_cb_size = cb_size; + job->hw_queue_id = queue_id; + + /* + * No need in parsing, user CB is the patched CB. + * We call hl_cb_destroy() out of two reasons - we don't need + * the CB in the CB idr anymore and to decrement its refcount as + * it was incremented inside hl_cb_kernel_create(). + */ + if (patched_cb) + job->patched_cb = job->user_cb; + else + job->patched_cb = NULL; + + job->job_cb_size = job->user_cb_size; + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + /* increment refcount as for external queues we get completion */ + if (hw_queue_prop->type == QUEUE_TYPE_EXT) + cs_get(cs); + + cs->jobs_in_queue_cnt[job->hw_queue_id]++; + + list_add_tail(&job->cs_node, &cs->job_list); + + hl_debugfs_add_job(hdev, job); + + return 0; +} + +static int gaudi_collective_wait_create_jobs(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, + u32 collective_engine_id) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct hw_queue_properties *hw_queue_prop; + u32 queue_id, collective_queue, num_jobs; + u32 stream, nic_queue, nic_idx = 0; + bool skip; + int i, rc; + + /* Verify wait queue id is configured as master */ + hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id]; + if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) { + dev_err(hdev->dev, + "Queue %d is not configured as collective 
master\n", + wait_queue_id); + return -EINVAL; + } + + /* Verify engine id is supported */ + if (collective_engine_id != GAUDI_ENGINE_ID_DMA_5 && + collective_engine_id != GAUDI_ENGINE_ID_TPC_7) { + dev_err(hdev->dev, + "Collective wait does not support engine %u\n", + collective_engine_id); + return -EINVAL; + } + + stream = wait_queue_id % 4; + + if (collective_engine_id == GAUDI_ENGINE_ID_DMA_5) + collective_queue = GAUDI_QUEUE_ID_DMA_5_0 + stream; + else + collective_queue = GAUDI_QUEUE_ID_TPC_7_0 + stream; + + num_jobs = NUMBER_OF_SOBS_IN_GRP + 1; + nic_queue = GAUDI_QUEUE_ID_NIC_0_0 + stream; + + /* First job goes to the collective master queue, it will wait for + * the collective slave queues to finish execution. + * The synchronization is done using two monitors: + * First monitor for NICs 0-7, second monitor for NICs 8-9 and the + * reduction engine (DMA5/TPC7). + * + * Rest of the jobs goes to the collective slave queues which will + * all wait for the user to signal sob 'cs_cmpl->sob_val'. 
+ */ + for (i = 0 ; i < num_jobs ; i++) { + if (i == 0) { + queue_id = wait_queue_id; + rc = gaudi_collective_wait_create_job(hdev, ctx, cs, + HL_COLLECTIVE_MASTER, queue_id, wait_queue_id); + } else { + if (nic_idx < NIC_NUMBER_OF_ENGINES) { + if (gaudi->hw_cap_initialized & + BIT(HW_CAP_NIC_SHIFT + nic_idx)) + skip = false; + else + skip = true; + + queue_id = nic_queue; + nic_queue += 4; + nic_idx++; + + if (skip) + continue; + } else { + queue_id = collective_queue; + } + + rc = gaudi_collective_wait_create_job(hdev, ctx, cs, + HL_COLLECTIVE_SLAVE, queue_id, wait_queue_id); + } + + if (rc) + return rc; + } + + return rc; +} + static int gaudi_late_init(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -797,6 +1372,27 @@ static int gaudi_late_init(struct hl_device *hdev) return rc; } + if ((hdev->card_type == cpucp_card_type_pci) && + (hdev->nic_ports_mask & 0x3)) { + dev_info(hdev->dev, + "PCI card detected, only 8 ports are enabled\n"); + hdev->nic_ports_mask &= ~0x3; + + /* Stop and disable unused NIC QMANs */ + WREG32(mmNIC0_QM0_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + WREG32(mmNIC0_QM1_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + WREG32(mmNIC0_QM0_GLBL_CFG0, 0); + WREG32(mmNIC0_QM1_GLBL_CFG0, 0); + + gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1); + } + rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS); if (rc) { dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); @@ -805,7 +1401,11 @@ static int gaudi_late_init(struct hl_device *hdev) WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER); - gaudi_fetch_psoc_frequency(hdev); + rc = gaudi_fetch_psoc_frequency(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to fetch psoc frequency\n"); + goto disable_pci_access; + } rc = gaudi_mmu_clear_pgt_range(hdev); if (rc) { @@ 
-819,6 +1419,12 @@ static int gaudi_late_init(struct hl_device *hdev) goto disable_pci_access; } + rc = gaudi_collective_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to init collective\n"); + goto disable_pci_access; + } + return 0; disable_pci_access: @@ -892,7 +1498,8 @@ static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev) hdev->cpu_pci_msb_addr = GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address); - GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address); + if (hdev->asic_prop.fw_security_disabled) + GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address); free_dma_mem_arr: for (j = 0 ; j < i ; j++) @@ -933,8 +1540,7 @@ static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev) q = &gaudi->internal_qmans[i]; switch (i) { - case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_4_3: - case GAUDI_QUEUE_ID_DMA_6_0 ... GAUDI_QUEUE_ID_DMA_7_3: + case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_7_3: q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES; break; case GAUDI_QUEUE_ID_MME_0_0 ... GAUDI_QUEUE_ID_MME_1_3: @@ -943,6 +1549,9 @@ static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev) case GAUDI_QUEUE_ID_TPC_0_0 ... GAUDI_QUEUE_ID_TPC_7_3: q->pq_size = TPC_QMAN_SIZE_IN_BYTES; break; + case GAUDI_QUEUE_ID_NIC_0_0 ... 
GAUDI_QUEUE_ID_NIC_9_3: + q->pq_size = NIC_QMAN_SIZE_IN_BYTES; + break; default: dev_err(hdev->dev, "Bad internal queue index %d", i); rc = -EINVAL; @@ -1044,8 +1653,9 @@ static int gaudi_sw_init(struct hl_device *hdev) free_cpu_accessible_dma_pool: gen_pool_destroy(hdev->cpu_accessible_dma_pool); free_cpu_dma_mem: - GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, - hdev->cpu_pci_msb_addr); + if (hdev->asic_prop.fw_security_disabled) + GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, + hdev->cpu_pci_msb_addr); hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, @@ -1065,8 +1675,10 @@ static int gaudi_sw_fini(struct hl_device *hdev) gen_pool_destroy(hdev->cpu_accessible_dma_pool); - GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, + if (hdev->asic_prop.fw_security_disabled) + GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, hdev->cpu_pci_msb_addr); + hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, @@ -1120,7 +1732,7 @@ static int gaudi_enable_msi_single(struct hl_device *hdev) { int rc, irq; - dev_info(hdev->dev, "Working in single MSI IRQ mode\n"); + dev_dbg(hdev->dev, "Working in single MSI IRQ mode\n"); irq = gaudi_pci_irq_vector(hdev, 0, false); rc = request_irq(irq, gaudi_irq_handler_single, 0, @@ -1252,6 +1864,14 @@ static void gaudi_init_scrambler_sram(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; + if (!hdev->asic_prop.fw_security_disabled) + return; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_app_security_map & + CPU_BOOT_DEV_STS0_SRAM_SCR_EN)) + return; + if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER) return; @@ -1316,6 +1936,14 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; + if (!hdev->asic_prop.fw_security_disabled) + return; + + if (hdev->asic_prop.fw_security_status_valid && + 
(hdev->asic_prop.fw_boot_cpu_security_map & + CPU_BOOT_DEV_STS0_DRAM_SCR_EN)) + return; + if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER) return; @@ -1378,6 +2006,14 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev) static void gaudi_init_e2e(struct hl_device *hdev) { + if (!hdev->asic_prop.fw_security_disabled) + return; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_boot_cpu_security_map & + CPU_BOOT_DEV_STS0_E2E_CRED_EN)) + return; + WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3); WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3); WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49); @@ -1745,6 +2381,14 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev) { uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd; + if (!hdev->asic_prop.fw_security_disabled) + return; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_boot_cpu_security_map & + CPU_BOOT_DEV_STS0_HBM_CRED_EN)) + return; + hbm0_wr = 0x33333333; hbm0_rd = 0x77777777; hbm1_wr = 0x55555555; @@ -1803,7 +2447,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev) int tpc_id, i; gaudi_init_e2e(hdev); - gaudi_init_hbm_cred(hdev); hdev->asic_funcs->disable_clock_gating(hdev); @@ -1998,21 +2641,29 @@ static void gaudi_init_pci_dma_qmans(struct hl_device *hdev) static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id, int qman_id, u64 qman_base_addr) { - u32 mtr_base_lo, mtr_base_hi; - u32 so_base_lo, so_base_hi; + u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi; + u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi; u32 q_off, dma_qm_offset; u32 dma_qm_err_cfg; dma_qm_offset = dma_id * DMA_QMAN_OFFSET; - mtr_base_lo = lower_32_bits(CFG_BASE + - mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - mtr_base_hi = upper_32_bits(CFG_BASE + + mtr_base_en_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_en_hi = upper_32_bits(CFG_BASE + 
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - so_base_lo = lower_32_bits(CFG_BASE + + so_base_en_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); - so_base_hi = upper_32_bits(CFG_BASE + + so_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + mtr_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); q_off = dma_qm_offset + qman_id * 4; @@ -2070,10 +2721,22 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id, QMAN_INTERNAL_MAKE_TRUSTED); } - WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); - WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); - WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); - WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi); + + /* Configure DMA5 CP_MSG_BASE 2/3 for sync stream collective */ + if (gaudi_dma_assignment[dma_id] == GAUDI_ENGINE_ID_DMA_5) { + WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, + mtr_base_ws_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, + mtr_base_ws_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, + so_base_ws_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, + so_base_ws_hi); + } } static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev) @@ -2236,22 +2899,33 @@ static void gaudi_init_mme_qmans(struct hl_device *hdev) static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset, int 
qman_id, u64 qman_base_addr) { - u32 mtr_base_lo, mtr_base_hi; - u32 so_base_lo, so_base_hi; + u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi; + u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi; u32 q_off, tpc_id; u32 tpc_qm_err_cfg; - mtr_base_lo = lower_32_bits(CFG_BASE + - mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - mtr_base_hi = upper_32_bits(CFG_BASE + + mtr_base_en_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - so_base_lo = lower_32_bits(CFG_BASE + + so_base_en_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); - so_base_hi = upper_32_bits(CFG_BASE + + so_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + mtr_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); q_off = tpc_offset + qman_id * 4; + tpc_id = tpc_offset / + (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0); + if (qman_id < 4) { WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_base_addr)); @@ -2277,9 +2951,6 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset, QMAN_LDMA_DST_OFFSET); /* Configure RAZWI IRQ */ - tpc_id = tpc_offset / - (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0); - tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK; if (hdev->stop_on_err) { tpc_qm_err_cfg |= @@ -2309,10 +2980,22 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset, QMAN_INTERNAL_MAKE_TRUSTED); } - WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); - WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); - 
WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); - WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi); + + /* Configure TPC7 CP_MSG_BASE 2/3 for sync stream collective */ + if (tpc_id == 6) { + WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, + mtr_base_ws_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, + mtr_base_ws_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, + so_base_ws_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, + so_base_ws_hi); + } } static void gaudi_init_tpc_qmans(struct hl_device *hdev) @@ -2360,6 +3043,142 @@ static void gaudi_init_tpc_qmans(struct hl_device *hdev) } } +static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset, + int qman_id, u64 qman_base_addr, int nic_id) +{ + u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi; + u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi; + u32 q_off; + u32 nic_qm_err_cfg; + + mtr_base_en_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_en_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_en_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_en_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + mtr_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + + q_off = nic_offset + qman_id * 4; + + 
WREG32(mmNIC0_QM0_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_base_addr)); + WREG32(mmNIC0_QM0_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_base_addr)); + + WREG32(mmNIC0_QM0_PQ_SIZE_0 + q_off, ilog2(NIC_QMAN_LENGTH)); + WREG32(mmNIC0_QM0_PQ_PI_0 + q_off, 0); + WREG32(mmNIC0_QM0_PQ_CI_0 + q_off, 0); + + WREG32(mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 + q_off, + QMAN_LDMA_SIZE_OFFSET); + WREG32(mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_SRC_OFFSET); + WREG32(mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_DST_OFFSET); + + WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi); + WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi); + + /* Configure NIC CP_MSG_BASE 2/3 for sync stream collective */ + WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi); + WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi); + + if (qman_id == 0) { + /* Configure RAZWI IRQ */ + nic_qm_err_cfg = NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK; + if (hdev->stop_on_err) { + nic_qm_err_cfg |= + NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK; + } + + WREG32(mmNIC0_QM0_GLBL_ERR_CFG + nic_offset, nic_qm_err_cfg); + WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_LO + nic_offset, + lower_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_HI + nic_offset, + upper_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmNIC0_QM0_GLBL_ERR_WDATA + nic_offset, + gaudi_irq_map_table[GAUDI_EVENT_NIC0_QM0].cpu_id + + nic_id); + + WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset, + QM_ARB_ERR_MSG_EN_MASK); + + /* Increase ARB WDT to support streams architecture */ + WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset, + GAUDI_ARB_WDT_TIMEOUT); + + 
WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0); + WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset, + QMAN_INTERNAL_MAKE_TRUSTED); + } +} + +static void gaudi_init_nic_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_internal_qman_info *q; + u64 qman_base_addr; + u32 nic_offset = 0; + u32 nic_delta_between_qmans = + mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; + u32 nic_delta_between_nics = + mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; + int i, nic_id, internal_q_index; + + if (!hdev->nic_ports_mask) + return; + + if (gaudi->hw_cap_initialized & HW_CAP_NIC_MASK) + return; + + dev_dbg(hdev->dev, "Initializing NIC QMANs\n"); + + for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) { + if (!(hdev->nic_ports_mask & (1 << nic_id))) { + nic_offset += nic_delta_between_qmans; + if (nic_id & 1) { + nic_offset -= (nic_delta_between_qmans * 2); + nic_offset += nic_delta_between_nics; + } + continue; + } + + for (i = 0 ; i < QMAN_STREAMS ; i++) { + internal_q_index = GAUDI_QUEUE_ID_NIC_0_0 + + nic_id * QMAN_STREAMS + i; + q = &gaudi->internal_qmans[internal_q_index]; + qman_base_addr = (u64) q->pq_dma_addr; + gaudi_init_nic_qman(hdev, nic_offset, (i & 0x3), + qman_base_addr, nic_id); + } + + /* Enable the QMAN */ + WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, NIC_QMAN_ENABLE); + + nic_offset += nic_delta_between_qmans; + if (nic_id & 1) { + nic_offset -= (nic_delta_between_qmans * 2); + nic_offset += nic_delta_between_nics; + } + + gaudi->hw_cap_initialized |= 1 << (HW_CAP_NIC_SHIFT + nic_id); + } +} + static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -2412,6 +3231,30 @@ static void gaudi_disable_tpc_qmans(struct hl_device *hdev) } } +static void gaudi_disable_nic_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 nic_mask, nic_offset = 0; + u32 nic_delta_between_qmans = + mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; + u32 
nic_delta_between_nics = + mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; + int nic_id; + + for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) { + nic_mask = 1 << (HW_CAP_NIC_SHIFT + nic_id); + + if (gaudi->hw_cap_initialized & nic_mask) + WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, 0); + + nic_offset += nic_delta_between_qmans; + if (nic_id & 1) { + nic_offset -= (nic_delta_between_qmans * 2); + nic_offset += nic_delta_between_nics; + } + } +} + static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -2470,6 +3313,73 @@ static void gaudi_stop_tpc_qmans(struct hl_device *hdev) WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); } +static void gaudi_stop_nic_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + /* Stop upper CPs of QMANs */ + + if (gaudi->hw_cap_initialized & HW_CAP_NIC0) + WREG32(mmNIC0_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC1) + WREG32(mmNIC0_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC2) + WREG32(mmNIC1_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC3) + WREG32(mmNIC1_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC4) + WREG32(mmNIC2_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC5) + WREG32(mmNIC2_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + 
NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC6) + WREG32(mmNIC3_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC7) + WREG32(mmNIC3_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC8) + WREG32(mmNIC4_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC9) + WREG32(mmNIC4_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); +} + static void gaudi_pci_dma_stall(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -2659,7 +3569,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset) else wait_timeout_ms = GAUDI_RESET_WAIT_MSEC; - + gaudi_stop_nic_qmans(hdev); gaudi_stop_mme_qmans(hdev); gaudi_stop_tpc_qmans(hdev); gaudi_stop_hbm_dma_qmans(hdev); @@ -2676,6 +3586,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset) msleep(wait_timeout_ms); + gaudi_disable_nic_qmans(hdev); gaudi_disable_mme_qmans(hdev); gaudi_disable_tpc_qmans(hdev); gaudi_disable_hbm_dma_qmans(hdev); @@ -2699,8 +3610,6 @@ static int gaudi_mmu_init(struct hl_device *hdev) if (gaudi->hw_cap_initialized & HW_CAP_MMU) return 0; - hdev->dram_supports_virtual_memory = false; - for (i = 0 ; i < prop->max_asid ; i++) { hop0_addr = prop->mmu_pgt_addr + (i * prop->mmu_hop_table_size); @@ -2748,7 +3657,7 @@ static int gaudi_load_firmware_to_device(struct hl_device *hdev) dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET; - return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst); + return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst, 0, 0); } static int 
gaudi_load_boot_fit_to_device(struct hl_device *hdev) @@ -2757,10 +3666,10 @@ static int gaudi_load_boot_fit_to_device(struct hl_device *hdev) dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET; - return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst); + return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst, 0, 0); } -static void gaudi_read_device_fw_version(struct hl_device *hdev, +static int gaudi_read_device_fw_version(struct hl_device *hdev, enum hl_fw_component fwc) { const char *name; @@ -2780,7 +3689,7 @@ static void gaudi_read_device_fw_version(struct hl_device *hdev, break; default: dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); - return; + return -EIO; } ver_off &= ~((u32)SRAM_BASE_ADDR); @@ -2792,7 +3701,10 @@ static void gaudi_read_device_fw_version(struct hl_device *hdev, dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n", name, ver_off); strcpy(dest, "unavailable"); + return -EIO; } + + return 0; } static int gaudi_init_cpu(struct hl_device *hdev) @@ -2810,12 +3722,13 @@ static int gaudi_init_cpu(struct hl_device *hdev) * The device CPU works with 40 bits addresses. * This register sets the extension to 50 bits. 
*/ - WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr); + if (hdev->asic_prop.fw_security_disabled) + WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr); rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, mmCPU_CMD_STATUS_TO_HOST, - mmCPU_BOOT_ERR0, + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, !hdev->bmc_enable, GAUDI_CPU_TIMEOUT_USEC, GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); @@ -2895,17 +3808,19 @@ static void gaudi_pre_hw_init(struct hl_device *hdev) /* Perform read from the device to make sure device is up */ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); - /* Set the access through PCI bars (Linux driver only) as - * secured - */ - WREG32(mmPCIE_WRAP_LBW_PROT_OVR, - (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK | - PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK)); + if (hdev->asic_prop.fw_security_disabled) { + /* Set the access through PCI bars (Linux driver only) as + * secured + */ + WREG32(mmPCIE_WRAP_LBW_PROT_OVR, + (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK | + PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK)); - /* Perform read to flush the waiting writes to ensure - * configuration was set in the device - */ - RREG32(mmPCIE_WRAP_LBW_PROT_OVR); + /* Perform read to flush the waiting writes to ensure + * configuration was set in the device + */ + RREG32(mmPCIE_WRAP_LBW_PROT_OVR); + } /* * Let's mark in the H/W that we have reached this point. We check @@ -2914,40 +3829,12 @@ static void gaudi_pre_hw_init(struct hl_device *hdev) * cleared by the H/W upon H/W reset */ WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY); - - /* Configure the reset registers. 
Must be done as early as possible - * in case we fail during H/W initialization - */ - WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H, - (CFG_RST_H_DMA_MASK | - CFG_RST_H_MME_MASK | - CFG_RST_H_SM_MASK | - CFG_RST_H_TPC_7_MASK)); - - WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK); - - WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H, - (CFG_RST_H_HBM_MASK | - CFG_RST_H_TPC_7_MASK | - CFG_RST_H_NIC_MASK | - CFG_RST_H_SM_MASK | - CFG_RST_H_DMA_MASK | - CFG_RST_H_MME_MASK | - CFG_RST_H_CPU_MASK | - CFG_RST_H_MMU_MASK)); - - WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L, - (CFG_RST_L_IF_MASK | - CFG_RST_L_PSOC_MASK | - CFG_RST_L_TPC_MASK)); } static int gaudi_hw_init(struct hl_device *hdev) { int rc; - dev_info(hdev->dev, "Starting initialization of H/W\n"); - gaudi_pre_hw_init(hdev); gaudi_init_pci_dma_qmans(hdev); @@ -2978,11 +3865,13 @@ static int gaudi_hw_init(struct hl_device *hdev) gaudi_init_tpc_qmans(hdev); + gaudi_init_nic_qmans(hdev); + hdev->asic_funcs->set_clock_gating(hdev); gaudi_enable_timestamp(hdev); - /* MSI must be enabled before CPU queues are initialized */ + /* MSI must be enabled before CPU queues and NIC are initialized */ rc = gaudi_enable_msi(hdev); if (rc) goto disable_queues; @@ -3012,7 +3901,7 @@ disable_queues: static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) { struct gaudi_device *gaudi = hdev->asic_specific; - u32 status, reset_timeout_ms, cpu_timeout_ms, boot_strap = 0; + u32 status, reset_timeout_ms, cpu_timeout_ms; if (!hard_reset) { dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n"); @@ -3030,35 +3919,60 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) /* Set device to handle FLR by H/W as we will put the device CPU to * halt mode */ - WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | + if (hdev->asic_prop.fw_security_disabled && + !hdev->asic_prop.hard_reset_done_by_fw) + WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | PCIE_AUX_FLR_CTRL_INT_MASK_MASK)); /* I 
don't know what is the state of the CPU so make sure it is * stopped in any means necessary */ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE); - WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE); - - msleep(cpu_timeout_ms); - - /* Tell ASIC not to re-initialize PCIe */ - WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC); - - boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); - /* H/W bug WA: - * rdata[31:0] = strap_read_val; - * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0] - */ - boot_strap = (((boot_strap & 0x7FE00000) << 1) | - (boot_strap & 0x001FFFFF)); - WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2); + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE); - /* Restart BTL/BLR upon hard-reset */ - WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1); + if (hdev->asic_prop.fw_security_disabled && + !hdev->asic_prop.hard_reset_done_by_fw) { - WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST, + /* Configure the reset registers. Must be done as early as + * possible in case we fail during H/W initialization + */ + WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H, + (CFG_RST_H_DMA_MASK | + CFG_RST_H_MME_MASK | + CFG_RST_H_SM_MASK | + CFG_RST_H_TPC_7_MASK)); + + WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H, + (CFG_RST_H_HBM_MASK | + CFG_RST_H_TPC_7_MASK | + CFG_RST_H_NIC_MASK | + CFG_RST_H_SM_MASK | + CFG_RST_H_DMA_MASK | + CFG_RST_H_MME_MASK | + CFG_RST_H_CPU_MASK | + CFG_RST_H_MMU_MASK)); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L, + (CFG_RST_L_IF_MASK | + CFG_RST_L_PSOC_MASK | + CFG_RST_L_TPC_MASK)); + + msleep(cpu_timeout_ms); + + /* Tell ASIC not to re-initialize PCIe */ + WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC); + + /* Restart BTL/BLR upon hard-reset */ + if (hdev->asic_prop.fw_security_disabled) + WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST, 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT); + } + 
dev_info(hdev->dev, "Issued HARD reset command, going to wait %dms\n", reset_timeout_ms); @@ -3075,18 +3989,18 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) "Timeout while waiting for device to reset 0x%x\n", status); - WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap); - - gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | - HW_CAP_HBM | HW_CAP_PCI_DMA | - HW_CAP_MME | HW_CAP_TPC_MASK | - HW_CAP_HBM_DMA | HW_CAP_PLL | - HW_CAP_MMU | - HW_CAP_SRAM_SCRAMBLER | - HW_CAP_HBM_SCRAMBLER | - HW_CAP_CLK_GATE); + if (gaudi) { + gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | + HW_CAP_HBM | HW_CAP_PCI_DMA | + HW_CAP_MME | HW_CAP_TPC_MASK | + HW_CAP_HBM_DMA | HW_CAP_PLL | + HW_CAP_NIC_MASK | HW_CAP_MMU | + HW_CAP_SRAM_SCRAMBLER | + HW_CAP_HBM_SCRAMBLER | + HW_CAP_CLK_GATE); - memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); + memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); + } } static int gaudi_suspend(struct hl_device *hdev) @@ -3164,21 +4078,21 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) break; case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3: - dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_3]; + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4]; dma_qm_offset = dma_id * DMA_QMAN_OFFSET; q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; break; case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3: - dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4]; + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5]; dma_qm_offset = dma_id * DMA_QMAN_OFFSET; q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; break; case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3: - dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5]; + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_6]; dma_qm_offset = dma_id * DMA_QMAN_OFFSET; q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; db_reg_offset = mmDMA0_QM_PQ_PI_0 
+ q_off; @@ -3351,6 +4265,166 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) db_reg_offset = mmTPC7_QM_PQ_PI_3; break; + case GAUDI_QUEUE_ID_NIC_0_0: + db_reg_offset = mmNIC0_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_0_1: + db_reg_offset = mmNIC0_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_0_2: + db_reg_offset = mmNIC0_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_0_3: + db_reg_offset = mmNIC0_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_1_0: + db_reg_offset = mmNIC0_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_1_1: + db_reg_offset = mmNIC0_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_1_2: + db_reg_offset = mmNIC0_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_1_3: + db_reg_offset = mmNIC0_QM1_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_2_0: + db_reg_offset = mmNIC1_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_2_1: + db_reg_offset = mmNIC1_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_2_2: + db_reg_offset = mmNIC1_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_2_3: + db_reg_offset = mmNIC1_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_3_0: + db_reg_offset = mmNIC1_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_3_1: + db_reg_offset = mmNIC1_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_3_2: + db_reg_offset = mmNIC1_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_3_3: + db_reg_offset = mmNIC1_QM1_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_4_0: + db_reg_offset = mmNIC2_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_4_1: + db_reg_offset = mmNIC2_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_4_2: + db_reg_offset = mmNIC2_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_4_3: + db_reg_offset = mmNIC2_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_5_0: + db_reg_offset = mmNIC2_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_5_1: + db_reg_offset = mmNIC2_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_5_2: + db_reg_offset = mmNIC2_QM1_PQ_PI_2; + break; + + 
case GAUDI_QUEUE_ID_NIC_5_3: + db_reg_offset = mmNIC2_QM1_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_6_0: + db_reg_offset = mmNIC3_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_6_1: + db_reg_offset = mmNIC3_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_6_2: + db_reg_offset = mmNIC3_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_6_3: + db_reg_offset = mmNIC3_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_7_0: + db_reg_offset = mmNIC3_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_7_1: + db_reg_offset = mmNIC3_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_7_2: + db_reg_offset = mmNIC3_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_7_3: + db_reg_offset = mmNIC3_QM1_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_8_0: + db_reg_offset = mmNIC4_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_8_1: + db_reg_offset = mmNIC4_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_8_2: + db_reg_offset = mmNIC4_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_8_3: + db_reg_offset = mmNIC4_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_9_0: + db_reg_offset = mmNIC4_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_9_1: + db_reg_offset = mmNIC4_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_9_2: + db_reg_offset = mmNIC4_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_9_3: + db_reg_offset = mmNIC4_QM1_PQ_PI_3; + break; + default: invalid_queue = true; } @@ -3404,6 +4478,121 @@ static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size, dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); } +static int gaudi_hbm_scrubbing(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 cur_addr = DRAM_BASE_ADDR_USER; + u32 val; + u32 chunk_size; + int rc, dma_id; + + while (cur_addr < prop->dram_end_address) { + for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) { + u32 dma_offset = dma_id * DMA_CORE_OFFSET; + + chunk_size = + min((u64)SZ_2G, prop->dram_end_address - cur_addr); + + 
dev_dbg(hdev->dev, + "Doing HBM scrubbing for 0x%09llx - 0x%09llx\n", + cur_addr, cur_addr + chunk_size); + + WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0); + WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0); + WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset, + lower_32_bits(cur_addr)); + WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset, + upper_32_bits(cur_addr)); + WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset, + chunk_size); + WREG32(mmDMA0_CORE_COMMIT + dma_offset, + ((1 << DMA0_CORE_COMMIT_LIN_SHIFT) | + (1 << DMA0_CORE_COMMIT_MEM_SET_SHIFT))); + + cur_addr += chunk_size; + + if (cur_addr == prop->dram_end_address) + break; + } + + for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) { + u32 dma_offset = dma_id * DMA_CORE_OFFSET; + + rc = hl_poll_timeout( + hdev, + mmDMA0_CORE_STS0 + dma_offset, + val, + ((val & DMA0_CORE_STS0_BUSY_MASK) == 0), + 1000, + HBM_SCRUBBING_TIMEOUT_US); + + if (rc) { + dev_err(hdev->dev, + "DMA Timeout during HBM scrubbing of DMA #%d\n", + dma_id); + return -EIO; + } + } + } + + return 0; +} + +static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u64 idle_mask = 0; + int rc = 0; + u64 val = 0; + + if (!hdev->memory_scrub) + return 0; + + if (!addr && !size) { + /* Wait till device is idle */ + rc = hl_poll_timeout( + hdev, + mmDMA0_CORE_STS0/* dummy */, + val/* dummy */, + (hdev->asic_funcs->is_device_idle(hdev, + &idle_mask, NULL)), + 1000, + HBM_SCRUBBING_TIMEOUT_US); + if (rc) { + dev_err(hdev->dev, "waiting for idle timeout\n"); + return -EIO; + } + + /* Scrub SRAM */ + addr = prop->sram_user_base_address; + size = hdev->pldm ? 
0x10000 : + (prop->sram_size - SRAM_USER_BASE_OFFSET); + val = 0x7777777777777777ull; + + rc = gaudi_memset_device_memory(hdev, addr, size, val); + if (rc) { + dev_err(hdev->dev, + "Failed to clear SRAM in mem scrub all\n"); + return rc; + } + + mutex_lock(&gaudi->clk_gate_mutex); + hdev->asic_funcs->disable_clock_gating(hdev); + + /* Scrub HBM using all DMA channels in parallel */ + rc = gaudi_hbm_scrubbing(hdev); + if (rc) + dev_err(hdev->dev, + "Failed to clear HBM in mem scrub all\n"); + + hdev->asic_funcs->set_clock_gating(hdev); + mutex_unlock(&gaudi->clk_gate_mutex); + } + + return rc; +} + static void *gaudi_get_int_queue_base(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len) @@ -3425,7 +4614,7 @@ static void *gaudi_get_int_queue_base(struct hl_device *hdev, } static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg, - u16 len, u32 timeout, long *result) + u16 len, u32 timeout, u64 *result) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -4244,6 +5433,17 @@ static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev, struct hl_cs_parser *parser) { struct asic_fixed_properties *asic_prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u32 nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT + + ((parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2)); + + if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) && + (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3) && + (!(gaudi->hw_cap_initialized & nic_mask_q_id))) { + dev_err(hdev->dev, "h/w queue %d is disabled\n", + parser->hw_queue_id); + return -EINVAL; + } /* For internal queue jobs just check if CB address is valid */ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, @@ -4370,7 +5570,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, job->id = 0; job->user_cb = cb; - job->user_cb->cs_cnt++; + atomic_inc(&job->user_cb->cs_cnt); job->user_cb_size = cb_size; job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; 
job->patched_cb = job->user_cb; @@ -4381,7 +5581,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, rc = gaudi_send_job_on_qman0(hdev, job); hl_debugfs_remove_job(hdev, job); kfree(job); - cb->cs_cnt--; + atomic_dec(&cb->cs_cnt); /* Verify DMA is OK */ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE); @@ -4476,6 +5676,12 @@ static void gaudi_restore_qm_registers(struct hl_device *hdev) qman_offset = i * TPC_QMAN_OFFSET; WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0); } + + for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) { + qman_offset = (i >> 1) * NIC_MACRO_QMAN_OFFSET + + (i & 0x1) * NIC_ENGINE_QMAN_OFFSET; + WREG32(mmNIC0_QM0_ARB_CFG_0 + qman_offset, 0); + } } static void gaudi_restore_user_registers(struct hl_device *hdev) @@ -4487,21 +5693,6 @@ static void gaudi_restore_user_registers(struct hl_device *hdev) static int gaudi_context_switch(struct hl_device *hdev, u32 asid) { - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 addr = prop->sram_user_base_address; - u32 size = hdev->pldm ? 
0x10000 : - (prop->sram_size - SRAM_USER_BASE_OFFSET); - u64 val = 0x7777777777777777ull; - int rc; - - rc = gaudi_memset_device_memory(hdev, addr, size, val); - if (rc) { - dev_err(hdev->dev, "Failed to clear SRAM in context switch\n"); - return rc; - } - - gaudi_mmu_prepare(hdev, asid); - gaudi_restore_user_registers(hdev); return 0; @@ -4910,6 +6101,136 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid) gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid); gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid); + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC0) { + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC1) { + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC2) { + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC3) { + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, 
mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC4) { + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC5) { + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC6) { + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC7) { + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4, + 
asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC8) { + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC9) { + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); @@ -5489,6 +6810,56 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type) mmDMA0_QM_ARB_ERR_CAUSE + index * DMA_QMAN_OFFSET; snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index); break; + case GAUDI_EVENT_NIC0_QM0: + glbl_sts_addr = mmNIC0_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC0_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM0"); + break; + case GAUDI_EVENT_NIC0_QM1: + glbl_sts_addr = mmNIC0_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC0_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM1"); + break; + case GAUDI_EVENT_NIC1_QM0: + glbl_sts_addr = mmNIC1_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC1_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM0"); + break; + case GAUDI_EVENT_NIC1_QM1: + glbl_sts_addr = mmNIC1_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC1_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM1"); + break; + case GAUDI_EVENT_NIC2_QM0: + glbl_sts_addr = mmNIC2_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC2_QM0_ARB_ERR_CAUSE; + snprintf(desc, 
ARRAY_SIZE(desc), "NIC2_QM0"); + break; + case GAUDI_EVENT_NIC2_QM1: + glbl_sts_addr = mmNIC2_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC2_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM1"); + break; + case GAUDI_EVENT_NIC3_QM0: + glbl_sts_addr = mmNIC3_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC3_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM0"); + break; + case GAUDI_EVENT_NIC3_QM1: + glbl_sts_addr = mmNIC3_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC3_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM1"); + break; + case GAUDI_EVENT_NIC4_QM0: + glbl_sts_addr = mmNIC4_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC4_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM0"); + break; + case GAUDI_EVENT_NIC4_QM1: + glbl_sts_addr = mmNIC4_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC4_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM1"); + break; default: return; } @@ -5521,10 +6892,41 @@ static int gaudi_soft_reset_late_init(struct hl_device *hdev) return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events)); } -static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device) +static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device, + struct hl_eq_hbm_ecc_data *hbm_ecc_data) { - int ch, err = 0; - u32 base, val, val2; + u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch; + int err = 0; + + if (!hdev->asic_prop.fw_security_disabled) { + if (!hbm_ecc_data) { + dev_err(hdev->dev, "No FW ECC data"); + return 0; + } + + wr_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + rd_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + ca_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + derr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_DERR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + serr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_SERR_MASK, + 
le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + type = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + ch = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + + dev_err(hdev->dev, + "HBM%d pc%d ECC: TYPE=%d, WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n", + device, ch, type, wr_par, rd_par, ca_par, serr, derr); + + err = 1; + + return 0; + } base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET; for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) { @@ -5540,7 +6942,7 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device) val2 = RREG32(base + ch * 0x1000 + 0x060); dev_err(hdev->dev, - "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n", + "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n", device, ch * 2, RREG32(base + ch * 0x1000 + 0x064), (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10, @@ -5560,7 +6962,7 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device) val2 = RREG32(base + ch * 0x1000 + 0x070); dev_err(hdev->dev, - "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n", + "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n", device, ch * 2 + 1, RREG32(base + ch * 0x1000 + 0x074), (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10, @@ -5761,7 +7163,8 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_HBM3_SPI_0: gaudi_print_irq_info(hdev, event_type, false); gaudi_hbm_read_interrupts(hdev, - gaudi_hbm_event_to_dev(event_type)); + gaudi_hbm_event_to_dev(event_type), + &eq_entry->hbm_ecc_data); if (hdev->hard_reset_on_fw_events) hl_device_reset(hdev, true, false); break; @@ -5772,7 +7175,8 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_HBM3_SPI_1: gaudi_print_irq_info(hdev, event_type, false); 
gaudi_hbm_read_interrupts(hdev, - gaudi_hbm_event_to_dev(event_type)); + gaudi_hbm_event_to_dev(event_type), + &eq_entry->hbm_ecc_data); break; case GAUDI_EVENT_TPC0_DEC: @@ -5866,6 +7270,16 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM: case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM: fallthrough; + case GAUDI_EVENT_NIC0_QM0: + case GAUDI_EVENT_NIC0_QM1: + case GAUDI_EVENT_NIC1_QM0: + case GAUDI_EVENT_NIC1_QM1: + case GAUDI_EVENT_NIC2_QM0: + case GAUDI_EVENT_NIC2_QM1: + case GAUDI_EVENT_NIC3_QM0: + case GAUDI_EVENT_NIC3_QM1: + case GAUDI_EVENT_NIC4_QM0: + case GAUDI_EVENT_NIC4_QM1: case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE: gaudi_print_irq_info(hdev, event_type, true); gaudi_handle_qman_err(hdev, event_type); @@ -6073,7 +7487,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev) if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) return 0; - rc = hl_fw_cpucp_info_get(hdev); + rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0); if (rc) return rc; @@ -6099,10 +7513,11 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, struct gaudi_device *gaudi = hdev->asic_specific; const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n"; const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n"; + const char *nic_fmt = "%-5d%-9s%#-14x%#x\n"; u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts; bool is_idle = true, is_eng_idle, is_slave; u64 offset; - int i, dma_id; + int i, dma_id, port; mutex_lock(&gaudi->clk_gate_mutex); @@ -6192,6 +7607,45 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, } if (s) + seq_puts(s, "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n" + "--- ------- ------------ ----------\n"); + + for (i = 0 ; i < (NIC_NUMBER_OF_ENGINES / 2) ; i++) { + offset = i * NIC_MACRO_QMAN_OFFSET; + port = 2 * i; + if (hdev->nic_ports_mask & BIT(port)) { + qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset); + qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset); + 
is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GAUDI_ENGINE_ID_NIC_0 + port); + if (s) + seq_printf(s, nic_fmt, port, + is_eng_idle ? "Y" : "N", + qm_glbl_sts0, qm_cgm_sts); + } + + port = 2 * i + 1; + if (hdev->nic_ports_mask & BIT(port)) { + qm_glbl_sts0 = RREG32(mmNIC0_QM1_GLBL_STS0 + offset); + qm_cgm_sts = RREG32(mmNIC0_QM1_CGM_STS + offset); + is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GAUDI_ENGINE_ID_NIC_0 + port); + if (s) + seq_printf(s, nic_fmt, port, + is_eng_idle ? "Y" : "N", + qm_glbl_sts0, qm_cgm_sts); + } + } + + if (s) seq_puts(s, "\n"); hdev->asic_funcs->set_clock_gating(hdev); @@ -6346,14 +7800,121 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, return 0; } -static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev) +static int gaudi_internal_cb_pool_init(struct hl_device *hdev, + struct hl_ctx *ctx) { - return RREG32(mmHW_STATE); + struct gaudi_device *gaudi = hdev->asic_specific; + int min_alloc_order, rc, collective_cb_size; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + hdev->internal_cb_pool_virt_addr = + hdev->asic_funcs->asic_dma_alloc_coherent(hdev, + HOST_SPACE_INTERNAL_CB_SZ, + &hdev->internal_cb_pool_dma_addr, + GFP_KERNEL | __GFP_ZERO); + + if (!hdev->internal_cb_pool_virt_addr) + return -ENOMEM; + + collective_cb_size = sizeof(struct packet_msg_short) * 5 + + sizeof(struct packet_fence); + min_alloc_order = ilog2(collective_cb_size); + + hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1); + if (!hdev->internal_cb_pool) { + dev_err(hdev->dev, + "Failed to create internal CB pool\n"); + rc = -ENOMEM; + goto free_internal_cb_pool; + } + + rc = gen_pool_add(hdev->internal_cb_pool, + (uintptr_t) hdev->internal_cb_pool_virt_addr, + HOST_SPACE_INTERNAL_CB_SZ, -1); + if (rc) { + dev_err(hdev->dev, + 
"Failed to add memory to internal CB pool\n"); + rc = -EFAULT; + goto destroy_internal_cb_pool; + } + + hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, + HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ, + HL_MMU_VA_ALIGNMENT_NOT_NEEDED); + + if (!hdev->internal_cb_va_base) + goto destroy_internal_cb_pool; + + mutex_lock(&ctx->mmu_lock); + rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, + hdev->internal_cb_pool_dma_addr, + HOST_SPACE_INTERNAL_CB_SZ); + + hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR); + mutex_unlock(&ctx->mmu_lock); + + if (rc) + goto unreserve_internal_cb_pool; + + return 0; + +unreserve_internal_cb_pool: + hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, + HOST_SPACE_INTERNAL_CB_SZ); +destroy_internal_cb_pool: + gen_pool_destroy(hdev->internal_cb_pool); +free_internal_cb_pool: + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HOST_SPACE_INTERNAL_CB_SZ, + hdev->internal_cb_pool_virt_addr, + hdev->internal_cb_pool_dma_addr); + + return rc; +} + +static void gaudi_internal_cb_pool_fini(struct hl_device *hdev, + struct hl_ctx *ctx) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) + return; + + mutex_lock(&ctx->mmu_lock); + hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, + HOST_SPACE_INTERNAL_CB_SZ); + hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, + HOST_SPACE_INTERNAL_CB_SZ); + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + mutex_unlock(&ctx->mmu_lock); + + gen_pool_destroy(hdev->internal_cb_pool); + + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HOST_SPACE_INTERNAL_CB_SZ, + hdev->internal_cb_pool_virt_addr, + hdev->internal_cb_pool_dma_addr); } static int gaudi_ctx_init(struct hl_ctx *ctx) { - return 0; + gaudi_mmu_prepare(ctx->hdev, ctx->asid); + return gaudi_internal_cb_pool_init(ctx->hdev, ctx); +} + +static void gaudi_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = 
ctx->hdev; + + /* Gaudi will NEVER support more then a single compute context. + * Therefore, don't clear anything unless it is the compute context + */ + if (hdev->compute_ctx != ctx) + return; + + gaudi_internal_cb_pool_fini(ctx->hdev, ctx); } static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx) @@ -6374,14 +7935,15 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev) sizeof(struct packet_msg_prot) * 2; } -static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) +static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, + u32 size) { struct hl_cb *cb = (struct hl_cb *) data; struct packet_msg_short *pkt; - u32 value, ctl; + u32 value, ctl, pkt_size = sizeof(*pkt); - pkt = cb->kernel_address; - memset(pkt, 0, sizeof(*pkt)); + pkt = cb->kernel_address + size; + memset(pkt, 0, pkt_size); /* Inc by 1, Mode ADD */ value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1); @@ -6397,6 +7959,8 @@ static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) pkt->value = cpu_to_le32(value); pkt->ctl = cpu_to_le32(ctl); + + return size + pkt_size; } static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, @@ -6419,21 +7983,42 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, return pkt_size; } -static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id, - u16 sob_val, u16 addr) +static u32 gaudi_add_arm_monitor_pkt(struct hl_device *hdev, + struct packet_msg_short *pkt, u16 sob_base, u8 sob_mask, + u16 sob_val, u16 mon_id) { + u64 monitor_base; u32 ctl, value, pkt_size = sizeof(*pkt); - u8 mask = ~(1 << (sob_id & 0x7)); + u16 msg_addr_offset; + u8 mask; + + if (hl_gen_sob_mask(sob_base, sob_mask, &mask)) { + dev_err(hdev->dev, + "sob_base %u (mask %#x) is not valid\n", + sob_base, sob_mask); + return 0; + } + + /* + * monitor_base should be the content of the base0 address registers, + * so it will be added to the 
msg short offsets + */ + monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0; + + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) - + monitor_base; memset(pkt, 0, pkt_size); - value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_id / 8); + /* Monitor config packet: bind the monitor to a sync object */ + value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_base / 8); value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val); value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK, 0); /* GREATER OR EQUAL*/ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask); - ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr); + ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, msg_addr_offset); ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); @@ -6468,60 +8053,133 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt) return pkt_size; } -static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, - u16 sob_val, u16 mon_id, u32 q_idx) +static int gaudi_get_fence_addr(struct hl_device *hdev, u32 queue_id, u64 *addr) { - struct hl_cb *cb = (struct hl_cb *) data; - void *buf = cb->kernel_address; - u64 monitor_base, fence_addr = 0; - u32 size = 0; - u16 msg_addr_offset; + u32 offset, nic_index; - switch (q_idx) { + switch (queue_id) { case GAUDI_QUEUE_ID_DMA_0_0: - fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_0; + offset = mmDMA0_QM_CP_FENCE2_RDATA_0; break; case GAUDI_QUEUE_ID_DMA_0_1: - fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_1; + offset = mmDMA0_QM_CP_FENCE2_RDATA_1; break; case GAUDI_QUEUE_ID_DMA_0_2: - fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_2; + offset = mmDMA0_QM_CP_FENCE2_RDATA_2; break; case GAUDI_QUEUE_ID_DMA_0_3: - fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_3; + offset = mmDMA0_QM_CP_FENCE2_RDATA_3; break; case 
GAUDI_QUEUE_ID_DMA_1_0: - fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_0; + offset = mmDMA1_QM_CP_FENCE2_RDATA_0; break; case GAUDI_QUEUE_ID_DMA_1_1: - fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_1; + offset = mmDMA1_QM_CP_FENCE2_RDATA_1; break; case GAUDI_QUEUE_ID_DMA_1_2: - fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_2; + offset = mmDMA1_QM_CP_FENCE2_RDATA_2; break; case GAUDI_QUEUE_ID_DMA_1_3: - fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_3; + offset = mmDMA1_QM_CP_FENCE2_RDATA_3; break; case GAUDI_QUEUE_ID_DMA_5_0: - fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_0; + offset = mmDMA5_QM_CP_FENCE2_RDATA_0; break; case GAUDI_QUEUE_ID_DMA_5_1: - fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_1; + offset = mmDMA5_QM_CP_FENCE2_RDATA_1; break; case GAUDI_QUEUE_ID_DMA_5_2: - fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_2; + offset = mmDMA5_QM_CP_FENCE2_RDATA_2; break; case GAUDI_QUEUE_ID_DMA_5_3: - fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_3; + offset = mmDMA5_QM_CP_FENCE2_RDATA_3; + break; + case GAUDI_QUEUE_ID_TPC_7_0: + offset = mmTPC7_QM_CP_FENCE2_RDATA_0; + break; + case GAUDI_QUEUE_ID_TPC_7_1: + offset = mmTPC7_QM_CP_FENCE2_RDATA_1; + break; + case GAUDI_QUEUE_ID_TPC_7_2: + offset = mmTPC7_QM_CP_FENCE2_RDATA_2; + break; + case GAUDI_QUEUE_ID_TPC_7_3: + offset = mmTPC7_QM_CP_FENCE2_RDATA_3; + break; + case GAUDI_QUEUE_ID_NIC_0_0: + case GAUDI_QUEUE_ID_NIC_1_0: + case GAUDI_QUEUE_ID_NIC_2_0: + case GAUDI_QUEUE_ID_NIC_3_0: + case GAUDI_QUEUE_ID_NIC_4_0: + case GAUDI_QUEUE_ID_NIC_5_0: + case GAUDI_QUEUE_ID_NIC_6_0: + case GAUDI_QUEUE_ID_NIC_7_0: + case GAUDI_QUEUE_ID_NIC_8_0: + case GAUDI_QUEUE_ID_NIC_9_0: + nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2; + offset = mmNIC0_QM0_CP_FENCE2_RDATA_0 + + (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET + + (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET; + break; + case GAUDI_QUEUE_ID_NIC_0_1: + case GAUDI_QUEUE_ID_NIC_1_1: + case GAUDI_QUEUE_ID_NIC_2_1: + case GAUDI_QUEUE_ID_NIC_3_1: + case GAUDI_QUEUE_ID_NIC_4_1: + case GAUDI_QUEUE_ID_NIC_5_1: + case 
GAUDI_QUEUE_ID_NIC_6_1: + case GAUDI_QUEUE_ID_NIC_7_1: + case GAUDI_QUEUE_ID_NIC_8_1: + case GAUDI_QUEUE_ID_NIC_9_1: + nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_1) >> 2; + offset = mmNIC0_QM0_CP_FENCE2_RDATA_1 + + (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET + + (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET; + break; + case GAUDI_QUEUE_ID_NIC_0_2: + case GAUDI_QUEUE_ID_NIC_1_2: + case GAUDI_QUEUE_ID_NIC_2_2: + case GAUDI_QUEUE_ID_NIC_3_2: + case GAUDI_QUEUE_ID_NIC_4_2: + case GAUDI_QUEUE_ID_NIC_5_2: + case GAUDI_QUEUE_ID_NIC_6_2: + case GAUDI_QUEUE_ID_NIC_7_2: + case GAUDI_QUEUE_ID_NIC_8_2: + case GAUDI_QUEUE_ID_NIC_9_2: + nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_2) >> 2; + offset = mmNIC0_QM0_CP_FENCE2_RDATA_2 + + (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET + + (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET; + break; + case GAUDI_QUEUE_ID_NIC_0_3: + case GAUDI_QUEUE_ID_NIC_1_3: + case GAUDI_QUEUE_ID_NIC_2_3: + case GAUDI_QUEUE_ID_NIC_3_3: + case GAUDI_QUEUE_ID_NIC_4_3: + case GAUDI_QUEUE_ID_NIC_5_3: + case GAUDI_QUEUE_ID_NIC_6_3: + case GAUDI_QUEUE_ID_NIC_7_3: + case GAUDI_QUEUE_ID_NIC_8_3: + case GAUDI_QUEUE_ID_NIC_9_3: + nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_3) >> 2; + offset = mmNIC0_QM0_CP_FENCE2_RDATA_3 + + (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET + + (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET; break; default: - /* queue index should be valid here */ - dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", - q_idx); - return; + return -EINVAL; } - fence_addr += CFG_BASE; + *addr = CFG_BASE + offset; + + return 0; +} + +static u32 gaudi_add_mon_pkts(void *buf, u16 mon_id, u64 fence_addr) +{ + u64 monitor_base; + u32 size = 0; + u16 msg_addr_offset; /* * monitor_base should be the content of the base0 address registers, @@ -6555,15 +8213,29 @@ static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset); - /* Fourth monitor config packet: bind the monitor to a sync object */ - 
msg_addr_offset = - (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) - - monitor_base; - size += gaudi_add_arm_monitor_pkt(buf + size, sob_id, sob_val, - msg_addr_offset); + return size; +} + +static u32 gaudi_gen_wait_cb(struct hl_device *hdev, + struct hl_gen_wait_properties *prop) +{ + struct hl_cb *cb = (struct hl_cb *) prop->data; + void *buf = cb->kernel_address; + u64 fence_addr = 0; + u32 size = prop->size; - /* Fence packet */ + if (gaudi_get_fence_addr(hdev, prop->q_idx, &fence_addr)) { + dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", + prop->q_idx); + return 0; + } + + size += gaudi_add_mon_pkts(buf + size, prop->mon_id, fence_addr); + size += gaudi_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, + prop->sob_mask, prop->sob_val, prop->mon_id); size += gaudi_add_fence_pkt(buf + size); + + return size; } static void gaudi_reset_sob(struct hl_device *hdev, void *data) @@ -6615,6 +8287,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .pqe_write = gaudi_pqe_write, .asic_dma_alloc_coherent = gaudi_dma_alloc_coherent, .asic_dma_free_coherent = gaudi_dma_free_coherent, + .scrub_device_mem = gaudi_scrub_device_mem, .get_int_queue_base = gaudi_get_int_queue_base, .test_queues = gaudi_test_queues, .asic_dma_pool_zalloc = gaudi_dma_pool_zalloc, @@ -6652,13 +8325,13 @@ static const struct hl_asic_funcs gaudi_funcs = { .get_pci_id = gaudi_get_pci_id, .get_eeprom_data = gaudi_get_eeprom_data, .send_cpu_message = gaudi_send_cpu_message, - .get_hw_state = gaudi_get_hw_state, .pci_bars_map = gaudi_pci_bars_map, .init_iatu = gaudi_init_iatu, .rreg = hl_rreg, .wreg = hl_wreg, .halt_coresight = gaudi_halt_coresight, .ctx_init = gaudi_ctx_init, + .ctx_fini = gaudi_ctx_fini, .get_clk_rate = gaudi_get_clk_rate, .get_queue_id_for_cq = gaudi_get_queue_id_for_cq, .read_device_fw_version = gaudi_read_device_fw_version, @@ -6669,8 +8342,11 @@ static const struct hl_asic_funcs gaudi_funcs = { .gen_signal_cb = gaudi_gen_signal_cb, .gen_wait_cb = 
gaudi_gen_wait_cb, .reset_sob = gaudi_reset_sob, + .reset_sob_group = gaudi_reset_sob_group, .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw, - .get_device_time = gaudi_get_device_time + .get_device_time = gaudi_get_device_time, + .collective_wait_init_cs = gaudi_collective_wait_init_cs, + .collective_wait_create_jobs = gaudi_collective_wait_create_jobs }; /** diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 8eb598db81b2..f2d91f4fcffe 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -14,8 +14,9 @@ #include "../include/gaudi/gaudi_packets.h" #include "../include/gaudi/gaudi.h" #include "../include/gaudi/gaudi_async_events.h" +#include "../include/gaudi/gaudi_fw_if.h" -#define NUMBER_OF_EXT_HW_QUEUES 12 +#define NUMBER_OF_EXT_HW_QUEUES 8 #define NUMBER_OF_CMPLT_QUEUES NUMBER_OF_EXT_HW_QUEUES #define NUMBER_OF_CPU_HW_QUEUES 1 #define NUMBER_OF_INT_HW_QUEUES 100 @@ -23,6 +24,10 @@ NUMBER_OF_CPU_HW_QUEUES + \ NUMBER_OF_INT_HW_QUEUES) +/* 10 NIC QMANs, DMA5 QMAN, TPC7 QMAN */ +#define NUMBER_OF_COLLECTIVE_QUEUES 12 +#define NUMBER_OF_SOBS_IN_GRP 11 + /* * Number of MSI interrupts IDS: * Each completion queue has 1 ID @@ -56,14 +61,14 @@ #define GAUDI_DEFAULT_CARD_NAME "HL2000" -#define GAUDI_MAX_PENDING_CS 1024 +#define GAUDI_MAX_PENDING_CS SZ_16K #if !IS_MAX_PENDING_CS_VALID(GAUDI_MAX_PENDING_CS) #error "GAUDI_MAX_PENDING_CS must be power of 2 and greater than 1" #endif -#define PCI_DMA_NUMBER_OF_CHNLS 3 -#define HBM_DMA_NUMBER_OF_CHNLS 5 +#define PCI_DMA_NUMBER_OF_CHNLS 2 +#define HBM_DMA_NUMBER_OF_CHNLS 6 #define DMA_NUMBER_OF_CHNLS (PCI_DMA_NUMBER_OF_CHNLS + \ HBM_DMA_NUMBER_OF_CHNLS) @@ -79,6 +84,7 @@ #define TPC_QMAN_OFFSET (mmTPC1_QM_BASE - mmTPC0_QM_BASE) #define MME_QMAN_OFFSET (mmMME1_QM_BASE - mmMME0_QM_BASE) #define NIC_MACRO_QMAN_OFFSET (mmNIC1_QM0_BASE - mmNIC0_QM0_BASE) +#define NIC_ENGINE_QMAN_OFFSET (mmNIC0_QM1_BASE - mmNIC0_QM0_BASE) #define 
TPC_CFG_OFFSET (mmTPC1_CFG_BASE - mmTPC0_CFG_BASE) @@ -99,6 +105,13 @@ #define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE) #define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE) +#define PLL_NR_OFFSET 0 +#define PLL_NF_OFFSET (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR) +#define PLL_OD_OFFSET (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR) +#define PLL_DIV_FACTOR_0_OFFSET (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \ + mmPSOC_CPU_PLL_NR) +#define PLL_DIV_SEL_0_OFFSET (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR) + #define NUM_OF_SOB_IN_BLOCK \ (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2) @@ -140,13 +153,18 @@ #define TPC_QMAN_LENGTH 1024 #define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) +#define NIC_QMAN_LENGTH 1024 +#define NIC_QMAN_SIZE_IN_BYTES (NIC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) + + #define SRAM_USER_BASE_OFFSET GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START /* Virtual address space */ #define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */ -#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 1TB */ +#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 512GB */ #define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \ VA_HOST_SPACE_START) /* 767TB */ +#define HOST_SPACE_INTERNAL_CB_SZ SZ_2M #define HW_CAP_PLL BIT(0) #define HW_CAP_HBM BIT(1) @@ -161,6 +179,19 @@ #define HW_CAP_SRAM_SCRAMBLER BIT(10) #define HW_CAP_HBM_SCRAMBLER BIT(11) +#define HW_CAP_NIC0 BIT(14) +#define HW_CAP_NIC1 BIT(15) +#define HW_CAP_NIC2 BIT(16) +#define HW_CAP_NIC3 BIT(17) +#define HW_CAP_NIC4 BIT(18) +#define HW_CAP_NIC5 BIT(19) +#define HW_CAP_NIC6 BIT(20) +#define HW_CAP_NIC7 BIT(21) +#define HW_CAP_NIC8 BIT(22) +#define HW_CAP_NIC9 BIT(23) +#define HW_CAP_NIC_MASK GENMASK(23, 14) +#define HW_CAP_NIC_SHIFT 14 + #define HW_CAP_TPC0 BIT(24) #define HW_CAP_TPC1 BIT(25) #define HW_CAP_TPC2 BIT(26) @@ -187,12 +218,12 @@ enum gaudi_dma_channels { GAUDI_PCI_DMA_1, GAUDI_PCI_DMA_2, - GAUDI_PCI_DMA_3, 
GAUDI_HBM_DMA_1, GAUDI_HBM_DMA_2, GAUDI_HBM_DMA_3, GAUDI_HBM_DMA_4, GAUDI_HBM_DMA_5, + GAUDI_HBM_DMA_6, GAUDI_DMA_MAX }; @@ -208,6 +239,48 @@ enum gaudi_tpc_mask { GAUDI_TPC_MASK_ALL = 0xFF }; +enum gaudi_nic_mask { + GAUDI_NIC_MASK_NIC0 = 0x01, + GAUDI_NIC_MASK_NIC1 = 0x02, + GAUDI_NIC_MASK_NIC2 = 0x04, + GAUDI_NIC_MASK_NIC3 = 0x08, + GAUDI_NIC_MASK_NIC4 = 0x10, + GAUDI_NIC_MASK_NIC5 = 0x20, + GAUDI_NIC_MASK_NIC6 = 0x40, + GAUDI_NIC_MASK_NIC7 = 0x80, + GAUDI_NIC_MASK_NIC8 = 0x100, + GAUDI_NIC_MASK_NIC9 = 0x200, + GAUDI_NIC_MASK_ALL = 0x3FF +}; + +/* + * struct gaudi_hw_sob_group - H/W SOB group info. + * @hdev: habanalabs device structure. + * @kref: refcount of this SOB group. group will reset once refcount is zero. + * @base_sob_id: base sob id of this SOB group. + */ +struct gaudi_hw_sob_group { + struct hl_device *hdev; + struct kref kref; + u32 base_sob_id; +}; + +#define NUM_SOB_GROUPS (HL_RSVD_SOBS * QMAN_STREAMS) +/** + * struct gaudi_collective_properties - + * holds all SOB groups and queues info reserved for the collective + * @hw_sob_group: H/W SOB groups. + * @next_sob_group_val: the next value to use for the currently used SOB group. + * @curr_sob_group_idx: the index of the currently used SOB group. + * @mstr_sob_mask: pre-defined masks for collective master monitors + */ +struct gaudi_collective_properties { + struct gaudi_hw_sob_group hw_sob_group[NUM_SOB_GROUPS]; + u16 next_sob_group_val[QMAN_STREAMS]; + u8 curr_sob_group_idx[QMAN_STREAMS]; + u8 mstr_sob_mask[HL_COLLECTIVE_RSVD_MSTR_MONS]; +}; + /** * struct gaudi_internal_qman_info - Internal QMAN information. * @pq_kernel_addr: Kernel address of the PQ memory area in the host. 
@@ -253,6 +326,8 @@ struct gaudi_device { struct gaudi_internal_qman_info internal_qmans[GAUDI_QUEUE_ID_SIZE]; + struct gaudi_collective_properties collective_props; + u64 hbm_bar_cur_addr; u64 max_freq_value; diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c index 3d2b0f0f4650..2e3612e1ee28 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c @@ -11,8 +11,6 @@ #include "../include/gaudi/gaudi_masks.h" #include <uapi/misc/habanalabs.h> -#include <linux/coresight.h> - #define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET #define SPMU_EVENT_TYPES_OFFSET 0x400 #define SPMU_MAX_COUNTERS 6 diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c index 1076b4932ce2..8c49da4bcbd5 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c @@ -20,7 +20,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk) { long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, false); @@ -54,7 +54,7 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev, struct gaudi_device *gaudi = hdev->asic_specific; long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, false); @@ -72,7 +72,7 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev, int rc; u64 value; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto fail; } @@ -97,7 +97,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, true); 
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c index 2d7add0e5bcc..e10181692d0b 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_security.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c @@ -1448,21 +1448,23 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev) u32 pb_addr, mask; u8 word_offset; - gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE); - gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE); - - gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE); - - gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE); + if (hdev->asic_prop.fw_security_disabled) { + gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE); + gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE); + + gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE); + + gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE); + } WREG32(mmDMA0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); WREG32(mmDMA1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); @@ -5157,19 +5159,3992 @@ static void gaudi_init_dma_protection_bits(struct 
hl_device *hdev) WREG32(pb_addr + word_offset, ~mask); } +static void gaudi_init_nic_protection_bits(struct hl_device *hdev) +{ + u32 pb_addr, mask; + u8 word_offset; + + WREG32(mmNIC0_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC0_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC0_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask 
|= 1U << ((mmNIC0_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask 
|= 1U << ((mmNIC0_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U 
<< ((mmNIC0_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_24 & 
0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + 
+ WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_PQ_BASE_HI_0 & ~0xFFF) + 
PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_PQ_STS1_0 & 
PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 
2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_0 
& 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_STS_4 & 0x7F) >> 
2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmNIC1_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC1_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC1_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_GLBL_CFG0 & 
PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_3 & 
0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + 
word_offset = ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_3 & 
0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC1_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask 
|= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_2 & 
0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U 
<< ((mmNIC1_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask 
|= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 
2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = 
((mmNIC1_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmNIC2_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC2_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC2_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS1_2 & 
0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_4 
& 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC2_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + 
word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + 
WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr 
+ word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & 
~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_14 & 0x7F) 
>> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmNIC3_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC3_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC3_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_0 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 
2; + mask = 1U << ((mmNIC3_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset 
= ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_BARRIER_CFG_3 & 
PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << 
((mmNIC3_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 
2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_AXCACHE 
& 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 
1U << ((mmNIC3_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_1 & 0x7F) >> 2); + 
+ WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmNIC3_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_STATUS & 0x7F) 
>> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmNIC4_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC4_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC4_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) 
<< 2; + mask = 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_DBG_0_2 & 
PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmNIC4_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_MST_CRED_STS_20 & 
PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 
1U << ((mmNIC4_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_3 
& 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmNIC4_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_17 & 
0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask 
|= 1U << ((mmNIC4_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_23 
& 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 
2; + mask = 1U << ((mmNIC4_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); +} + static void gaudi_init_tpc_protection_bits(struct hl_device *hdev) { u32 pb_addr, mask; u8 word_offset; - gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE); + if (hdev->asic_prop.fw_security_disabled) { + gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE); + } WREG32(mmTPC0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); WREG32(mmTPC0_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); @@ -8851,16 +12826,20 @@ static void gaudi_init_protection_bits(struct hl_device *hdev) * secured */ - gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE); - gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE); - gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE); - gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE); - gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE); + if (hdev->asic_prop.fw_security_disabled) { + gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE); + gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE); + gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE); + gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE); + gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE); + } gaudi_init_dma_protection_bits(hdev); gaudi_init_mme_protection_bits(hdev); + gaudi_init_nic_protection_bits(hdev); + gaudi_init_tpc_protection_bits(hdev); } @@ -9052,17 +13031,20 @@ void 
gaudi_init_security(struct hl_device *hdev) * property configuration of MME SBAB and ACC to be non-privileged and * non-secured */ - WREG32(mmMME0_SBAB_PROT, 0x2); - WREG32(mmMME0_ACC_PROT, 0x2); - WREG32(mmMME1_SBAB_PROT, 0x2); - WREG32(mmMME1_ACC_PROT, 0x2); - WREG32(mmMME2_SBAB_PROT, 0x2); - WREG32(mmMME2_ACC_PROT, 0x2); - WREG32(mmMME3_SBAB_PROT, 0x2); - WREG32(mmMME3_ACC_PROT, 0x2); + if (hdev->asic_prop.fw_security_disabled) { + WREG32(mmMME0_SBAB_PROT, 0x2); + WREG32(mmMME0_ACC_PROT, 0x2); + WREG32(mmMME1_SBAB_PROT, 0x2); + WREG32(mmMME1_ACC_PROT, 0x2); + WREG32(mmMME2_SBAB_PROT, 0x2); + WREG32(mmMME2_ACC_PROT, 0x2); + WREG32(mmMME3_SBAB_PROT, 0x2); + WREG32(mmMME3_ACC_PROT, 0x2); + } /* On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB */ - WREG32(0xC01B28, 0x1); + if (hdev->asic_prop.fw_security_disabled) + WREG32(0xC01B28, 0x1); gaudi_init_range_registers_lbw(hdev); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 235d47b2420f..3e5eb9e3d7bd 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -12,9 +12,7 @@ #include "../include/goya/goya_reg_map.h" #include <linux/pci.h> -#include <linux/genalloc.h> #include <linux/hwmon.h> -#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/iommu.h> #include <linux/seq_file.h> @@ -373,20 +371,20 @@ int goya_get_fixed_properties(struct hl_device *hdev) for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 1; + prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL; } for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; prop->hw_queues_props[i].driver_only = 1; - prop->hw_queues_props[i].requires_kernel_cb = 0; + prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL; } for (; i < NUMBER_OF_EXT_HW_QUEUES + 
NUMBER_OF_CPU_HW_QUEUES + NUMBER_OF_INT_HW_QUEUES; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_INT; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 0; + prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER; } prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; @@ -412,6 +410,7 @@ int goya_get_fixed_properties(struct hl_device *hdev) prop->mmu_hop_table_size = HOP_TABLE_SIZE; prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; prop->dram_page_size = PAGE_SIZE_2MB; + prop->dram_supports_virtual_memory = true; prop->dmmu.hop0_shift = HOP0_SHIFT; prop->dmmu.hop1_shift = HOP1_SHIFT; @@ -456,6 +455,11 @@ int goya_get_fixed_properties(struct hl_device *hdev) prop->max_pending_cs = GOYA_MAX_PENDING_CS; + /* disable fw security for now, set it in a later stage */ + prop->fw_security_disabled = true; + prop->fw_security_status_valid = false; + prop->hard_reset_done_by_fw = false; + return 0; } @@ -551,6 +555,11 @@ done: return rc; } +static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev) +{ + return RREG32(mmHW_STATE); +} + /* * goya_early_init - GOYA early initialization code * @@ -600,14 +609,27 @@ static int goya_early_init(struct hl_device *hdev) prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID); - rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, - mmCPU_BOOT_ERR0, GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); + rc = hl_pci_init(hdev); if (rc) goto free_queue_props; - /* Goya Firmware does not support security */ - prop->fw_security_disabled = true; - dev_info(hdev->dev, "firmware-level security is disabled\n"); + if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { + dev_info(hdev->dev, + "H/W state is dirty, must reset before initializing\n"); + hdev->asic_funcs->hw_fini(hdev, true); + } + + /* Before continuing in the initialization, we need to read the preboot + * version to determine whether we run with a security-enabled firmware + */ + rc = hl_fw_read_preboot_status(hdev, 
mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, + GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); + if (rc) { + if (hdev->reset_on_preboot_fail) + hdev->asic_funcs->hw_fini(hdev, true); + goto pci_fini; + } if (!hdev->pldm) { val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); @@ -618,6 +640,8 @@ static int goya_early_init(struct hl_device *hdev) return 0; +pci_fini: + hl_pci_fini(hdev); free_queue_props: kfree(hdev->asic_prop.hw_queues_props); return rc; @@ -2315,7 +2339,7 @@ static int goya_load_firmware_to_device(struct hl_device *hdev) dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET; - return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst); + return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst, 0, 0); } /* @@ -2332,14 +2356,14 @@ static int goya_load_boot_fit_to_device(struct hl_device *hdev) dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET; - return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst); + return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0); } /* * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx. * The version string should be located by that offset. 
*/ -static void goya_read_device_fw_version(struct hl_device *hdev, +static int goya_read_device_fw_version(struct hl_device *hdev, enum hl_fw_component fwc) { const char *name; @@ -2359,7 +2383,7 @@ static void goya_read_device_fw_version(struct hl_device *hdev, break; default: dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); - return; + return -EIO; } ver_off &= ~((u32)SRAM_BASE_ADDR); @@ -2371,7 +2395,11 @@ static void goya_read_device_fw_version(struct hl_device *hdev, dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n", name, ver_off); strcpy(dest, "unavailable"); + + return -EIO; } + + return 0; } static int goya_init_cpu(struct hl_device *hdev) @@ -2397,7 +2425,8 @@ static int goya_init_cpu(struct hl_device *hdev) rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, - mmCPU_CMD_STATUS_TO_HOST, mmCPU_BOOT_ERR0, + mmCPU_CMD_STATUS_TO_HOST, + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, false, GOYA_CPU_TIMEOUT_USEC, GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); @@ -2454,7 +2483,6 @@ int goya_mmu_init(struct hl_device *hdev) if (goya->hw_cap_initialized & HW_CAP_MMU) return 0; - hdev->dram_supports_virtual_memory = true; hdev->dram_default_page_mapping = true; for (i = 0 ; i < prop->max_asid ; i++) { @@ -2505,8 +2533,6 @@ static int goya_hw_init(struct hl_device *hdev) struct asic_fixed_properties *prop = &hdev->asic_prop; int rc; - dev_info(hdev->dev, "Starting initialization of H/W\n"); - /* Perform read from the device to make sure device is up */ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); @@ -2628,7 +2654,7 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset) "Timeout while waiting for device to reset 0x%x\n", status); - if (!hard_reset) { + if (!hard_reset && goya) { goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME | HW_CAP_GOLDEN | HW_CAP_TPC); WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, @@ -2643,12 +2669,15 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset) 
WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM, 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT); - goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | - HW_CAP_DDR_0 | HW_CAP_DDR_1 | - HW_CAP_DMA | HW_CAP_MME | - HW_CAP_MMU | HW_CAP_TPC_MBIST | - HW_CAP_GOLDEN | HW_CAP_TPC); - memset(goya->events_stat, 0, sizeof(goya->events_stat)); + if (goya) { + goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | + HW_CAP_DDR_0 | HW_CAP_DDR_1 | + HW_CAP_DMA | HW_CAP_MME | + HW_CAP_MMU | HW_CAP_TPC_MBIST | + HW_CAP_GOLDEN | HW_CAP_TPC); + + memset(goya->events_stat, 0, sizeof(goya->events_stat)); + } } int goya_suspend(struct hl_device *hdev) @@ -2792,6 +2821,11 @@ static void goya_dma_free_coherent(struct hl_device *hdev, size_t size, dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); } +int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size) +{ + return 0; +} + void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len) { @@ -2920,7 +2954,7 @@ free_fence_ptr: } int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, - u32 timeout, long *result) + u32 timeout, u64 *result) { struct goya_device *goya = hdev->asic_specific; @@ -4506,7 +4540,7 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, { struct cpucp_unmask_irq_arr_packet *pkt; size_t total_pkt_size; - long result; + u64 result; int rc; int irq_num_entries, irq_arr_index; __le32 *goya_irq_arr; @@ -4565,7 +4599,7 @@ static int goya_soft_reset_late_init(struct hl_device *hdev) static int goya_unmask_irq(struct hl_device *hdev, u16 event_type) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -4777,7 +4811,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, job->id = 0; job->user_cb = cb; - job->user_cb->cs_cnt++; + atomic_inc(&job->user_cb->cs_cnt); job->user_cb_size = cb_size; job->hw_queue_id = GOYA_QUEUE_ID_DMA_0; 
job->patched_cb = job->user_cb; @@ -4789,7 +4823,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, hl_debugfs_remove_job(hdev, job); kfree(job); - cb->cs_cnt--; + atomic_dec(&cb->cs_cnt); release_cb: hl_cb_put(cb); @@ -4872,9 +4906,10 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) return 0; for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) { - rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off, - prop->dram_base_address + off, PAGE_SIZE_2MB, - (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE); + rc = hl_mmu_map_page(hdev->kernel_ctx, + prop->dram_base_address + off, + prop->dram_base_address + off, PAGE_SIZE_2MB, + (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE); if (rc) { dev_err(hdev->dev, "Map failed for address 0x%llx\n", prop->dram_base_address + off); @@ -4883,8 +4918,10 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) } if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) { - rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR, - hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB, true); + rc = hl_mmu_map_page(hdev->kernel_ctx, + VA_CPU_ACCESSIBLE_MEM_ADDR, + hdev->cpu_accessible_dma_address, + PAGE_SIZE_2MB, true); if (rc) { dev_err(hdev->dev, @@ -4894,7 +4931,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) } } else { for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) { - rc = hl_mmu_map(hdev->kernel_ctx, + rc = hl_mmu_map_page(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, hdev->cpu_accessible_dma_address + cpu_off, PAGE_SIZE_4KB, true); @@ -4921,7 +4958,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) unmap_cpu: for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB) - if (hl_mmu_unmap(hdev->kernel_ctx, + if (hl_mmu_unmap_page(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, PAGE_SIZE_4KB, true)) dev_warn_ratelimited(hdev->dev, @@ -4929,7 +4966,7 @@ unmap_cpu: 
VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off); unmap: for (; off >= 0 ; off -= PAGE_SIZE_2MB) - if (hl_mmu_unmap(hdev->kernel_ctx, + if (hl_mmu_unmap_page(hdev->kernel_ctx, prop->dram_base_address + off, PAGE_SIZE_2MB, true)) dev_warn_ratelimited(hdev->dev, @@ -4955,13 +4992,14 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev) WREG32(mmCPU_IF_AWUSER_OVR_EN, 0); if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) { - if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR, + if (hl_mmu_unmap_page(hdev->kernel_ctx, + VA_CPU_ACCESSIBLE_MEM_ADDR, PAGE_SIZE_2MB, true)) dev_warn(hdev->dev, "Failed to unmap CPU accessible memory\n"); } else { for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) - if (hl_mmu_unmap(hdev->kernel_ctx, + if (hl_mmu_unmap_page(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, PAGE_SIZE_4KB, (cpu_off + PAGE_SIZE_4KB) >= SZ_2M)) @@ -4971,7 +5009,7 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev) } for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) - if (hl_mmu_unmap(hdev->kernel_ctx, + if (hl_mmu_unmap_page(hdev->kernel_ctx, prop->dram_base_address + off, PAGE_SIZE_2MB, (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE)) dev_warn_ratelimited(hdev->dev, @@ -5118,7 +5156,7 @@ int goya_cpucp_info_get(struct hl_device *hdev) if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) return 0; - rc = hl_fw_cpucp_info_get(hdev); + rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0); if (rc) return rc; @@ -5265,11 +5303,6 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data, return hl_fw_get_eeprom_data(hdev, data, max_size); } -static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev) -{ - return RREG32(mmHW_STATE); -} - static int goya_ctx_init(struct hl_ctx *ctx) { return 0; @@ -5290,18 +5323,24 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev) return 0; } -static void goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) +static u32 
goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, + u32 size) { + return 0; +} +static u32 goya_gen_wait_cb(struct hl_device *hdev, + struct hl_gen_wait_properties *prop) +{ + return 0; } -static void goya_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, - u16 sob_val, u16 mon_id, u32 q_idx) +static void goya_reset_sob(struct hl_device *hdev, void *data) { } -static void goya_reset_sob(struct hl_device *hdev, void *data) +static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group) { } @@ -5327,6 +5366,23 @@ u64 goya_get_device_time(struct hl_device *hdev) return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL); } +static void goya_collective_wait_init_cs(struct hl_cs *cs) +{ + +} + +static int goya_collective_wait_create_jobs(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, + u32 collective_engine_id) +{ + return -EINVAL; +} + +static void goya_ctx_fini(struct hl_ctx *ctx) +{ + +} + static const struct hl_asic_funcs goya_funcs = { .early_init = goya_early_init, .early_fini = goya_early_fini, @@ -5344,6 +5400,7 @@ static const struct hl_asic_funcs goya_funcs = { .pqe_write = goya_pqe_write, .asic_dma_alloc_coherent = goya_dma_alloc_coherent, .asic_dma_free_coherent = goya_dma_free_coherent, + .scrub_device_mem = goya_scrub_device_mem, .get_int_queue_base = goya_get_int_queue_base, .test_queues = goya_test_queues, .asic_dma_pool_zalloc = goya_dma_pool_zalloc, @@ -5381,13 +5438,13 @@ static const struct hl_asic_funcs goya_funcs = { .get_pci_id = goya_get_pci_id, .get_eeprom_data = goya_get_eeprom_data, .send_cpu_message = goya_send_cpu_message, - .get_hw_state = goya_get_hw_state, .pci_bars_map = goya_pci_bars_map, .init_iatu = goya_init_iatu, .rreg = hl_rreg, .wreg = hl_wreg, .halt_coresight = goya_halt_coresight, .ctx_init = goya_ctx_init, + .ctx_fini = goya_ctx_fini, .get_clk_rate = goya_get_clk_rate, .get_queue_id_for_cq = goya_get_queue_id_for_cq, .read_device_fw_version = 
goya_read_device_fw_version, @@ -5398,8 +5455,11 @@ static const struct hl_asic_funcs goya_funcs = { .gen_signal_cb = goya_gen_signal_cb, .gen_wait_cb = goya_gen_wait_cb, .reset_sob = goya_reset_sob, + .reset_sob_group = goya_reset_sob_group, .set_dma_mask_from_fw = goya_set_dma_mask_from_fw, - .get_device_time = goya_get_device_time + .get_device_time = goya_get_device_time, + .collective_wait_init_cs = goya_collective_wait_init_cs, + .collective_wait_create_jobs = goya_collective_wait_create_jobs }; /* diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index def86c75e035..8b3408211af6 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -192,7 +192,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id); int goya_test_queues(struct hl_device *hdev); int goya_test_cpu_queue(struct hl_device *hdev); int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, - u32 timeout, long *result); + u32 timeout, u64 *result); long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr); @@ -221,6 +221,7 @@ void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address, u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec, bool eb); int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser); +int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size); void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len); u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt); diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c index 4027a6a334d7..6fa03933b438 100644 --- a/drivers/misc/habanalabs/goya/goya_coresight.c +++ b/drivers/misc/habanalabs/goya/goya_coresight.c @@ -12,8 +12,6 @@ #include <uapi/misc/habanalabs.h> -#include 
<linux/coresight.h> - #define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100) #define SPMU_SECTION_SIZE DMA_CH_0_CS_SPMU_MAX_OFFSET diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c index cdd4903e48fa..3acb36a1a902 100644 --- a/drivers/misc/habanalabs/goya/goya_hwmgr.c +++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c @@ -36,7 +36,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk) { long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, false); @@ -69,7 +69,7 @@ static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, false); @@ -88,7 +88,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr, int rc; long value; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto fail; } @@ -118,7 +118,7 @@ static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, TPC_PLL, false); @@ -137,7 +137,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr, int rc; long value; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto fail; } @@ -167,7 +167,7 @@ static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = 
hl_get_frequency(hdev, IC_PLL, false); @@ -186,7 +186,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr, int rc; long value; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto fail; } @@ -216,7 +216,7 @@ static ssize_t mme_clk_curr_show(struct device *dev, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, true); @@ -233,7 +233,7 @@ static ssize_t tpc_clk_curr_show(struct device *dev, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, TPC_PLL, true); @@ -250,7 +250,7 @@ static ssize_t ic_clk_curr_show(struct device *dev, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, IC_PLL, true); @@ -266,7 +266,7 @@ static ssize_t pm_mng_profile_show(struct device *dev, { struct hl_device *hdev = dev_get_drvdata(dev); - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; return sprintf(buf, "%s\n", @@ -280,7 +280,7 @@ static ssize_t pm_mng_profile_store(struct device *dev, { struct hl_device *hdev = dev_get_drvdata(dev); - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto out; } @@ -335,7 +335,7 @@ static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr, { struct hl_device *hdev = dev_get_drvdata(dev); - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; return sprintf(buf, "%u\n", hdev->high_pll); @@ -348,7 +348,7 @@ static ssize_t high_pll_store(struct device *dev, struct 
device_attribute *attr, long value; int rc; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto out; } diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index 2a5c9cb3d505..00bd9b392f93 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -9,6 +9,38 @@ #define CPUCP_IF_H #include <linux/types.h> +#include <linux/if_ether.h> + +#define NUM_HBM_PSEUDO_CH 2 +#define NUM_HBM_CH_PER_DEV 8 +#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0 +#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001 +#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1 +#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002 +#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2 +#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004 +#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3 +#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008 +#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4 +#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010 +#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5 +#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020 +#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6 +#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0 + +struct hl_eq_hbm_ecc_data { + /* SERR counter */ + __le32 sec_cnt; + /* DERR counter */ + __le32 dec_cnt; + /* Supplemental Information according to the mask bits */ + __le32 hbm_ecc_info; + /* Address in hbm where the ecc happened */ + __le32 first_addr; + /* SERR continuous address counter */ + __le32 sec_cont_cnt; + __le32 pad; +}; /* * EVENT QUEUE @@ -30,6 +62,7 @@ struct hl_eq_entry { struct hl_eq_header hdr; union { struct hl_eq_ecc_data ecc_data; + struct hl_eq_hbm_ecc_data hbm_ecc_data; __le64 data[7]; }; }; @@ -199,6 +232,11 @@ enum pq_init_status { * CpuCP to write to the structure, to prevent data corruption in case of * mismatched driver/FW versions. 
* + * CPUCP_PACKET_NIC_INFO_GET - + * Fetch information from the device regarding the NIC. the host's driver + * passes the max size it allows the CpuCP to write to the structure, to + * prevent data corruption in case of mismatched driver/FW versions. + * * CPUCP_PACKET_TEMPERATURE_SET - * Set the value of the offset property of a specified thermal sensor. * The packet's arguments specify the desired sensor and the field to @@ -214,10 +252,26 @@ enum pq_init_status { * The packet's arguments specify the desired sensor and the field to * set. * - * CPUCP_PACKET_PLL_REG_GET - * Fetch register of PLL from the required PLL IP. - * The packet's arguments specify the PLL IP and the register to get. - * Each register is 32-bit value which is returned in result field. + * CPUCP_PACKET_PCIE_THROUGHPUT_GET + * Get throughput of PCIe. + * The packet's arguments specify the transaction direction (TX/RX). + * The window measurement is 10[msec], and the return value is in KB/sec. + * + * CPUCP_PACKET_PCIE_REPLAY_CNT_GET + * Replay count measures number of "replay" events, which is basicly + * number of retries done by PCIe. + * + * CPUCP_PACKET_TOTAL_ENERGY_GET + * Total Energy is measurement of energy from the time FW Linux + * is loaded. It is calculated by multiplying the average power + * by time (passed from armcp start). The units are in MilliJouls. + * + * CPUCP_PACKET_PLL_INFO_GET + * Fetch frequencies of PLL from the required PLL IP. + * The packet's arguments specify the device PLL type + * Pll type is the PLL from device pll_index enum. + * The result is composed of 4 outputs, each is 16-bit + * frequency in MHz. 
* */ @@ -244,14 +298,14 @@ enum cpucp_packet_id { CPUCP_PACKET_MAX_POWER_GET, /* sysfs */ CPUCP_PACKET_MAX_POWER_SET, /* sysfs */ CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */ - CPUCP_RESERVED, + CPUCP_PACKET_NIC_INFO_GET, /* internal */ CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */ CPUCP_PACKET_VOLTAGE_SET, /* sysfs */ CPUCP_PACKET_CURRENT_SET, /* sysfs */ - CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */ - CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */ + CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */ + CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */ - CPUCP_PACKET_PLL_REG_GET, /* internal */ + CPUCP_PACKET_PLL_INFO_GET, /* internal */ }; #define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5 @@ -262,6 +316,15 @@ enum cpucp_packet_id { #define CPUCP_PKT_CTL_OPCODE_SHIFT 16 #define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000 +#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0 +#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull +#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16 +#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull +#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32 +#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull +#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48 +#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull + struct cpucp_packet { union { __le64 value; /* For SET packets */ @@ -286,8 +349,9 @@ struct cpucp_packet { __u8 pad; /* unused */ }; - struct {/* For PLL register fetch */ + struct {/* For PLL info fetch */ __le16 pll_type; + /* TODO pll_reg is kept temporary before removal */ __le16 pll_reg; }; @@ -300,7 +364,7 @@ struct cpucp_packet { /* For led set */ __le32 led_index; - /* For get CpuCP info/EEPROM data */ + /* For get CpuCP info/EEPROM data/NIC info */ __le32 data_max_size; }; @@ -366,6 +430,7 @@ enum cpucp_pcie_throughput_attributes { cpucp_pcie_throughput_rx }; +/* TODO temporary kept before removal */ enum cpucp_pll_reg_attributes { cpucp_pll_nr_reg, cpucp_pll_nf_reg, @@ -374,6 +439,7 @@ enum cpucp_pll_reg_attributes { 
cpucp_pll_div_sel_reg }; +/* TODO temporary kept before removal */ enum cpucp_pll_type_attributes { cpucp_pll_cpu, cpucp_pll_pci, @@ -392,6 +458,12 @@ struct eq_generic_event { #define CARD_NAME_MAX_LEN 16 #define VERSION_MAX_LEN 128 #define CPUCP_MAX_SENSORS 128 +#define CPUCP_MAX_NICS 128 +#define CPUCP_LANES_PER_NIC 4 +#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024 +#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC) +#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64) +#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64) struct cpucp_sensor { __le32 type; @@ -408,6 +480,29 @@ enum cpucp_card_types { cpucp_card_type_pmc }; +#define CPUCP_SEC_CONF_ENABLED_SHIFT 0 +#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001 + +#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1 +#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002 + +#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2 +#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004 + +/** + * struct cpucp_security_info - Security information. + * @config: configuration bit field + * @keys_num: number of stored keys + * @revoked_keys: revoked keys bit field + * @min_svn: minimal security version + */ +struct cpucp_security_info { + __u8 config; + __u8 keys_num; + __u8 revoked_keys; + __u8 min_svn; +}; + /** * struct cpucp_info - Info from CpuCP that is necessary to the host's driver * @sensors: available sensors description. @@ -423,6 +518,7 @@ enum cpucp_card_types { * @cpucp_version: CpuCP S/W version. * @dram_size: available DRAM size. 
* @card_name: card name that will be displayed in HWMON subsystem on the host + * @sec_info: security information */ struct cpucp_info { struct cpucp_sensor sensors[CPUCP_MAX_SENSORS]; @@ -438,6 +534,26 @@ struct cpucp_info { __le32 reserved2; __le64 dram_size; char card_name[CARD_NAME_MAX_LEN]; + __le64 reserved3; + __le64 reserved4; + __u8 reserved5; + __u8 pad[7]; + struct cpucp_security_info sec_info; + __le32 reserved6; +}; + +struct cpucp_mac_addr { + __u8 mac_addr[ETH_ALEN]; +}; + +struct cpucp_nic_info { + struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS]; + __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN]; + __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN]; + __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN]; + __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN]; + __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN]; + __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN]; }; #endif /* CPUCP_IF_H */ diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h index bb67cafc6e00..e5801ecf0cb2 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -53,6 +53,23 @@ * trust), boot authentication (chain of * trust), data packets authentication. * + * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed. + * The PCI device ID might be wrong. + * + * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed. + * It mean that ppboot checksum + * verification for the preboot primary + * image has failed to match expected + * checksum. Trying to program image again + * might solve this. + * + * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed. + * It mean that ppboot checksum + * verification for the preboot secondary + * image has failed to match expected + * checksum. Trying to program image again + * might solve this. + * * CPU_BOOT_ERR0_ENABLED Error registers enabled. 
* This is a main indication that the * running FW populates the error @@ -68,8 +85,94 @@ #define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6) #define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7) #define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8) +#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << 9) +#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << 10) +#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11) #define CPU_BOOT_ERR0_ENABLED (1 << 31) +/* + * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers + * + * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled. + * This is an indication for security + * enabled in FW, which means that + * all conditions for security are met: + * device is indicated as security enabled, + * registers are protected, and device + * uses keys for image verification. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled. + * Enabled when JTAG or DEBUG is enabled + * in FW. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled. + * Watchdog is enabled in FW. + * Initialized in: preboot + * + * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled. + * DRAM initialization has been done in FW. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled. + * If set, it means that during boot, + * FW waited for BMC data. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized. + * FW initialized E2E credits. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized. + * FW initialized HBM credits. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized. + * FW initialized rate limiter. + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled. + * FW initialized SRAM scrambler. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled. + * FW initialized DRAM scrambler. 
+ * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN FW hard reset procedure is enabled. + * FW has the hard reset procedure + * implemented. This means that FW will + * perform hard reset procedure on + * receiving the halt-machine event. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled. + * Initialized in: linux + * + * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled. + * This is a main indication that the + * running FW populates the device status + * register. Meaning the device status + * bits are not garbage, but actual + * statuses. + * Initialized in: preboot + * + */ +#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << 0) +#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << 1) +#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << 2) +#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << 3) +#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << 4) +#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << 5) +#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << 6) +#define CPU_BOOT_DEV_STS0_RL_EN (1 << 7) +#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << 8) +#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9) +#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10) +#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11) +#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31) + enum cpu_boot_status { CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */ CPU_BOOT_STATUS_IN_WFE = 1, diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h index f92dc53af074..5bb54b34a8ae 100644 --- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h @@ -81,6 +81,7 @@ #include "sif_rtr_ctrl_6_regs.h" #include "sif_rtr_ctrl_7_regs.h" #include "psoc_etr_regs.h" +#include "psoc_cpu_pll_regs.h" #include "dma0_qm_masks.h" #include "mme0_qm_masks.h" @@ -89,9 +90,18 @@ #include "tpc0_cfg_masks.h" #include "psoc_global_conf_masks.h" -#include 
"psoc_pci_pll_regs.h" -#include "psoc_hbm_pll_regs.h" -#include "psoc_cpu_pll_regs.h" +#include "nic0_qm0_regs.h" +#include "nic1_qm0_regs.h" +#include "nic2_qm0_regs.h" +#include "nic3_qm0_regs.h" +#include "nic4_qm0_regs.h" +#include "nic0_qm1_regs.h" +#include "nic1_qm1_regs.h" +#include "nic2_qm1_regs.h" +#include "nic3_qm1_regs.h" +#include "nic4_qm1_regs.h" + +#include "nic0_qm0_masks.h" #define GAUDI_ECC_MEM_SEL_OFFSET 0xF18 #define GAUDI_ECC_ADDRESS_OFFSET 0xF1C @@ -295,4 +305,14 @@ #define mmPCIE_AUX_FLR_CTRL 0xC07394 #define mmPCIE_AUX_DBI 0xC07490 +#define mmPSOC_PCI_PLL_NR 0xC72100 +#define mmSRAM_W_PLL_NR 0x4C8100 +#define mmPSOC_HBM_PLL_NR 0xC74100 +#define mmNIC0_PLL_NR 0xCF9100 +#define mmDMA_W_PLL_NR 0x487100 +#define mmMESH_W_PLL_NR 0x4C7100 +#define mmPSOC_MME_PLL_NR 0xC71100 +#define mmPSOC_TPC_PLL_NR 0xC73100 +#define mmIF_W_PLL_NR 0x488100 + #endif /* ASIC_REG_GAUDI_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h new file mode 100644 index 000000000000..bd37b6452133 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h @@ -0,0 +1,800 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC0_QM0_MASKS_H_ +#define ASIC_REG_NIC0_QM0_MASKS_H_ + +/* + ***************************************** + * NIC0_QM0 (Prototype: QMAN) + ***************************************** + */ + +/* NIC0_QM0_GLBL_CFG0 */ +#define NIC0_QM0_GLBL_CFG0_PQF_EN_SHIFT 0 +#define NIC0_QM0_GLBL_CFG0_PQF_EN_MASK 0xF +#define NIC0_QM0_GLBL_CFG0_CQF_EN_SHIFT 4 +#define NIC0_QM0_GLBL_CFG0_CQF_EN_MASK 0x1F0 +#define NIC0_QM0_GLBL_CFG0_CP_EN_SHIFT 9 +#define NIC0_QM0_GLBL_CFG0_CP_EN_MASK 0x3E00 + +/* NIC0_QM0_GLBL_CFG1 */ +#define NIC0_QM0_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK 0xF +#define NIC0_QM0_GLBL_CFG1_CQF_STOP_SHIFT 4 +#define NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK 0x1F0 +#define NIC0_QM0_GLBL_CFG1_CP_STOP_SHIFT 9 +#define NIC0_QM0_GLBL_CFG1_CP_STOP_MASK 0x3E00 +#define NIC0_QM0_GLBL_CFG1_PQF_FLUSH_SHIFT 16 +#define NIC0_QM0_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000 +#define NIC0_QM0_GLBL_CFG1_CQF_FLUSH_SHIFT 20 +#define NIC0_QM0_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000 +#define NIC0_QM0_GLBL_CFG1_CP_FLUSH_SHIFT 25 +#define NIC0_QM0_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000 + +/* NIC0_QM0_GLBL_PROT */ +#define NIC0_QM0_GLBL_PROT_PQF_SHIFT 0 +#define NIC0_QM0_GLBL_PROT_PQF_MASK 0xF +#define NIC0_QM0_GLBL_PROT_CQF_SHIFT 4 +#define NIC0_QM0_GLBL_PROT_CQF_MASK 0x1F0 +#define NIC0_QM0_GLBL_PROT_CP_SHIFT 9 +#define NIC0_QM0_GLBL_PROT_CP_MASK 0x3E00 +#define NIC0_QM0_GLBL_PROT_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_PROT_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_PROT_ARB_SHIFT 15 +#define NIC0_QM0_GLBL_PROT_ARB_MASK 0x8000 + +/* NIC0_QM0_GLBL_ERR_CFG */ +#define NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0 +#define NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF +#define NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0 +#define NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9 
+#define NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00 +#define NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16 +#define NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000 +#define NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20 +#define NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000 +#define NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25 +#define NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000 +#define NIC0_QM0_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31 +#define NIC0_QM0_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000 + +/* NIC0_QM0_GLBL_SECURE_PROPS */ +#define NIC0_QM0_GLBL_SECURE_PROPS_0_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_1_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_2_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_3_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_4_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400 + +/* NIC0_QM0_GLBL_NON_SECURE_PROPS */ +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF 
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400 + +/* NIC0_QM0_GLBL_STS0 */ +#define NIC0_QM0_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define NIC0_QM0_GLBL_STS0_PQF_IDLE_MASK 0xF +#define NIC0_QM0_GLBL_STS0_CQF_IDLE_SHIFT 4 +#define NIC0_QM0_GLBL_STS0_CQF_IDLE_MASK 0x1F0 +#define NIC0_QM0_GLBL_STS0_CP_IDLE_SHIFT 9 +#define NIC0_QM0_GLBL_STS0_CP_IDLE_MASK 0x3E00 +#define NIC0_QM0_GLBL_STS0_PQF_IS_STOP_SHIFT 16 +#define NIC0_QM0_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000 +#define NIC0_QM0_GLBL_STS0_CQF_IS_STOP_SHIFT 20 +#define NIC0_QM0_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000 +#define NIC0_QM0_GLBL_STS0_CP_IS_STOP_SHIFT 25 +#define NIC0_QM0_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000 +#define NIC0_QM0_GLBL_STS0_ARB_IS_STOP_SHIFT 31 +#define NIC0_QM0_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000 + +/* NIC0_QM0_GLBL_STS1 */ +#define NIC0_QM0_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define NIC0_QM0_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define NIC0_QM0_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define NIC0_QM0_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define NIC0_QM0_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define NIC0_QM0_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define NIC0_QM0_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define NIC0_QM0_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define NIC0_QM0_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define 
NIC0_QM0_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define NIC0_QM0_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define NIC0_QM0_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define NIC0_QM0_GLBL_STS1_CP_WREG_ERR_SHIFT 6 +#define NIC0_QM0_GLBL_STS1_CP_WREG_ERR_MASK 0x40 +#define NIC0_QM0_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8 +#define NIC0_QM0_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100 +#define NIC0_QM0_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9 +#define NIC0_QM0_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200 +#define NIC0_QM0_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10 +#define NIC0_QM0_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400 +#define NIC0_QM0_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11 +#define NIC0_QM0_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800 +#define NIC0_QM0_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12 +#define NIC0_QM0_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define NIC0_QM0_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13 +#define NIC0_QM0_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define NIC0_QM0_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15 +#define NIC0_QM0_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* NIC0_QM0_GLBL_STS1_4 */ +#define NIC0_QM0_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1 +#define NIC0_QM0_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2 +#define NIC0_QM0_GLBL_STS1_4_CP_RD_ERR_SHIFT 2 +#define NIC0_QM0_GLBL_STS1_4_CP_RD_ERR_MASK 0x4 +#define NIC0_QM0_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define NIC0_QM0_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define NIC0_QM0_GLBL_STS1_4_CP_STOP_OP_SHIFT 4 +#define NIC0_QM0_GLBL_STS1_4_CP_STOP_OP_MASK 0x10 +#define NIC0_QM0_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5 +#define NIC0_QM0_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20 +#define NIC0_QM0_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6 +#define NIC0_QM0_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define 
NIC0_QM0_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* NIC0_QM0_GLBL_MSG_EN */ +#define NIC0_QM0_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0 +#define NIC0_QM0_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1 +#define NIC0_QM0_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1 +#define NIC0_QM0_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2 +#define NIC0_QM0_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2 +#define NIC0_QM0_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4 +#define NIC0_QM0_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3 +#define NIC0_QM0_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8 +#define NIC0_QM0_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4 +#define NIC0_QM0_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10 +#define NIC0_QM0_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5 +#define NIC0_QM0_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20 +#define NIC0_QM0_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6 +#define NIC0_QM0_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800 +#define 
NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* NIC0_QM0_GLBL_MSG_EN_4 */ +#define NIC0_QM0_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1 +#define NIC0_QM0_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define 
NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* NIC0_QM0_PQ_BASE_LO */ +#define NIC0_QM0_PQ_BASE_LO_VAL_SHIFT 0 +#define NIC0_QM0_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_BASE_HI */ +#define NIC0_QM0_PQ_BASE_HI_VAL_SHIFT 0 +#define NIC0_QM0_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_SIZE */ +#define NIC0_QM0_PQ_SIZE_VAL_SHIFT 0 +#define NIC0_QM0_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_PI */ +#define NIC0_QM0_PQ_PI_VAL_SHIFT 0 +#define NIC0_QM0_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_CI */ +#define NIC0_QM0_PQ_CI_VAL_SHIFT 0 +#define NIC0_QM0_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_CFG0 */ +#define NIC0_QM0_PQ_CFG0_RESERVED_SHIFT 0 +#define NIC0_QM0_PQ_CFG0_RESERVED_MASK 0x1 + +/* NIC0_QM0_PQ_CFG1 */ +#define NIC0_QM0_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define NIC0_QM0_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define NIC0_QM0_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define NIC0_QM0_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* NIC0_QM0_PQ_ARUSER_31_11 */ +#define NIC0_QM0_PQ_ARUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_PQ_STS0 */ +#define NIC0_QM0_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define NIC0_QM0_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define NIC0_QM0_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define NIC0_QM0_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* NIC0_QM0_PQ_STS1 */ +#define NIC0_QM0_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define NIC0_QM0_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define NIC0_QM0_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define NIC0_QM0_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define NIC0_QM0_PQ_STS1_PQ_BUSY_SHIFT 31 +#define NIC0_QM0_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* NIC0_QM0_CQ_CFG0 */ +#define NIC0_QM0_CQ_CFG0_RESERVED_SHIFT 0 +#define NIC0_QM0_CQ_CFG0_RESERVED_MASK 0x1 + +/* NIC0_QM0_CQ_CFG1 */ +#define NIC0_QM0_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define NIC0_QM0_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define NIC0_QM0_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define 
NIC0_QM0_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_ARUSER_31_11 */ +#define NIC0_QM0_CQ_ARUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_CQ_STS0 */ +#define NIC0_QM0_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define NIC0_QM0_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define NIC0_QM0_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define NIC0_QM0_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_STS1 */ +#define NIC0_QM0_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define NIC0_QM0_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define NIC0_QM0_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define NIC0_QM0_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define NIC0_QM0_CQ_STS1_CQ_BUSY_SHIFT 31 +#define NIC0_QM0_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* NIC0_QM0_CQ_PTR_LO_0 */ +#define NIC0_QM0_CQ_PTR_LO_0_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_0 */ +#define NIC0_QM0_CQ_PTR_HI_0_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_0 */ +#define NIC0_QM0_CQ_TSIZE_0_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_0 */ +#define NIC0_QM0_CQ_CTL_0_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_0_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_0_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_0_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_1 */ +#define NIC0_QM0_CQ_PTR_LO_1_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_1 */ +#define NIC0_QM0_CQ_PTR_HI_1_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_1 */ +#define NIC0_QM0_CQ_TSIZE_1_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_1 */ +#define NIC0_QM0_CQ_CTL_1_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_1_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_1_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_1_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_2 */ +#define NIC0_QM0_CQ_PTR_LO_2_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_2 
*/ +#define NIC0_QM0_CQ_PTR_HI_2_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_2 */ +#define NIC0_QM0_CQ_TSIZE_2_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_2 */ +#define NIC0_QM0_CQ_CTL_2_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_2_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_2_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_2_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_3 */ +#define NIC0_QM0_CQ_PTR_LO_3_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_3 */ +#define NIC0_QM0_CQ_PTR_HI_3_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_3 */ +#define NIC0_QM0_CQ_TSIZE_3_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_3 */ +#define NIC0_QM0_CQ_CTL_3_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_3_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_3_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_3_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_4 */ +#define NIC0_QM0_CQ_PTR_LO_4_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_4 */ +#define NIC0_QM0_CQ_PTR_HI_4_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_4 */ +#define NIC0_QM0_CQ_TSIZE_4_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_4 */ +#define NIC0_QM0_CQ_CTL_4_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_4_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_4_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_4_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_STS */ +#define NIC0_QM0_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_STS */ +#define NIC0_QM0_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_STS */ +#define NIC0_QM0_CQ_TSIZE_STS_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_STS */ +#define NIC0_QM0_CQ_CTL_STS_RPT_SHIFT 0 +#define 
NIC0_QM0_CQ_CTL_STS_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_STS_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_IFIFO_CNT */ +#define NIC0_QM0_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* NIC0_QM0_CP_MSG_BASE0_ADDR_LO */ +#define NIC0_QM0_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE0_ADDR_HI */ +#define NIC0_QM0_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE1_ADDR_LO */ +#define NIC0_QM0_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE1_ADDR_HI */ +#define NIC0_QM0_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE2_ADDR_LO */ +#define NIC0_QM0_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE2_ADDR_HI */ +#define NIC0_QM0_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE3_ADDR_LO */ +#define NIC0_QM0_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE3_ADDR_HI */ +#define NIC0_QM0_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_LDMA_TSIZE_OFFSET */ +#define NIC0_QM0_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define NIC0_QM0_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET */ +#define NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_FENCE0_RDATA */ +#define 
NIC0_QM0_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* NIC0_QM0_CP_FENCE1_RDATA */ +#define NIC0_QM0_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* NIC0_QM0_CP_FENCE2_RDATA */ +#define NIC0_QM0_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* NIC0_QM0_CP_FENCE3_RDATA */ +#define NIC0_QM0_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* NIC0_QM0_CP_FENCE0_CNT */ +#define NIC0_QM0_CP_FENCE0_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE0_CNT_VAL_MASK 0x3FFF + +/* NIC0_QM0_CP_FENCE1_CNT */ +#define NIC0_QM0_CP_FENCE1_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE1_CNT_VAL_MASK 0x3FFF + +/* NIC0_QM0_CP_FENCE2_CNT */ +#define NIC0_QM0_CP_FENCE2_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE2_CNT_VAL_MASK 0x3FFF + +/* NIC0_QM0_CP_FENCE3_CNT */ +#define NIC0_QM0_CP_FENCE3_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE3_CNT_VAL_MASK 0x3FFF + +/* NIC0_QM0_CP_STS */ +#define NIC0_QM0_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define NIC0_QM0_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define NIC0_QM0_CP_STS_ERDY_SHIFT 16 +#define NIC0_QM0_CP_STS_ERDY_MASK 0x10000 +#define NIC0_QM0_CP_STS_RRDY_SHIFT 17 +#define NIC0_QM0_CP_STS_RRDY_MASK 0x20000 +#define NIC0_QM0_CP_STS_MRDY_SHIFT 18 +#define NIC0_QM0_CP_STS_MRDY_MASK 0x40000 +#define NIC0_QM0_CP_STS_SW_STOP_SHIFT 19 +#define NIC0_QM0_CP_STS_SW_STOP_MASK 0x80000 +#define NIC0_QM0_CP_STS_FENCE_ID_SHIFT 20 +#define NIC0_QM0_CP_STS_FENCE_ID_MASK 0x300000 +#define NIC0_QM0_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define NIC0_QM0_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* NIC0_QM0_CP_CURRENT_INST_LO */ +#define NIC0_QM0_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_CURRENT_INST_HI */ +#define NIC0_QM0_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_BARRIER_CFG */ +#define 
NIC0_QM0_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define NIC0_QM0_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF +#define NIC0_QM0_CP_BARRIER_CFG_RBGUARD_SHIFT 16 +#define NIC0_QM0_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000 + +/* NIC0_QM0_CP_DBG_0 */ +#define NIC0_QM0_CP_DBG_0_CS_SHIFT 0 +#define NIC0_QM0_CP_DBG_0_CS_MASK 0xF +#define NIC0_QM0_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4 +#define NIC0_QM0_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10 +#define NIC0_QM0_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5 +#define NIC0_QM0_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20 +#define NIC0_QM0_CP_DBG_0_MREB_STALL_SHIFT 6 +#define NIC0_QM0_CP_DBG_0_MREB_STALL_MASK 0x40 +#define NIC0_QM0_CP_DBG_0_STALL_SHIFT 7 +#define NIC0_QM0_CP_DBG_0_STALL_MASK 0x80 + +/* NIC0_QM0_CP_ARUSER_31_11 */ +#define NIC0_QM0_CP_ARUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_CP_AWUSER_31_11 */ +#define NIC0_QM0_CP_AWUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_ARB_CFG_0 */ +#define NIC0_QM0_ARB_CFG_0_TYPE_SHIFT 0 +#define NIC0_QM0_ARB_CFG_0_TYPE_MASK 0x1 +#define NIC0_QM0_ARB_CFG_0_IS_MASTER_SHIFT 4 +#define NIC0_QM0_ARB_CFG_0_IS_MASTER_MASK 0x10 +#define NIC0_QM0_ARB_CFG_0_EN_SHIFT 8 +#define NIC0_QM0_ARB_CFG_0_EN_MASK 0x100 +#define NIC0_QM0_ARB_CFG_0_MASK_SHIFT 12 +#define NIC0_QM0_ARB_CFG_0_MASK_MASK 0xF000 +#define NIC0_QM0_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16 +#define NIC0_QM0_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000 + +/* NIC0_QM0_ARB_CHOISE_Q_PUSH */ +#define NIC0_QM0_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0 +#define NIC0_QM0_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3 + +/* NIC0_QM0_ARB_WRR_WEIGHT */ +#define NIC0_QM0_ARB_WRR_WEIGHT_VAL_SHIFT 0 +#define NIC0_QM0_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_CFG_1 */ +#define NIC0_QM0_ARB_CFG_1_CLR_SHIFT 0 +#define NIC0_QM0_ARB_CFG_1_CLR_MASK 0x1 + +/* NIC0_QM0_ARB_MST_AVAIL_CRED */ +#define NIC0_QM0_ARB_MST_AVAIL_CRED_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F + +/* NIC0_QM0_ARB_MST_CRED_INC */ +#define 
NIC0_QM0_ARB_MST_CRED_INC_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST */ +#define NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST */ +#define NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0 +#define NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_MST_SLAVE_EN */ +#define NIC0_QM0_ARB_MST_SLAVE_EN_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_MST_QUIET_PER */ +#define NIC0_QM0_ARB_MST_QUIET_PER_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_SLV_CHOISE_WDT */ +#define NIC0_QM0_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0 +#define NIC0_QM0_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_SLV_ID */ +#define NIC0_QM0_ARB_SLV_ID_VAL_SHIFT 0 +#define NIC0_QM0_ARB_SLV_ID_VAL_MASK 0x1F + +/* NIC0_QM0_ARB_MSG_MAX_INFLIGHT */ +#define NIC0_QM0_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F + +/* NIC0_QM0_ARB_MSG_AWUSER_31_11 */ +#define NIC0_QM0_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP */ +#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0 +#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF +#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10 +#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400 + +/* NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP */ +#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0 +#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF +#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10 +#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400 + +/* NIC0_QM0_ARB_BASE_LO */ +#define NIC0_QM0_ARB_BASE_LO_VAL_SHIFT 0 +#define NIC0_QM0_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_BASE_HI */ +#define NIC0_QM0_ARB_BASE_HI_VAL_SHIFT 
0 +#define NIC0_QM0_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_STATE_STS */ +#define NIC0_QM0_ARB_STATE_STS_VAL_SHIFT 0 +#define NIC0_QM0_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_CHOISE_FULLNESS_STS */ +#define NIC0_QM0_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0 +#define NIC0_QM0_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F + +/* NIC0_QM0_ARB_MSG_STS */ +#define NIC0_QM0_ARB_MSG_STS_FULL_SHIFT 0 +#define NIC0_QM0_ARB_MSG_STS_FULL_MASK 0x1 +#define NIC0_QM0_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1 +#define NIC0_QM0_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2 + +/* NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD */ +#define NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0 +#define NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3 + +/* NIC0_QM0_ARB_ERR_CAUSE */ +#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0 +#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1 +#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1 +#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2 +#define NIC0_QM0_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2 +#define NIC0_QM0_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4 + +/* NIC0_QM0_ARB_ERR_MSG_EN */ +#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0 +#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1 +#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1 +#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2 +#define NIC0_QM0_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2 +#define NIC0_QM0_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4 + +/* NIC0_QM0_ARB_ERR_STS_DRP */ +#define NIC0_QM0_ARB_ERR_STS_DRP_VAL_SHIFT 0 +#define NIC0_QM0_ARB_ERR_STS_DRP_VAL_MASK 0x3 + +/* NIC0_QM0_ARB_MST_CRED_STS */ +#define NIC0_QM0_ARB_MST_CRED_STS_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_CRED_STS_VAL_MASK 0x7F + +/* NIC0_QM0_CGM_CFG */ +#define NIC0_QM0_CGM_CFG_IDLE_TH_SHIFT 0 +#define NIC0_QM0_CGM_CFG_IDLE_TH_MASK 0xFFF +#define NIC0_QM0_CGM_CFG_G2F_TH_SHIFT 16 +#define NIC0_QM0_CGM_CFG_G2F_TH_MASK 0xFF0000 +#define NIC0_QM0_CGM_CFG_CP_IDLE_MASK_SHIFT 24 +#define NIC0_QM0_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000 +#define NIC0_QM0_CGM_CFG_EN_SHIFT 31 +#define 
NIC0_QM0_CGM_CFG_EN_MASK 0x80000000 + +/* NIC0_QM0_CGM_STS */ +#define NIC0_QM0_CGM_STS_ST_SHIFT 0 +#define NIC0_QM0_CGM_STS_ST_MASK 0x3 +#define NIC0_QM0_CGM_STS_CG_SHIFT 4 +#define NIC0_QM0_CGM_STS_CG_MASK 0x10 +#define NIC0_QM0_CGM_STS_AGENT_IDLE_SHIFT 8 +#define NIC0_QM0_CGM_STS_AGENT_IDLE_MASK 0x100 +#define NIC0_QM0_CGM_STS_AXI_IDLE_SHIFT 9 +#define NIC0_QM0_CGM_STS_AXI_IDLE_MASK 0x200 +#define NIC0_QM0_CGM_STS_CP_IDLE_SHIFT 10 +#define NIC0_QM0_CGM_STS_CP_IDLE_MASK 0x400 + +/* NIC0_QM0_CGM_CFG1 */ +#define NIC0_QM0_CGM_CFG1_MASK_TH_SHIFT 0 +#define NIC0_QM0_CGM_CFG1_MASK_TH_MASK 0xFF + +/* NIC0_QM0_LOCAL_RANGE_BASE */ +#define NIC0_QM0_LOCAL_RANGE_BASE_VAL_SHIFT 0 +#define NIC0_QM0_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF + +/* NIC0_QM0_LOCAL_RANGE_SIZE */ +#define NIC0_QM0_LOCAL_RANGE_SIZE_VAL_SHIFT 0 +#define NIC0_QM0_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF + +/* NIC0_QM0_CSMR_STRICT_PRIO_CFG */ +#define NIC0_QM0_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0 +#define NIC0_QM0_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1 + +/* NIC0_QM0_HBW_RD_RATE_LIM_CFG_1 */ +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31 +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* NIC0_QM0_LBW_WR_RATE_LIM_CFG_0 */ +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* NIC0_QM0_LBW_WR_RATE_LIM_CFG_1 */ +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31 +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* NIC0_QM0_HBW_RD_RATE_LIM_CFG_0 */ +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define 
NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* NIC0_QM0_GLBL_AXCACHE */ +#define NIC0_QM0_GLBL_AXCACHE_AR_SHIFT 0 +#define NIC0_QM0_GLBL_AXCACHE_AR_MASK 0xF +#define NIC0_QM0_GLBL_AXCACHE_AW_SHIFT 16 +#define NIC0_QM0_GLBL_AXCACHE_AW_MASK 0xF0000 + +/* NIC0_QM0_IND_GW_APB_CFG */ +#define NIC0_QM0_IND_GW_APB_CFG_ADDR_SHIFT 0 +#define NIC0_QM0_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF +#define NIC0_QM0_IND_GW_APB_CFG_CMD_SHIFT 31 +#define NIC0_QM0_IND_GW_APB_CFG_CMD_MASK 0x80000000 + +/* NIC0_QM0_IND_GW_APB_WDATA */ +#define NIC0_QM0_IND_GW_APB_WDATA_VAL_SHIFT 0 +#define NIC0_QM0_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_IND_GW_APB_RDATA */ +#define NIC0_QM0_IND_GW_APB_RDATA_VAL_SHIFT 0 +#define NIC0_QM0_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_IND_GW_APB_STATUS */ +#define NIC0_QM0_IND_GW_APB_STATUS_RDY_SHIFT 0 +#define NIC0_QM0_IND_GW_APB_STATUS_RDY_MASK 0x1 +#define NIC0_QM0_IND_GW_APB_STATUS_ERR_SHIFT 1 +#define NIC0_QM0_IND_GW_APB_STATUS_ERR_MASK 0x2 + +/* NIC0_QM0_GLBL_ERR_ADDR_LO */ +#define NIC0_QM0_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_GLBL_ERR_ADDR_HI */ +#define NIC0_QM0_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_GLBL_ERR_WDATA */ +#define NIC0_QM0_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define NIC0_QM0_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_GLBL_MEM_INIT_BUSY */ +#define NIC0_QM0_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0 +#define NIC0_QM0_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF + +#endif /* ASIC_REG_NIC0_QM0_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h new file mode 100644 index 000000000000..7c97f4041b8e --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 
HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC0_QM0_REGS_H_ +#define ASIC_REG_NIC0_QM0_REGS_H_ + +/* + ***************************************** + * NIC0_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC0_QM0_GLBL_CFG0 0xCE0000 + +#define mmNIC0_QM0_GLBL_CFG1 0xCE0004 + +#define mmNIC0_QM0_GLBL_PROT 0xCE0008 + +#define mmNIC0_QM0_GLBL_ERR_CFG 0xCE000C + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_0 0xCE0010 + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_1 0xCE0014 + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_2 0xCE0018 + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_3 0xCE001C + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_4 0xCE0020 + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0 0xCE0024 + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1 0xCE0028 + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2 0xCE002C + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3 0xCE0030 + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4 0xCE0034 + +#define mmNIC0_QM0_GLBL_STS0 0xCE0038 + +#define mmNIC0_QM0_GLBL_STS1_0 0xCE0040 + +#define mmNIC0_QM0_GLBL_STS1_1 0xCE0044 + +#define mmNIC0_QM0_GLBL_STS1_2 0xCE0048 + +#define mmNIC0_QM0_GLBL_STS1_3 0xCE004C + +#define mmNIC0_QM0_GLBL_STS1_4 0xCE0050 + +#define mmNIC0_QM0_GLBL_MSG_EN_0 0xCE0054 + +#define mmNIC0_QM0_GLBL_MSG_EN_1 0xCE0058 + +#define mmNIC0_QM0_GLBL_MSG_EN_2 0xCE005C + +#define mmNIC0_QM0_GLBL_MSG_EN_3 0xCE0060 + +#define mmNIC0_QM0_GLBL_MSG_EN_4 0xCE0068 + +#define mmNIC0_QM0_PQ_BASE_LO_0 0xCE0070 + +#define mmNIC0_QM0_PQ_BASE_LO_1 0xCE0074 + +#define mmNIC0_QM0_PQ_BASE_LO_2 0xCE0078 + +#define mmNIC0_QM0_PQ_BASE_LO_3 0xCE007C + +#define mmNIC0_QM0_PQ_BASE_HI_0 0xCE0080 + +#define mmNIC0_QM0_PQ_BASE_HI_1 0xCE0084 + +#define mmNIC0_QM0_PQ_BASE_HI_2 0xCE0088 + +#define mmNIC0_QM0_PQ_BASE_HI_3 0xCE008C + +#define mmNIC0_QM0_PQ_SIZE_0 0xCE0090 + +#define 
mmNIC0_QM0_PQ_SIZE_1 0xCE0094 + +#define mmNIC0_QM0_PQ_SIZE_2 0xCE0098 + +#define mmNIC0_QM0_PQ_SIZE_3 0xCE009C + +#define mmNIC0_QM0_PQ_PI_0 0xCE00A0 + +#define mmNIC0_QM0_PQ_PI_1 0xCE00A4 + +#define mmNIC0_QM0_PQ_PI_2 0xCE00A8 + +#define mmNIC0_QM0_PQ_PI_3 0xCE00AC + +#define mmNIC0_QM0_PQ_CI_0 0xCE00B0 + +#define mmNIC0_QM0_PQ_CI_1 0xCE00B4 + +#define mmNIC0_QM0_PQ_CI_2 0xCE00B8 + +#define mmNIC0_QM0_PQ_CI_3 0xCE00BC + +#define mmNIC0_QM0_PQ_CFG0_0 0xCE00C0 + +#define mmNIC0_QM0_PQ_CFG0_1 0xCE00C4 + +#define mmNIC0_QM0_PQ_CFG0_2 0xCE00C8 + +#define mmNIC0_QM0_PQ_CFG0_3 0xCE00CC + +#define mmNIC0_QM0_PQ_CFG1_0 0xCE00D0 + +#define mmNIC0_QM0_PQ_CFG1_1 0xCE00D4 + +#define mmNIC0_QM0_PQ_CFG1_2 0xCE00D8 + +#define mmNIC0_QM0_PQ_CFG1_3 0xCE00DC + +#define mmNIC0_QM0_PQ_ARUSER_31_11_0 0xCE00E0 + +#define mmNIC0_QM0_PQ_ARUSER_31_11_1 0xCE00E4 + +#define mmNIC0_QM0_PQ_ARUSER_31_11_2 0xCE00E8 + +#define mmNIC0_QM0_PQ_ARUSER_31_11_3 0xCE00EC + +#define mmNIC0_QM0_PQ_STS0_0 0xCE00F0 + +#define mmNIC0_QM0_PQ_STS0_1 0xCE00F4 + +#define mmNIC0_QM0_PQ_STS0_2 0xCE00F8 + +#define mmNIC0_QM0_PQ_STS0_3 0xCE00FC + +#define mmNIC0_QM0_PQ_STS1_0 0xCE0100 + +#define mmNIC0_QM0_PQ_STS1_1 0xCE0104 + +#define mmNIC0_QM0_PQ_STS1_2 0xCE0108 + +#define mmNIC0_QM0_PQ_STS1_3 0xCE010C + +#define mmNIC0_QM0_CQ_CFG0_0 0xCE0110 + +#define mmNIC0_QM0_CQ_CFG0_1 0xCE0114 + +#define mmNIC0_QM0_CQ_CFG0_2 0xCE0118 + +#define mmNIC0_QM0_CQ_CFG0_3 0xCE011C + +#define mmNIC0_QM0_CQ_CFG0_4 0xCE0120 + +#define mmNIC0_QM0_CQ_CFG1_0 0xCE0124 + +#define mmNIC0_QM0_CQ_CFG1_1 0xCE0128 + +#define mmNIC0_QM0_CQ_CFG1_2 0xCE012C + +#define mmNIC0_QM0_CQ_CFG1_3 0xCE0130 + +#define mmNIC0_QM0_CQ_CFG1_4 0xCE0134 + +#define mmNIC0_QM0_CQ_ARUSER_31_11_0 0xCE0138 + +#define mmNIC0_QM0_CQ_ARUSER_31_11_1 0xCE013C + +#define mmNIC0_QM0_CQ_ARUSER_31_11_2 0xCE0140 + +#define mmNIC0_QM0_CQ_ARUSER_31_11_3 0xCE0144 + +#define mmNIC0_QM0_CQ_ARUSER_31_11_4 0xCE0148 + +#define mmNIC0_QM0_CQ_STS0_0 0xCE014C + +#define 
mmNIC0_QM0_CQ_STS0_1 0xCE0150 + +#define mmNIC0_QM0_CQ_STS0_2 0xCE0154 + +#define mmNIC0_QM0_CQ_STS0_3 0xCE0158 + +#define mmNIC0_QM0_CQ_STS0_4 0xCE015C + +#define mmNIC0_QM0_CQ_STS1_0 0xCE0160 + +#define mmNIC0_QM0_CQ_STS1_1 0xCE0164 + +#define mmNIC0_QM0_CQ_STS1_2 0xCE0168 + +#define mmNIC0_QM0_CQ_STS1_3 0xCE016C + +#define mmNIC0_QM0_CQ_STS1_4 0xCE0170 + +#define mmNIC0_QM0_CQ_PTR_LO_0 0xCE0174 + +#define mmNIC0_QM0_CQ_PTR_HI_0 0xCE0178 + +#define mmNIC0_QM0_CQ_TSIZE_0 0xCE017C + +#define mmNIC0_QM0_CQ_CTL_0 0xCE0180 + +#define mmNIC0_QM0_CQ_PTR_LO_1 0xCE0184 + +#define mmNIC0_QM0_CQ_PTR_HI_1 0xCE0188 + +#define mmNIC0_QM0_CQ_TSIZE_1 0xCE018C + +#define mmNIC0_QM0_CQ_CTL_1 0xCE0190 + +#define mmNIC0_QM0_CQ_PTR_LO_2 0xCE0194 + +#define mmNIC0_QM0_CQ_PTR_HI_2 0xCE0198 + +#define mmNIC0_QM0_CQ_TSIZE_2 0xCE019C + +#define mmNIC0_QM0_CQ_CTL_2 0xCE01A0 + +#define mmNIC0_QM0_CQ_PTR_LO_3 0xCE01A4 + +#define mmNIC0_QM0_CQ_PTR_HI_3 0xCE01A8 + +#define mmNIC0_QM0_CQ_TSIZE_3 0xCE01AC + +#define mmNIC0_QM0_CQ_CTL_3 0xCE01B0 + +#define mmNIC0_QM0_CQ_PTR_LO_4 0xCE01B4 + +#define mmNIC0_QM0_CQ_PTR_HI_4 0xCE01B8 + +#define mmNIC0_QM0_CQ_TSIZE_4 0xCE01BC + +#define mmNIC0_QM0_CQ_CTL_4 0xCE01C0 + +#define mmNIC0_QM0_CQ_PTR_LO_STS_0 0xCE01C4 + +#define mmNIC0_QM0_CQ_PTR_LO_STS_1 0xCE01C8 + +#define mmNIC0_QM0_CQ_PTR_LO_STS_2 0xCE01CC + +#define mmNIC0_QM0_CQ_PTR_LO_STS_3 0xCE01D0 + +#define mmNIC0_QM0_CQ_PTR_LO_STS_4 0xCE01D4 + +#define mmNIC0_QM0_CQ_PTR_HI_STS_0 0xCE01D8 + +#define mmNIC0_QM0_CQ_PTR_HI_STS_1 0xCE01DC + +#define mmNIC0_QM0_CQ_PTR_HI_STS_2 0xCE01E0 + +#define mmNIC0_QM0_CQ_PTR_HI_STS_3 0xCE01E4 + +#define mmNIC0_QM0_CQ_PTR_HI_STS_4 0xCE01E8 + +#define mmNIC0_QM0_CQ_TSIZE_STS_0 0xCE01EC + +#define mmNIC0_QM0_CQ_TSIZE_STS_1 0xCE01F0 + +#define mmNIC0_QM0_CQ_TSIZE_STS_2 0xCE01F4 + +#define mmNIC0_QM0_CQ_TSIZE_STS_3 0xCE01F8 + +#define mmNIC0_QM0_CQ_TSIZE_STS_4 0xCE01FC + +#define mmNIC0_QM0_CQ_CTL_STS_0 0xCE0200 + +#define mmNIC0_QM0_CQ_CTL_STS_1 0xCE0204 + +#define 
mmNIC0_QM0_CQ_CTL_STS_2 0xCE0208 + +#define mmNIC0_QM0_CQ_CTL_STS_3 0xCE020C + +#define mmNIC0_QM0_CQ_CTL_STS_4 0xCE0210 + +#define mmNIC0_QM0_CQ_IFIFO_CNT_0 0xCE0214 + +#define mmNIC0_QM0_CQ_IFIFO_CNT_1 0xCE0218 + +#define mmNIC0_QM0_CQ_IFIFO_CNT_2 0xCE021C + +#define mmNIC0_QM0_CQ_IFIFO_CNT_3 0xCE0220 + +#define mmNIC0_QM0_CQ_IFIFO_CNT_4 0xCE0224 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 0xCE0228 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1 0xCE022C + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2 0xCE0230 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3 0xCE0234 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4 0xCE0238 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 0xCE023C + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1 0xCE0240 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2 0xCE0244 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3 0xCE0248 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4 0xCE024C + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 0xCE0250 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1 0xCE0254 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2 0xCE0258 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3 0xCE025C + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4 0xCE0260 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 0xCE0264 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1 0xCE0268 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2 0xCE026C + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3 0xCE0270 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4 0xCE0274 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 0xCE0278 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1 0xCE027C + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 0xCE0280 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3 0xCE0284 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4 0xCE0288 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 0xCE028C + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1 0xCE0290 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2 0xCE0294 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3 0xCE0298 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4 0xCE029C + +#define 
mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 0xCE02A0 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1 0xCE02A4 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2 0xCE02A8 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3 0xCE02AC + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4 0xCE02B0 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 0xCE02B4 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1 0xCE02B8 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2 0xCE02BC + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3 0xCE02C0 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4 0xCE02C4 + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 0xCE02C8 + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_1 0xCE02CC + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_2 0xCE02D0 + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_3 0xCE02D4 + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_4 0xCE02D8 + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xCE02E0 + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xCE02E4 + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xCE02E8 + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xCE02EC + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xCE02F0 + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xCE02F4 + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xCE02F8 + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xCE02FC + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xCE0300 + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xCE0304 + +#define mmNIC0_QM0_CP_FENCE0_RDATA_0 0xCE0308 + +#define mmNIC0_QM0_CP_FENCE0_RDATA_1 0xCE030C + +#define mmNIC0_QM0_CP_FENCE0_RDATA_2 0xCE0310 + +#define mmNIC0_QM0_CP_FENCE0_RDATA_3 0xCE0314 + +#define mmNIC0_QM0_CP_FENCE0_RDATA_4 0xCE0318 + +#define mmNIC0_QM0_CP_FENCE1_RDATA_0 0xCE031C + +#define mmNIC0_QM0_CP_FENCE1_RDATA_1 0xCE0320 + +#define mmNIC0_QM0_CP_FENCE1_RDATA_2 0xCE0324 + +#define mmNIC0_QM0_CP_FENCE1_RDATA_3 0xCE0328 + +#define mmNIC0_QM0_CP_FENCE1_RDATA_4 0xCE032C + +#define mmNIC0_QM0_CP_FENCE2_RDATA_0 0xCE0330 + +#define mmNIC0_QM0_CP_FENCE2_RDATA_1 0xCE0334 + +#define 
mmNIC0_QM0_CP_FENCE2_RDATA_2 0xCE0338 + +#define mmNIC0_QM0_CP_FENCE2_RDATA_3 0xCE033C + +#define mmNIC0_QM0_CP_FENCE2_RDATA_4 0xCE0340 + +#define mmNIC0_QM0_CP_FENCE3_RDATA_0 0xCE0344 + +#define mmNIC0_QM0_CP_FENCE3_RDATA_1 0xCE0348 + +#define mmNIC0_QM0_CP_FENCE3_RDATA_2 0xCE034C + +#define mmNIC0_QM0_CP_FENCE3_RDATA_3 0xCE0350 + +#define mmNIC0_QM0_CP_FENCE3_RDATA_4 0xCE0354 + +#define mmNIC0_QM0_CP_FENCE0_CNT_0 0xCE0358 + +#define mmNIC0_QM0_CP_FENCE0_CNT_1 0xCE035C + +#define mmNIC0_QM0_CP_FENCE0_CNT_2 0xCE0360 + +#define mmNIC0_QM0_CP_FENCE0_CNT_3 0xCE0364 + +#define mmNIC0_QM0_CP_FENCE0_CNT_4 0xCE0368 + +#define mmNIC0_QM0_CP_FENCE1_CNT_0 0xCE036C + +#define mmNIC0_QM0_CP_FENCE1_CNT_1 0xCE0370 + +#define mmNIC0_QM0_CP_FENCE1_CNT_2 0xCE0374 + +#define mmNIC0_QM0_CP_FENCE1_CNT_3 0xCE0378 + +#define mmNIC0_QM0_CP_FENCE1_CNT_4 0xCE037C + +#define mmNIC0_QM0_CP_FENCE2_CNT_0 0xCE0380 + +#define mmNIC0_QM0_CP_FENCE2_CNT_1 0xCE0384 + +#define mmNIC0_QM0_CP_FENCE2_CNT_2 0xCE0388 + +#define mmNIC0_QM0_CP_FENCE2_CNT_3 0xCE038C + +#define mmNIC0_QM0_CP_FENCE2_CNT_4 0xCE0390 + +#define mmNIC0_QM0_CP_FENCE3_CNT_0 0xCE0394 + +#define mmNIC0_QM0_CP_FENCE3_CNT_1 0xCE0398 + +#define mmNIC0_QM0_CP_FENCE3_CNT_2 0xCE039C + +#define mmNIC0_QM0_CP_FENCE3_CNT_3 0xCE03A0 + +#define mmNIC0_QM0_CP_FENCE3_CNT_4 0xCE03A4 + +#define mmNIC0_QM0_CP_STS_0 0xCE03A8 + +#define mmNIC0_QM0_CP_STS_1 0xCE03AC + +#define mmNIC0_QM0_CP_STS_2 0xCE03B0 + +#define mmNIC0_QM0_CP_STS_3 0xCE03B4 + +#define mmNIC0_QM0_CP_STS_4 0xCE03B8 + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_0 0xCE03BC + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_1 0xCE03C0 + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_2 0xCE03C4 + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_3 0xCE03C8 + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_4 0xCE03CC + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_0 0xCE03D0 + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_1 0xCE03D4 + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_2 0xCE03D8 + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_3 0xCE03DC + +#define 
mmNIC0_QM0_CP_CURRENT_INST_HI_4 0xCE03E0 + +#define mmNIC0_QM0_CP_BARRIER_CFG_0 0xCE03F4 + +#define mmNIC0_QM0_CP_BARRIER_CFG_1 0xCE03F8 + +#define mmNIC0_QM0_CP_BARRIER_CFG_2 0xCE03FC + +#define mmNIC0_QM0_CP_BARRIER_CFG_3 0xCE0400 + +#define mmNIC0_QM0_CP_BARRIER_CFG_4 0xCE0404 + +#define mmNIC0_QM0_CP_DBG_0_0 0xCE0408 + +#define mmNIC0_QM0_CP_DBG_0_1 0xCE040C + +#define mmNIC0_QM0_CP_DBG_0_2 0xCE0410 + +#define mmNIC0_QM0_CP_DBG_0_3 0xCE0414 + +#define mmNIC0_QM0_CP_DBG_0_4 0xCE0418 + +#define mmNIC0_QM0_CP_ARUSER_31_11_0 0xCE041C + +#define mmNIC0_QM0_CP_ARUSER_31_11_1 0xCE0420 + +#define mmNIC0_QM0_CP_ARUSER_31_11_2 0xCE0424 + +#define mmNIC0_QM0_CP_ARUSER_31_11_3 0xCE0428 + +#define mmNIC0_QM0_CP_ARUSER_31_11_4 0xCE042C + +#define mmNIC0_QM0_CP_AWUSER_31_11_0 0xCE0430 + +#define mmNIC0_QM0_CP_AWUSER_31_11_1 0xCE0434 + +#define mmNIC0_QM0_CP_AWUSER_31_11_2 0xCE0438 + +#define mmNIC0_QM0_CP_AWUSER_31_11_3 0xCE043C + +#define mmNIC0_QM0_CP_AWUSER_31_11_4 0xCE0440 + +#define mmNIC0_QM0_ARB_CFG_0 0xCE0A00 + +#define mmNIC0_QM0_ARB_CHOISE_Q_PUSH 0xCE0A04 + +#define mmNIC0_QM0_ARB_WRR_WEIGHT_0 0xCE0A08 + +#define mmNIC0_QM0_ARB_WRR_WEIGHT_1 0xCE0A0C + +#define mmNIC0_QM0_ARB_WRR_WEIGHT_2 0xCE0A10 + +#define mmNIC0_QM0_ARB_WRR_WEIGHT_3 0xCE0A14 + +#define mmNIC0_QM0_ARB_CFG_1 0xCE0A18 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_0 0xCE0A20 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_1 0xCE0A24 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_2 0xCE0A28 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_3 0xCE0A2C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_4 0xCE0A30 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_5 0xCE0A34 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_6 0xCE0A38 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_7 0xCE0A3C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_8 0xCE0A40 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_9 0xCE0A44 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_10 0xCE0A48 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_11 0xCE0A4C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_12 0xCE0A50 + +#define 
mmNIC0_QM0_ARB_MST_AVAIL_CRED_13 0xCE0A54 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_14 0xCE0A58 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_15 0xCE0A5C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_16 0xCE0A60 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_17 0xCE0A64 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_18 0xCE0A68 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_19 0xCE0A6C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_20 0xCE0A70 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_21 0xCE0A74 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_22 0xCE0A78 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_23 0xCE0A7C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 0xCE0A80 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_25 0xCE0A84 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_26 0xCE0A88 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_27 0xCE0A8C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_28 0xCE0A90 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_29 0xCE0A94 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_30 0xCE0A98 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_31 0xCE0A9C + +#define mmNIC0_QM0_ARB_MST_CRED_INC 0xCE0AA0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xCE0AA4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xCE0AA8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xCE0AAC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xCE0AB0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xCE0AB4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xCE0AB8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xCE0ABC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xCE0AC0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xCE0AC4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xCE0AC8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xCE0ACC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xCE0AD0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xCE0AD4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xCE0AD8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xCE0ADC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xCE0AE0 + +#define 
mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xCE0AE4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xCE0AE8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xCE0AEC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xCE0AF0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xCE0AF4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xCE0AF8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xCE0AFC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xCE0B00 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xCE0B04 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xCE0B08 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xCE0B0C + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xCE0B10 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xCE0B14 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xCE0B18 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xCE0B1C + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xCE0B20 + +#define mmNIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xCE0B28 + +#define mmNIC0_QM0_ARB_MST_SLAVE_EN 0xCE0B2C + +#define mmNIC0_QM0_ARB_MST_QUIET_PER 0xCE0B34 + +#define mmNIC0_QM0_ARB_SLV_CHOISE_WDT 0xCE0B38 + +#define mmNIC0_QM0_ARB_SLV_ID 0xCE0B3C + +#define mmNIC0_QM0_ARB_MSG_MAX_INFLIGHT 0xCE0B44 + +#define mmNIC0_QM0_ARB_MSG_AWUSER_31_11 0xCE0B48 + +#define mmNIC0_QM0_ARB_MSG_AWUSER_SEC_PROP 0xCE0B4C + +#define mmNIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xCE0B50 + +#define mmNIC0_QM0_ARB_BASE_LO 0xCE0B54 + +#define mmNIC0_QM0_ARB_BASE_HI 0xCE0B58 + +#define mmNIC0_QM0_ARB_STATE_STS 0xCE0B80 + +#define mmNIC0_QM0_ARB_CHOISE_FULLNESS_STS 0xCE0B84 + +#define mmNIC0_QM0_ARB_MSG_STS 0xCE0B88 + +#define mmNIC0_QM0_ARB_SLV_CHOISE_Q_HEAD 0xCE0B8C + +#define mmNIC0_QM0_ARB_ERR_CAUSE 0xCE0B9C + +#define mmNIC0_QM0_ARB_ERR_MSG_EN 0xCE0BA0 + +#define mmNIC0_QM0_ARB_ERR_STS_DRP 0xCE0BA8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_0 0xCE0BB0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_1 0xCE0BB4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_2 0xCE0BB8 + +#define 
mmNIC0_QM0_ARB_MST_CRED_STS_3 0xCE0BBC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_4 0xCE0BC0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_5 0xCE0BC4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_6 0xCE0BC8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_7 0xCE0BCC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_8 0xCE0BD0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_9 0xCE0BD4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_10 0xCE0BD8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_11 0xCE0BDC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_12 0xCE0BE0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_13 0xCE0BE4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_14 0xCE0BE8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_15 0xCE0BEC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_16 0xCE0BF0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_17 0xCE0BF4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_18 0xCE0BF8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_19 0xCE0BFC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_20 0xCE0C00 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_21 0xCE0C04 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_22 0xCE0C08 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_23 0xCE0C0C + +#define mmNIC0_QM0_ARB_MST_CRED_STS_24 0xCE0C10 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_25 0xCE0C14 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_26 0xCE0C18 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_27 0xCE0C1C + +#define mmNIC0_QM0_ARB_MST_CRED_STS_28 0xCE0C20 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_29 0xCE0C24 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_30 0xCE0C28 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_31 0xCE0C2C + +#define mmNIC0_QM0_CGM_CFG 0xCE0C70 + +#define mmNIC0_QM0_CGM_STS 0xCE0C74 + +#define mmNIC0_QM0_CGM_CFG1 0xCE0C78 + +#define mmNIC0_QM0_LOCAL_RANGE_BASE 0xCE0C80 + +#define mmNIC0_QM0_LOCAL_RANGE_SIZE 0xCE0C84 + +#define mmNIC0_QM0_CSMR_STRICT_PRIO_CFG 0xCE0C90 + +#define mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_1 0xCE0C94 + +#define mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_0 0xCE0C98 + +#define mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_1 0xCE0C9C + +#define mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_0 0xCE0CA0 + +#define mmNIC0_QM0_GLBL_AXCACHE 0xCE0CA4 + +#define 
mmNIC0_QM0_IND_GW_APB_CFG 0xCE0CB0 + +#define mmNIC0_QM0_IND_GW_APB_WDATA 0xCE0CB4 + +#define mmNIC0_QM0_IND_GW_APB_RDATA 0xCE0CB8 + +#define mmNIC0_QM0_IND_GW_APB_STATUS 0xCE0CBC + +#define mmNIC0_QM0_GLBL_ERR_ADDR_LO 0xCE0CD0 + +#define mmNIC0_QM0_GLBL_ERR_ADDR_HI 0xCE0CD4 + +#define mmNIC0_QM0_GLBL_ERR_WDATA 0xCE0CD8 + +#define mmNIC0_QM0_GLBL_MEM_INIT_BUSY 0xCE0D00 + +#endif /* ASIC_REG_NIC0_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h new file mode 100644 index 000000000000..fe96c575b5c6 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC0_QM1_REGS_H_ +#define ASIC_REG_NIC0_QM1_REGS_H_ + +/* + ***************************************** + * NIC0_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC0_QM1_GLBL_CFG0 0xCE2000 + +#define mmNIC0_QM1_GLBL_CFG1 0xCE2004 + +#define mmNIC0_QM1_GLBL_PROT 0xCE2008 + +#define mmNIC0_QM1_GLBL_ERR_CFG 0xCE200C + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_0 0xCE2010 + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_1 0xCE2014 + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_2 0xCE2018 + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_3 0xCE201C + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_4 0xCE2020 + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0 0xCE2024 + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1 0xCE2028 + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2 0xCE202C + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3 0xCE2030 + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4 0xCE2034 + +#define mmNIC0_QM1_GLBL_STS0 0xCE2038 + +#define mmNIC0_QM1_GLBL_STS1_0 0xCE2040 + +#define mmNIC0_QM1_GLBL_STS1_1 0xCE2044 + 
+#define mmNIC0_QM1_GLBL_STS1_2 0xCE2048 + +#define mmNIC0_QM1_GLBL_STS1_3 0xCE204C + +#define mmNIC0_QM1_GLBL_STS1_4 0xCE2050 + +#define mmNIC0_QM1_GLBL_MSG_EN_0 0xCE2054 + +#define mmNIC0_QM1_GLBL_MSG_EN_1 0xCE2058 + +#define mmNIC0_QM1_GLBL_MSG_EN_2 0xCE205C + +#define mmNIC0_QM1_GLBL_MSG_EN_3 0xCE2060 + +#define mmNIC0_QM1_GLBL_MSG_EN_4 0xCE2068 + +#define mmNIC0_QM1_PQ_BASE_LO_0 0xCE2070 + +#define mmNIC0_QM1_PQ_BASE_LO_1 0xCE2074 + +#define mmNIC0_QM1_PQ_BASE_LO_2 0xCE2078 + +#define mmNIC0_QM1_PQ_BASE_LO_3 0xCE207C + +#define mmNIC0_QM1_PQ_BASE_HI_0 0xCE2080 + +#define mmNIC0_QM1_PQ_BASE_HI_1 0xCE2084 + +#define mmNIC0_QM1_PQ_BASE_HI_2 0xCE2088 + +#define mmNIC0_QM1_PQ_BASE_HI_3 0xCE208C + +#define mmNIC0_QM1_PQ_SIZE_0 0xCE2090 + +#define mmNIC0_QM1_PQ_SIZE_1 0xCE2094 + +#define mmNIC0_QM1_PQ_SIZE_2 0xCE2098 + +#define mmNIC0_QM1_PQ_SIZE_3 0xCE209C + +#define mmNIC0_QM1_PQ_PI_0 0xCE20A0 + +#define mmNIC0_QM1_PQ_PI_1 0xCE20A4 + +#define mmNIC0_QM1_PQ_PI_2 0xCE20A8 + +#define mmNIC0_QM1_PQ_PI_3 0xCE20AC + +#define mmNIC0_QM1_PQ_CI_0 0xCE20B0 + +#define mmNIC0_QM1_PQ_CI_1 0xCE20B4 + +#define mmNIC0_QM1_PQ_CI_2 0xCE20B8 + +#define mmNIC0_QM1_PQ_CI_3 0xCE20BC + +#define mmNIC0_QM1_PQ_CFG0_0 0xCE20C0 + +#define mmNIC0_QM1_PQ_CFG0_1 0xCE20C4 + +#define mmNIC0_QM1_PQ_CFG0_2 0xCE20C8 + +#define mmNIC0_QM1_PQ_CFG0_3 0xCE20CC + +#define mmNIC0_QM1_PQ_CFG1_0 0xCE20D0 + +#define mmNIC0_QM1_PQ_CFG1_1 0xCE20D4 + +#define mmNIC0_QM1_PQ_CFG1_2 0xCE20D8 + +#define mmNIC0_QM1_PQ_CFG1_3 0xCE20DC + +#define mmNIC0_QM1_PQ_ARUSER_31_11_0 0xCE20E0 + +#define mmNIC0_QM1_PQ_ARUSER_31_11_1 0xCE20E4 + +#define mmNIC0_QM1_PQ_ARUSER_31_11_2 0xCE20E8 + +#define mmNIC0_QM1_PQ_ARUSER_31_11_3 0xCE20EC + +#define mmNIC0_QM1_PQ_STS0_0 0xCE20F0 + +#define mmNIC0_QM1_PQ_STS0_1 0xCE20F4 + +#define mmNIC0_QM1_PQ_STS0_2 0xCE20F8 + +#define mmNIC0_QM1_PQ_STS0_3 0xCE20FC + +#define mmNIC0_QM1_PQ_STS1_0 0xCE2100 + +#define mmNIC0_QM1_PQ_STS1_1 0xCE2104 + +#define mmNIC0_QM1_PQ_STS1_2 0xCE2108 + 
+#define mmNIC0_QM1_PQ_STS1_3 0xCE210C + +#define mmNIC0_QM1_CQ_CFG0_0 0xCE2110 + +#define mmNIC0_QM1_CQ_CFG0_1 0xCE2114 + +#define mmNIC0_QM1_CQ_CFG0_2 0xCE2118 + +#define mmNIC0_QM1_CQ_CFG0_3 0xCE211C + +#define mmNIC0_QM1_CQ_CFG0_4 0xCE2120 + +#define mmNIC0_QM1_CQ_CFG1_0 0xCE2124 + +#define mmNIC0_QM1_CQ_CFG1_1 0xCE2128 + +#define mmNIC0_QM1_CQ_CFG1_2 0xCE212C + +#define mmNIC0_QM1_CQ_CFG1_3 0xCE2130 + +#define mmNIC0_QM1_CQ_CFG1_4 0xCE2134 + +#define mmNIC0_QM1_CQ_ARUSER_31_11_0 0xCE2138 + +#define mmNIC0_QM1_CQ_ARUSER_31_11_1 0xCE213C + +#define mmNIC0_QM1_CQ_ARUSER_31_11_2 0xCE2140 + +#define mmNIC0_QM1_CQ_ARUSER_31_11_3 0xCE2144 + +#define mmNIC0_QM1_CQ_ARUSER_31_11_4 0xCE2148 + +#define mmNIC0_QM1_CQ_STS0_0 0xCE214C + +#define mmNIC0_QM1_CQ_STS0_1 0xCE2150 + +#define mmNIC0_QM1_CQ_STS0_2 0xCE2154 + +#define mmNIC0_QM1_CQ_STS0_3 0xCE2158 + +#define mmNIC0_QM1_CQ_STS0_4 0xCE215C + +#define mmNIC0_QM1_CQ_STS1_0 0xCE2160 + +#define mmNIC0_QM1_CQ_STS1_1 0xCE2164 + +#define mmNIC0_QM1_CQ_STS1_2 0xCE2168 + +#define mmNIC0_QM1_CQ_STS1_3 0xCE216C + +#define mmNIC0_QM1_CQ_STS1_4 0xCE2170 + +#define mmNIC0_QM1_CQ_PTR_LO_0 0xCE2174 + +#define mmNIC0_QM1_CQ_PTR_HI_0 0xCE2178 + +#define mmNIC0_QM1_CQ_TSIZE_0 0xCE217C + +#define mmNIC0_QM1_CQ_CTL_0 0xCE2180 + +#define mmNIC0_QM1_CQ_PTR_LO_1 0xCE2184 + +#define mmNIC0_QM1_CQ_PTR_HI_1 0xCE2188 + +#define mmNIC0_QM1_CQ_TSIZE_1 0xCE218C + +#define mmNIC0_QM1_CQ_CTL_1 0xCE2190 + +#define mmNIC0_QM1_CQ_PTR_LO_2 0xCE2194 + +#define mmNIC0_QM1_CQ_PTR_HI_2 0xCE2198 + +#define mmNIC0_QM1_CQ_TSIZE_2 0xCE219C + +#define mmNIC0_QM1_CQ_CTL_2 0xCE21A0 + +#define mmNIC0_QM1_CQ_PTR_LO_3 0xCE21A4 + +#define mmNIC0_QM1_CQ_PTR_HI_3 0xCE21A8 + +#define mmNIC0_QM1_CQ_TSIZE_3 0xCE21AC + +#define mmNIC0_QM1_CQ_CTL_3 0xCE21B0 + +#define mmNIC0_QM1_CQ_PTR_LO_4 0xCE21B4 + +#define mmNIC0_QM1_CQ_PTR_HI_4 0xCE21B8 + +#define mmNIC0_QM1_CQ_TSIZE_4 0xCE21BC + +#define mmNIC0_QM1_CQ_CTL_4 0xCE21C0 + +#define mmNIC0_QM1_CQ_PTR_LO_STS_0 0xCE21C4 + 
+#define mmNIC0_QM1_CQ_PTR_LO_STS_1 0xCE21C8 + +#define mmNIC0_QM1_CQ_PTR_LO_STS_2 0xCE21CC + +#define mmNIC0_QM1_CQ_PTR_LO_STS_3 0xCE21D0 + +#define mmNIC0_QM1_CQ_PTR_LO_STS_4 0xCE21D4 + +#define mmNIC0_QM1_CQ_PTR_HI_STS_0 0xCE21D8 + +#define mmNIC0_QM1_CQ_PTR_HI_STS_1 0xCE21DC + +#define mmNIC0_QM1_CQ_PTR_HI_STS_2 0xCE21E0 + +#define mmNIC0_QM1_CQ_PTR_HI_STS_3 0xCE21E4 + +#define mmNIC0_QM1_CQ_PTR_HI_STS_4 0xCE21E8 + +#define mmNIC0_QM1_CQ_TSIZE_STS_0 0xCE21EC + +#define mmNIC0_QM1_CQ_TSIZE_STS_1 0xCE21F0 + +#define mmNIC0_QM1_CQ_TSIZE_STS_2 0xCE21F4 + +#define mmNIC0_QM1_CQ_TSIZE_STS_3 0xCE21F8 + +#define mmNIC0_QM1_CQ_TSIZE_STS_4 0xCE21FC + +#define mmNIC0_QM1_CQ_CTL_STS_0 0xCE2200 + +#define mmNIC0_QM1_CQ_CTL_STS_1 0xCE2204 + +#define mmNIC0_QM1_CQ_CTL_STS_2 0xCE2208 + +#define mmNIC0_QM1_CQ_CTL_STS_3 0xCE220C + +#define mmNIC0_QM1_CQ_CTL_STS_4 0xCE2210 + +#define mmNIC0_QM1_CQ_IFIFO_CNT_0 0xCE2214 + +#define mmNIC0_QM1_CQ_IFIFO_CNT_1 0xCE2218 + +#define mmNIC0_QM1_CQ_IFIFO_CNT_2 0xCE221C + +#define mmNIC0_QM1_CQ_IFIFO_CNT_3 0xCE2220 + +#define mmNIC0_QM1_CQ_IFIFO_CNT_4 0xCE2224 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_0 0xCE2228 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_1 0xCE222C + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_2 0xCE2230 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_3 0xCE2234 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_4 0xCE2238 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_0 0xCE223C + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_1 0xCE2240 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_2 0xCE2244 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_3 0xCE2248 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_4 0xCE224C + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_0 0xCE2250 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_1 0xCE2254 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_2 0xCE2258 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_3 0xCE225C + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_4 0xCE2260 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_0 0xCE2264 + +#define 
mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_1 0xCE2268 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_2 0xCE226C + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_3 0xCE2270 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_4 0xCE2274 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_0 0xCE2278 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_1 0xCE227C + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 0xCE2280 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_3 0xCE2284 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_4 0xCE2288 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_0 0xCE228C + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_1 0xCE2290 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_2 0xCE2294 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_3 0xCE2298 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_4 0xCE229C + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_0 0xCE22A0 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_1 0xCE22A4 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_2 0xCE22A8 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_3 0xCE22AC + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_4 0xCE22B0 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_0 0xCE22B4 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_1 0xCE22B8 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_2 0xCE22BC + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_3 0xCE22C0 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_4 0xCE22C4 + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_0 0xCE22C8 + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_1 0xCE22CC + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_2 0xCE22D0 + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_3 0xCE22D4 + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_4 0xCE22D8 + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xCE22E0 + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xCE22E4 + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xCE22E8 + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xCE22EC + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xCE22F0 + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xCE22F4 + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xCE22F8 + +#define 
mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xCE22FC + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xCE2300 + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xCE2304 + +#define mmNIC0_QM1_CP_FENCE0_RDATA_0 0xCE2308 + +#define mmNIC0_QM1_CP_FENCE0_RDATA_1 0xCE230C + +#define mmNIC0_QM1_CP_FENCE0_RDATA_2 0xCE2310 + +#define mmNIC0_QM1_CP_FENCE0_RDATA_3 0xCE2314 + +#define mmNIC0_QM1_CP_FENCE0_RDATA_4 0xCE2318 + +#define mmNIC0_QM1_CP_FENCE1_RDATA_0 0xCE231C + +#define mmNIC0_QM1_CP_FENCE1_RDATA_1 0xCE2320 + +#define mmNIC0_QM1_CP_FENCE1_RDATA_2 0xCE2324 + +#define mmNIC0_QM1_CP_FENCE1_RDATA_3 0xCE2328 + +#define mmNIC0_QM1_CP_FENCE1_RDATA_4 0xCE232C + +#define mmNIC0_QM1_CP_FENCE2_RDATA_0 0xCE2330 + +#define mmNIC0_QM1_CP_FENCE2_RDATA_1 0xCE2334 + +#define mmNIC0_QM1_CP_FENCE2_RDATA_2 0xCE2338 + +#define mmNIC0_QM1_CP_FENCE2_RDATA_3 0xCE233C + +#define mmNIC0_QM1_CP_FENCE2_RDATA_4 0xCE2340 + +#define mmNIC0_QM1_CP_FENCE3_RDATA_0 0xCE2344 + +#define mmNIC0_QM1_CP_FENCE3_RDATA_1 0xCE2348 + +#define mmNIC0_QM1_CP_FENCE3_RDATA_2 0xCE234C + +#define mmNIC0_QM1_CP_FENCE3_RDATA_3 0xCE2350 + +#define mmNIC0_QM1_CP_FENCE3_RDATA_4 0xCE2354 + +#define mmNIC0_QM1_CP_FENCE0_CNT_0 0xCE2358 + +#define mmNIC0_QM1_CP_FENCE0_CNT_1 0xCE235C + +#define mmNIC0_QM1_CP_FENCE0_CNT_2 0xCE2360 + +#define mmNIC0_QM1_CP_FENCE0_CNT_3 0xCE2364 + +#define mmNIC0_QM1_CP_FENCE0_CNT_4 0xCE2368 + +#define mmNIC0_QM1_CP_FENCE1_CNT_0 0xCE236C + +#define mmNIC0_QM1_CP_FENCE1_CNT_1 0xCE2370 + +#define mmNIC0_QM1_CP_FENCE1_CNT_2 0xCE2374 + +#define mmNIC0_QM1_CP_FENCE1_CNT_3 0xCE2378 + +#define mmNIC0_QM1_CP_FENCE1_CNT_4 0xCE237C + +#define mmNIC0_QM1_CP_FENCE2_CNT_0 0xCE2380 + +#define mmNIC0_QM1_CP_FENCE2_CNT_1 0xCE2384 + +#define mmNIC0_QM1_CP_FENCE2_CNT_2 0xCE2388 + +#define mmNIC0_QM1_CP_FENCE2_CNT_3 0xCE238C + +#define mmNIC0_QM1_CP_FENCE2_CNT_4 0xCE2390 + +#define mmNIC0_QM1_CP_FENCE3_CNT_0 0xCE2394 + +#define mmNIC0_QM1_CP_FENCE3_CNT_1 0xCE2398 + +#define mmNIC0_QM1_CP_FENCE3_CNT_2 0xCE239C + 
+#define mmNIC0_QM1_CP_FENCE3_CNT_3 0xCE23A0 + +#define mmNIC0_QM1_CP_FENCE3_CNT_4 0xCE23A4 + +#define mmNIC0_QM1_CP_STS_0 0xCE23A8 + +#define mmNIC0_QM1_CP_STS_1 0xCE23AC + +#define mmNIC0_QM1_CP_STS_2 0xCE23B0 + +#define mmNIC0_QM1_CP_STS_3 0xCE23B4 + +#define mmNIC0_QM1_CP_STS_4 0xCE23B8 + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_0 0xCE23BC + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_1 0xCE23C0 + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_2 0xCE23C4 + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_3 0xCE23C8 + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_4 0xCE23CC + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_0 0xCE23D0 + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_1 0xCE23D4 + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_2 0xCE23D8 + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_3 0xCE23DC + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_4 0xCE23E0 + +#define mmNIC0_QM1_CP_BARRIER_CFG_0 0xCE23F4 + +#define mmNIC0_QM1_CP_BARRIER_CFG_1 0xCE23F8 + +#define mmNIC0_QM1_CP_BARRIER_CFG_2 0xCE23FC + +#define mmNIC0_QM1_CP_BARRIER_CFG_3 0xCE2400 + +#define mmNIC0_QM1_CP_BARRIER_CFG_4 0xCE2404 + +#define mmNIC0_QM1_CP_DBG_0_0 0xCE2408 + +#define mmNIC0_QM1_CP_DBG_0_1 0xCE240C + +#define mmNIC0_QM1_CP_DBG_0_2 0xCE2410 + +#define mmNIC0_QM1_CP_DBG_0_3 0xCE2414 + +#define mmNIC0_QM1_CP_DBG_0_4 0xCE2418 + +#define mmNIC0_QM1_CP_ARUSER_31_11_0 0xCE241C + +#define mmNIC0_QM1_CP_ARUSER_31_11_1 0xCE2420 + +#define mmNIC0_QM1_CP_ARUSER_31_11_2 0xCE2424 + +#define mmNIC0_QM1_CP_ARUSER_31_11_3 0xCE2428 + +#define mmNIC0_QM1_CP_ARUSER_31_11_4 0xCE242C + +#define mmNIC0_QM1_CP_AWUSER_31_11_0 0xCE2430 + +#define mmNIC0_QM1_CP_AWUSER_31_11_1 0xCE2434 + +#define mmNIC0_QM1_CP_AWUSER_31_11_2 0xCE2438 + +#define mmNIC0_QM1_CP_AWUSER_31_11_3 0xCE243C + +#define mmNIC0_QM1_CP_AWUSER_31_11_4 0xCE2440 + +#define mmNIC0_QM1_ARB_CFG_0 0xCE2A00 + +#define mmNIC0_QM1_ARB_CHOISE_Q_PUSH 0xCE2A04 + +#define mmNIC0_QM1_ARB_WRR_WEIGHT_0 0xCE2A08 + +#define mmNIC0_QM1_ARB_WRR_WEIGHT_1 0xCE2A0C + +#define mmNIC0_QM1_ARB_WRR_WEIGHT_2 0xCE2A10 + +#define 
mmNIC0_QM1_ARB_WRR_WEIGHT_3 0xCE2A14 + +#define mmNIC0_QM1_ARB_CFG_1 0xCE2A18 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_0 0xCE2A20 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_1 0xCE2A24 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_2 0xCE2A28 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_3 0xCE2A2C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_4 0xCE2A30 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_5 0xCE2A34 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_6 0xCE2A38 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_7 0xCE2A3C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_8 0xCE2A40 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_9 0xCE2A44 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_10 0xCE2A48 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_11 0xCE2A4C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_12 0xCE2A50 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_13 0xCE2A54 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_14 0xCE2A58 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_15 0xCE2A5C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_16 0xCE2A60 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_17 0xCE2A64 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_18 0xCE2A68 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_19 0xCE2A6C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_20 0xCE2A70 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_21 0xCE2A74 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_22 0xCE2A78 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_23 0xCE2A7C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 0xCE2A80 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_25 0xCE2A84 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_26 0xCE2A88 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_27 0xCE2A8C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_28 0xCE2A90 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_29 0xCE2A94 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_30 0xCE2A98 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_31 0xCE2A9C + +#define mmNIC0_QM1_ARB_MST_CRED_INC 0xCE2AA0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xCE2AA4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xCE2AA8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xCE2AAC + +#define 
mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xCE2AB0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xCE2AB4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xCE2AB8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xCE2ABC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xCE2AC0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xCE2AC4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xCE2AC8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xCE2ACC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xCE2AD0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xCE2AD4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xCE2AD8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xCE2ADC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xCE2AE0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xCE2AE4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xCE2AE8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xCE2AEC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xCE2AF0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xCE2AF4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xCE2AF8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xCE2AFC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xCE2B00 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xCE2B04 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xCE2B08 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xCE2B0C + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xCE2B10 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xCE2B14 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xCE2B18 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xCE2B1C + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xCE2B20 + +#define mmNIC0_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xCE2B28 + +#define mmNIC0_QM1_ARB_MST_SLAVE_EN 0xCE2B2C + +#define mmNIC0_QM1_ARB_MST_QUIET_PER 0xCE2B34 + +#define mmNIC0_QM1_ARB_SLV_CHOISE_WDT 0xCE2B38 + +#define mmNIC0_QM1_ARB_SLV_ID 0xCE2B3C + +#define mmNIC0_QM1_ARB_MSG_MAX_INFLIGHT 0xCE2B44 + 
+#define mmNIC0_QM1_ARB_MSG_AWUSER_31_11 0xCE2B48 + +#define mmNIC0_QM1_ARB_MSG_AWUSER_SEC_PROP 0xCE2B4C + +#define mmNIC0_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xCE2B50 + +#define mmNIC0_QM1_ARB_BASE_LO 0xCE2B54 + +#define mmNIC0_QM1_ARB_BASE_HI 0xCE2B58 + +#define mmNIC0_QM1_ARB_STATE_STS 0xCE2B80 + +#define mmNIC0_QM1_ARB_CHOISE_FULLNESS_STS 0xCE2B84 + +#define mmNIC0_QM1_ARB_MSG_STS 0xCE2B88 + +#define mmNIC0_QM1_ARB_SLV_CHOISE_Q_HEAD 0xCE2B8C + +#define mmNIC0_QM1_ARB_ERR_CAUSE 0xCE2B9C + +#define mmNIC0_QM1_ARB_ERR_MSG_EN 0xCE2BA0 + +#define mmNIC0_QM1_ARB_ERR_STS_DRP 0xCE2BA8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_0 0xCE2BB0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_1 0xCE2BB4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_2 0xCE2BB8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_3 0xCE2BBC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_4 0xCE2BC0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_5 0xCE2BC4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_6 0xCE2BC8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_7 0xCE2BCC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_8 0xCE2BD0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_9 0xCE2BD4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_10 0xCE2BD8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_11 0xCE2BDC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_12 0xCE2BE0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_13 0xCE2BE4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_14 0xCE2BE8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_15 0xCE2BEC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_16 0xCE2BF0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_17 0xCE2BF4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_18 0xCE2BF8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_19 0xCE2BFC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_20 0xCE2C00 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_21 0xCE2C04 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_22 0xCE2C08 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_23 0xCE2C0C + +#define mmNIC0_QM1_ARB_MST_CRED_STS_24 0xCE2C10 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_25 0xCE2C14 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_26 0xCE2C18 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_27 
0xCE2C1C + +#define mmNIC0_QM1_ARB_MST_CRED_STS_28 0xCE2C20 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_29 0xCE2C24 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_30 0xCE2C28 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_31 0xCE2C2C + +#define mmNIC0_QM1_CGM_CFG 0xCE2C70 + +#define mmNIC0_QM1_CGM_STS 0xCE2C74 + +#define mmNIC0_QM1_CGM_CFG1 0xCE2C78 + +#define mmNIC0_QM1_LOCAL_RANGE_BASE 0xCE2C80 + +#define mmNIC0_QM1_LOCAL_RANGE_SIZE 0xCE2C84 + +#define mmNIC0_QM1_CSMR_STRICT_PRIO_CFG 0xCE2C90 + +#define mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_1 0xCE2C94 + +#define mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_0 0xCE2C98 + +#define mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_1 0xCE2C9C + +#define mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_0 0xCE2CA0 + +#define mmNIC0_QM1_GLBL_AXCACHE 0xCE2CA4 + +#define mmNIC0_QM1_IND_GW_APB_CFG 0xCE2CB0 + +#define mmNIC0_QM1_IND_GW_APB_WDATA 0xCE2CB4 + +#define mmNIC0_QM1_IND_GW_APB_RDATA 0xCE2CB8 + +#define mmNIC0_QM1_IND_GW_APB_STATUS 0xCE2CBC + +#define mmNIC0_QM1_GLBL_ERR_ADDR_LO 0xCE2CD0 + +#define mmNIC0_QM1_GLBL_ERR_ADDR_HI 0xCE2CD4 + +#define mmNIC0_QM1_GLBL_ERR_WDATA 0xCE2CD8 + +#define mmNIC0_QM1_GLBL_MEM_INIT_BUSY 0xCE2D00 + +#endif /* ASIC_REG_NIC0_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h new file mode 100644 index 000000000000..0d1caf057ad0 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC1_QM0_REGS_H_ +#define ASIC_REG_NIC1_QM0_REGS_H_ + +/* + ***************************************** + * NIC1_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC1_QM0_GLBL_CFG0 0xD20000 + +#define mmNIC1_QM0_GLBL_CFG1 0xD20004 + +#define mmNIC1_QM0_GLBL_PROT 0xD20008 + +#define mmNIC1_QM0_GLBL_ERR_CFG 0xD2000C + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_0 0xD20010 + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_1 0xD20014 + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_2 0xD20018 + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_3 0xD2001C + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_4 0xD20020 + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0 0xD20024 + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1 0xD20028 + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2 0xD2002C + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3 0xD20030 + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4 0xD20034 + +#define mmNIC1_QM0_GLBL_STS0 0xD20038 + +#define mmNIC1_QM0_GLBL_STS1_0 0xD20040 + +#define mmNIC1_QM0_GLBL_STS1_1 0xD20044 + +#define mmNIC1_QM0_GLBL_STS1_2 0xD20048 + +#define mmNIC1_QM0_GLBL_STS1_3 0xD2004C + +#define mmNIC1_QM0_GLBL_STS1_4 0xD20050 + +#define mmNIC1_QM0_GLBL_MSG_EN_0 0xD20054 + +#define mmNIC1_QM0_GLBL_MSG_EN_1 0xD20058 + +#define mmNIC1_QM0_GLBL_MSG_EN_2 0xD2005C + +#define mmNIC1_QM0_GLBL_MSG_EN_3 0xD20060 + +#define mmNIC1_QM0_GLBL_MSG_EN_4 0xD20068 + +#define mmNIC1_QM0_PQ_BASE_LO_0 0xD20070 + +#define mmNIC1_QM0_PQ_BASE_LO_1 0xD20074 + +#define mmNIC1_QM0_PQ_BASE_LO_2 0xD20078 + +#define mmNIC1_QM0_PQ_BASE_LO_3 0xD2007C + +#define mmNIC1_QM0_PQ_BASE_HI_0 0xD20080 + +#define mmNIC1_QM0_PQ_BASE_HI_1 0xD20084 + +#define mmNIC1_QM0_PQ_BASE_HI_2 0xD20088 + +#define mmNIC1_QM0_PQ_BASE_HI_3 0xD2008C + +#define mmNIC1_QM0_PQ_SIZE_0 0xD20090 + +#define mmNIC1_QM0_PQ_SIZE_1 0xD20094 + +#define 
mmNIC1_QM0_PQ_SIZE_2 0xD20098 + +#define mmNIC1_QM0_PQ_SIZE_3 0xD2009C + +#define mmNIC1_QM0_PQ_PI_0 0xD200A0 + +#define mmNIC1_QM0_PQ_PI_1 0xD200A4 + +#define mmNIC1_QM0_PQ_PI_2 0xD200A8 + +#define mmNIC1_QM0_PQ_PI_3 0xD200AC + +#define mmNIC1_QM0_PQ_CI_0 0xD200B0 + +#define mmNIC1_QM0_PQ_CI_1 0xD200B4 + +#define mmNIC1_QM0_PQ_CI_2 0xD200B8 + +#define mmNIC1_QM0_PQ_CI_3 0xD200BC + +#define mmNIC1_QM0_PQ_CFG0_0 0xD200C0 + +#define mmNIC1_QM0_PQ_CFG0_1 0xD200C4 + +#define mmNIC1_QM0_PQ_CFG0_2 0xD200C8 + +#define mmNIC1_QM0_PQ_CFG0_3 0xD200CC + +#define mmNIC1_QM0_PQ_CFG1_0 0xD200D0 + +#define mmNIC1_QM0_PQ_CFG1_1 0xD200D4 + +#define mmNIC1_QM0_PQ_CFG1_2 0xD200D8 + +#define mmNIC1_QM0_PQ_CFG1_3 0xD200DC + +#define mmNIC1_QM0_PQ_ARUSER_31_11_0 0xD200E0 + +#define mmNIC1_QM0_PQ_ARUSER_31_11_1 0xD200E4 + +#define mmNIC1_QM0_PQ_ARUSER_31_11_2 0xD200E8 + +#define mmNIC1_QM0_PQ_ARUSER_31_11_3 0xD200EC + +#define mmNIC1_QM0_PQ_STS0_0 0xD200F0 + +#define mmNIC1_QM0_PQ_STS0_1 0xD200F4 + +#define mmNIC1_QM0_PQ_STS0_2 0xD200F8 + +#define mmNIC1_QM0_PQ_STS0_3 0xD200FC + +#define mmNIC1_QM0_PQ_STS1_0 0xD20100 + +#define mmNIC1_QM0_PQ_STS1_1 0xD20104 + +#define mmNIC1_QM0_PQ_STS1_2 0xD20108 + +#define mmNIC1_QM0_PQ_STS1_3 0xD2010C + +#define mmNIC1_QM0_CQ_CFG0_0 0xD20110 + +#define mmNIC1_QM0_CQ_CFG0_1 0xD20114 + +#define mmNIC1_QM0_CQ_CFG0_2 0xD20118 + +#define mmNIC1_QM0_CQ_CFG0_3 0xD2011C + +#define mmNIC1_QM0_CQ_CFG0_4 0xD20120 + +#define mmNIC1_QM0_CQ_CFG1_0 0xD20124 + +#define mmNIC1_QM0_CQ_CFG1_1 0xD20128 + +#define mmNIC1_QM0_CQ_CFG1_2 0xD2012C + +#define mmNIC1_QM0_CQ_CFG1_3 0xD20130 + +#define mmNIC1_QM0_CQ_CFG1_4 0xD20134 + +#define mmNIC1_QM0_CQ_ARUSER_31_11_0 0xD20138 + +#define mmNIC1_QM0_CQ_ARUSER_31_11_1 0xD2013C + +#define mmNIC1_QM0_CQ_ARUSER_31_11_2 0xD20140 + +#define mmNIC1_QM0_CQ_ARUSER_31_11_3 0xD20144 + +#define mmNIC1_QM0_CQ_ARUSER_31_11_4 0xD20148 + +#define mmNIC1_QM0_CQ_STS0_0 0xD2014C + +#define mmNIC1_QM0_CQ_STS0_1 0xD20150 + +#define 
mmNIC1_QM0_CQ_STS0_2 0xD20154 + +#define mmNIC1_QM0_CQ_STS0_3 0xD20158 + +#define mmNIC1_QM0_CQ_STS0_4 0xD2015C + +#define mmNIC1_QM0_CQ_STS1_0 0xD20160 + +#define mmNIC1_QM0_CQ_STS1_1 0xD20164 + +#define mmNIC1_QM0_CQ_STS1_2 0xD20168 + +#define mmNIC1_QM0_CQ_STS1_3 0xD2016C + +#define mmNIC1_QM0_CQ_STS1_4 0xD20170 + +#define mmNIC1_QM0_CQ_PTR_LO_0 0xD20174 + +#define mmNIC1_QM0_CQ_PTR_HI_0 0xD20178 + +#define mmNIC1_QM0_CQ_TSIZE_0 0xD2017C + +#define mmNIC1_QM0_CQ_CTL_0 0xD20180 + +#define mmNIC1_QM0_CQ_PTR_LO_1 0xD20184 + +#define mmNIC1_QM0_CQ_PTR_HI_1 0xD20188 + +#define mmNIC1_QM0_CQ_TSIZE_1 0xD2018C + +#define mmNIC1_QM0_CQ_CTL_1 0xD20190 + +#define mmNIC1_QM0_CQ_PTR_LO_2 0xD20194 + +#define mmNIC1_QM0_CQ_PTR_HI_2 0xD20198 + +#define mmNIC1_QM0_CQ_TSIZE_2 0xD2019C + +#define mmNIC1_QM0_CQ_CTL_2 0xD201A0 + +#define mmNIC1_QM0_CQ_PTR_LO_3 0xD201A4 + +#define mmNIC1_QM0_CQ_PTR_HI_3 0xD201A8 + +#define mmNIC1_QM0_CQ_TSIZE_3 0xD201AC + +#define mmNIC1_QM0_CQ_CTL_3 0xD201B0 + +#define mmNIC1_QM0_CQ_PTR_LO_4 0xD201B4 + +#define mmNIC1_QM0_CQ_PTR_HI_4 0xD201B8 + +#define mmNIC1_QM0_CQ_TSIZE_4 0xD201BC + +#define mmNIC1_QM0_CQ_CTL_4 0xD201C0 + +#define mmNIC1_QM0_CQ_PTR_LO_STS_0 0xD201C4 + +#define mmNIC1_QM0_CQ_PTR_LO_STS_1 0xD201C8 + +#define mmNIC1_QM0_CQ_PTR_LO_STS_2 0xD201CC + +#define mmNIC1_QM0_CQ_PTR_LO_STS_3 0xD201D0 + +#define mmNIC1_QM0_CQ_PTR_LO_STS_4 0xD201D4 + +#define mmNIC1_QM0_CQ_PTR_HI_STS_0 0xD201D8 + +#define mmNIC1_QM0_CQ_PTR_HI_STS_1 0xD201DC + +#define mmNIC1_QM0_CQ_PTR_HI_STS_2 0xD201E0 + +#define mmNIC1_QM0_CQ_PTR_HI_STS_3 0xD201E4 + +#define mmNIC1_QM0_CQ_PTR_HI_STS_4 0xD201E8 + +#define mmNIC1_QM0_CQ_TSIZE_STS_0 0xD201EC + +#define mmNIC1_QM0_CQ_TSIZE_STS_1 0xD201F0 + +#define mmNIC1_QM0_CQ_TSIZE_STS_2 0xD201F4 + +#define mmNIC1_QM0_CQ_TSIZE_STS_3 0xD201F8 + +#define mmNIC1_QM0_CQ_TSIZE_STS_4 0xD201FC + +#define mmNIC1_QM0_CQ_CTL_STS_0 0xD20200 + +#define mmNIC1_QM0_CQ_CTL_STS_1 0xD20204 + +#define mmNIC1_QM0_CQ_CTL_STS_2 0xD20208 + +#define 
mmNIC1_QM0_CQ_CTL_STS_3 0xD2020C + +#define mmNIC1_QM0_CQ_CTL_STS_4 0xD20210 + +#define mmNIC1_QM0_CQ_IFIFO_CNT_0 0xD20214 + +#define mmNIC1_QM0_CQ_IFIFO_CNT_1 0xD20218 + +#define mmNIC1_QM0_CQ_IFIFO_CNT_2 0xD2021C + +#define mmNIC1_QM0_CQ_IFIFO_CNT_3 0xD20220 + +#define mmNIC1_QM0_CQ_IFIFO_CNT_4 0xD20224 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_0 0xD20228 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_1 0xD2022C + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_2 0xD20230 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_3 0xD20234 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_4 0xD20238 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_0 0xD2023C + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_1 0xD20240 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_2 0xD20244 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_3 0xD20248 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_4 0xD2024C + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_0 0xD20250 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_1 0xD20254 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_2 0xD20258 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_3 0xD2025C + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_4 0xD20260 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_0 0xD20264 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_1 0xD20268 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_2 0xD2026C + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_3 0xD20270 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_4 0xD20274 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_0 0xD20278 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_1 0xD2027C + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 0xD20280 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_3 0xD20284 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_4 0xD20288 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_0 0xD2028C + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_1 0xD20290 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_2 0xD20294 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_3 0xD20298 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_4 0xD2029C + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_0 0xD202A0 + +#define 
mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_1 0xD202A4 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_2 0xD202A8 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_3 0xD202AC + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_4 0xD202B0 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_0 0xD202B4 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_1 0xD202B8 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_2 0xD202BC + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_3 0xD202C0 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_4 0xD202C4 + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_0 0xD202C8 + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_1 0xD202CC + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_2 0xD202D0 + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_3 0xD202D4 + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_4 0xD202D8 + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xD202E0 + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xD202E4 + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xD202E8 + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xD202EC + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xD202F0 + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xD202F4 + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xD202F8 + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xD202FC + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xD20300 + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xD20304 + +#define mmNIC1_QM0_CP_FENCE0_RDATA_0 0xD20308 + +#define mmNIC1_QM0_CP_FENCE0_RDATA_1 0xD2030C + +#define mmNIC1_QM0_CP_FENCE0_RDATA_2 0xD20310 + +#define mmNIC1_QM0_CP_FENCE0_RDATA_3 0xD20314 + +#define mmNIC1_QM0_CP_FENCE0_RDATA_4 0xD20318 + +#define mmNIC1_QM0_CP_FENCE1_RDATA_0 0xD2031C + +#define mmNIC1_QM0_CP_FENCE1_RDATA_1 0xD20320 + +#define mmNIC1_QM0_CP_FENCE1_RDATA_2 0xD20324 + +#define mmNIC1_QM0_CP_FENCE1_RDATA_3 0xD20328 + +#define mmNIC1_QM0_CP_FENCE1_RDATA_4 0xD2032C + +#define mmNIC1_QM0_CP_FENCE2_RDATA_0 0xD20330 + +#define mmNIC1_QM0_CP_FENCE2_RDATA_1 0xD20334 + +#define mmNIC1_QM0_CP_FENCE2_RDATA_2 0xD20338 + +#define 
mmNIC1_QM0_CP_FENCE2_RDATA_3 0xD2033C + +#define mmNIC1_QM0_CP_FENCE2_RDATA_4 0xD20340 + +#define mmNIC1_QM0_CP_FENCE3_RDATA_0 0xD20344 + +#define mmNIC1_QM0_CP_FENCE3_RDATA_1 0xD20348 + +#define mmNIC1_QM0_CP_FENCE3_RDATA_2 0xD2034C + +#define mmNIC1_QM0_CP_FENCE3_RDATA_3 0xD20350 + +#define mmNIC1_QM0_CP_FENCE3_RDATA_4 0xD20354 + +#define mmNIC1_QM0_CP_FENCE0_CNT_0 0xD20358 + +#define mmNIC1_QM0_CP_FENCE0_CNT_1 0xD2035C + +#define mmNIC1_QM0_CP_FENCE0_CNT_2 0xD20360 + +#define mmNIC1_QM0_CP_FENCE0_CNT_3 0xD20364 + +#define mmNIC1_QM0_CP_FENCE0_CNT_4 0xD20368 + +#define mmNIC1_QM0_CP_FENCE1_CNT_0 0xD2036C + +#define mmNIC1_QM0_CP_FENCE1_CNT_1 0xD20370 + +#define mmNIC1_QM0_CP_FENCE1_CNT_2 0xD20374 + +#define mmNIC1_QM0_CP_FENCE1_CNT_3 0xD20378 + +#define mmNIC1_QM0_CP_FENCE1_CNT_4 0xD2037C + +#define mmNIC1_QM0_CP_FENCE2_CNT_0 0xD20380 + +#define mmNIC1_QM0_CP_FENCE2_CNT_1 0xD20384 + +#define mmNIC1_QM0_CP_FENCE2_CNT_2 0xD20388 + +#define mmNIC1_QM0_CP_FENCE2_CNT_3 0xD2038C + +#define mmNIC1_QM0_CP_FENCE2_CNT_4 0xD20390 + +#define mmNIC1_QM0_CP_FENCE3_CNT_0 0xD20394 + +#define mmNIC1_QM0_CP_FENCE3_CNT_1 0xD20398 + +#define mmNIC1_QM0_CP_FENCE3_CNT_2 0xD2039C + +#define mmNIC1_QM0_CP_FENCE3_CNT_3 0xD203A0 + +#define mmNIC1_QM0_CP_FENCE3_CNT_4 0xD203A4 + +#define mmNIC1_QM0_CP_STS_0 0xD203A8 + +#define mmNIC1_QM0_CP_STS_1 0xD203AC + +#define mmNIC1_QM0_CP_STS_2 0xD203B0 + +#define mmNIC1_QM0_CP_STS_3 0xD203B4 + +#define mmNIC1_QM0_CP_STS_4 0xD203B8 + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_0 0xD203BC + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_1 0xD203C0 + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_2 0xD203C4 + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_3 0xD203C8 + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_4 0xD203CC + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_0 0xD203D0 + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_1 0xD203D4 + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_2 0xD203D8 + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_3 0xD203DC + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_4 0xD203E0 + 
+#define mmNIC1_QM0_CP_BARRIER_CFG_0 0xD203F4 + +#define mmNIC1_QM0_CP_BARRIER_CFG_1 0xD203F8 + +#define mmNIC1_QM0_CP_BARRIER_CFG_2 0xD203FC + +#define mmNIC1_QM0_CP_BARRIER_CFG_3 0xD20400 + +#define mmNIC1_QM0_CP_BARRIER_CFG_4 0xD20404 + +#define mmNIC1_QM0_CP_DBG_0_0 0xD20408 + +#define mmNIC1_QM0_CP_DBG_0_1 0xD2040C + +#define mmNIC1_QM0_CP_DBG_0_2 0xD20410 + +#define mmNIC1_QM0_CP_DBG_0_3 0xD20414 + +#define mmNIC1_QM0_CP_DBG_0_4 0xD20418 + +#define mmNIC1_QM0_CP_ARUSER_31_11_0 0xD2041C + +#define mmNIC1_QM0_CP_ARUSER_31_11_1 0xD20420 + +#define mmNIC1_QM0_CP_ARUSER_31_11_2 0xD20424 + +#define mmNIC1_QM0_CP_ARUSER_31_11_3 0xD20428 + +#define mmNIC1_QM0_CP_ARUSER_31_11_4 0xD2042C + +#define mmNIC1_QM0_CP_AWUSER_31_11_0 0xD20430 + +#define mmNIC1_QM0_CP_AWUSER_31_11_1 0xD20434 + +#define mmNIC1_QM0_CP_AWUSER_31_11_2 0xD20438 + +#define mmNIC1_QM0_CP_AWUSER_31_11_3 0xD2043C + +#define mmNIC1_QM0_CP_AWUSER_31_11_4 0xD20440 + +#define mmNIC1_QM0_ARB_CFG_0 0xD20A00 + +#define mmNIC1_QM0_ARB_CHOISE_Q_PUSH 0xD20A04 + +#define mmNIC1_QM0_ARB_WRR_WEIGHT_0 0xD20A08 + +#define mmNIC1_QM0_ARB_WRR_WEIGHT_1 0xD20A0C + +#define mmNIC1_QM0_ARB_WRR_WEIGHT_2 0xD20A10 + +#define mmNIC1_QM0_ARB_WRR_WEIGHT_3 0xD20A14 + +#define mmNIC1_QM0_ARB_CFG_1 0xD20A18 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_0 0xD20A20 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_1 0xD20A24 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_2 0xD20A28 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_3 0xD20A2C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_4 0xD20A30 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_5 0xD20A34 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_6 0xD20A38 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_7 0xD20A3C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_8 0xD20A40 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_9 0xD20A44 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_10 0xD20A48 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_11 0xD20A4C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_12 0xD20A50 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_13 0xD20A54 + 
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_14 0xD20A58 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_15 0xD20A5C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_16 0xD20A60 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_17 0xD20A64 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_18 0xD20A68 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_19 0xD20A6C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_20 0xD20A70 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_21 0xD20A74 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_22 0xD20A78 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_23 0xD20A7C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 0xD20A80 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_25 0xD20A84 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_26 0xD20A88 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_27 0xD20A8C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_28 0xD20A90 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_29 0xD20A94 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_30 0xD20A98 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_31 0xD20A9C + +#define mmNIC1_QM0_ARB_MST_CRED_INC 0xD20AA0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xD20AA4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xD20AA8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xD20AAC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xD20AB0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xD20AB4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xD20AB8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xD20ABC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xD20AC0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xD20AC4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xD20AC8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xD20ACC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xD20AD0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xD20AD4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xD20AD8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xD20ADC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xD20AE0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xD20AE4 + 
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xD20AE8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xD20AEC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xD20AF0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xD20AF4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xD20AF8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xD20AFC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xD20B00 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xD20B04 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xD20B08 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xD20B0C + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xD20B10 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xD20B14 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xD20B18 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xD20B1C + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xD20B20 + +#define mmNIC1_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xD20B28 + +#define mmNIC1_QM0_ARB_MST_SLAVE_EN 0xD20B2C + +#define mmNIC1_QM0_ARB_MST_QUIET_PER 0xD20B34 + +#define mmNIC1_QM0_ARB_SLV_CHOISE_WDT 0xD20B38 + +#define mmNIC1_QM0_ARB_SLV_ID 0xD20B3C + +#define mmNIC1_QM0_ARB_MSG_MAX_INFLIGHT 0xD20B44 + +#define mmNIC1_QM0_ARB_MSG_AWUSER_31_11 0xD20B48 + +#define mmNIC1_QM0_ARB_MSG_AWUSER_SEC_PROP 0xD20B4C + +#define mmNIC1_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xD20B50 + +#define mmNIC1_QM0_ARB_BASE_LO 0xD20B54 + +#define mmNIC1_QM0_ARB_BASE_HI 0xD20B58 + +#define mmNIC1_QM0_ARB_STATE_STS 0xD20B80 + +#define mmNIC1_QM0_ARB_CHOISE_FULLNESS_STS 0xD20B84 + +#define mmNIC1_QM0_ARB_MSG_STS 0xD20B88 + +#define mmNIC1_QM0_ARB_SLV_CHOISE_Q_HEAD 0xD20B8C + +#define mmNIC1_QM0_ARB_ERR_CAUSE 0xD20B9C + +#define mmNIC1_QM0_ARB_ERR_MSG_EN 0xD20BA0 + +#define mmNIC1_QM0_ARB_ERR_STS_DRP 0xD20BA8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_0 0xD20BB0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_1 0xD20BB4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_2 0xD20BB8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_3 0xD20BBC + +#define 
mmNIC1_QM0_ARB_MST_CRED_STS_4 0xD20BC0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_5 0xD20BC4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_6 0xD20BC8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_7 0xD20BCC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_8 0xD20BD0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_9 0xD20BD4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_10 0xD20BD8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_11 0xD20BDC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_12 0xD20BE0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_13 0xD20BE4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_14 0xD20BE8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_15 0xD20BEC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_16 0xD20BF0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_17 0xD20BF4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_18 0xD20BF8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_19 0xD20BFC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_20 0xD20C00 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_21 0xD20C04 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_22 0xD20C08 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_23 0xD20C0C + +#define mmNIC1_QM0_ARB_MST_CRED_STS_24 0xD20C10 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_25 0xD20C14 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_26 0xD20C18 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_27 0xD20C1C + +#define mmNIC1_QM0_ARB_MST_CRED_STS_28 0xD20C20 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_29 0xD20C24 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_30 0xD20C28 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_31 0xD20C2C + +#define mmNIC1_QM0_CGM_CFG 0xD20C70 + +#define mmNIC1_QM0_CGM_STS 0xD20C74 + +#define mmNIC1_QM0_CGM_CFG1 0xD20C78 + +#define mmNIC1_QM0_LOCAL_RANGE_BASE 0xD20C80 + +#define mmNIC1_QM0_LOCAL_RANGE_SIZE 0xD20C84 + +#define mmNIC1_QM0_CSMR_STRICT_PRIO_CFG 0xD20C90 + +#define mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_1 0xD20C94 + +#define mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_0 0xD20C98 + +#define mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_1 0xD20C9C + +#define mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_0 0xD20CA0 + +#define mmNIC1_QM0_GLBL_AXCACHE 0xD20CA4 + +#define mmNIC1_QM0_IND_GW_APB_CFG 0xD20CB0 + +#define 
mmNIC1_QM0_IND_GW_APB_WDATA 0xD20CB4 + +#define mmNIC1_QM0_IND_GW_APB_RDATA 0xD20CB8 + +#define mmNIC1_QM0_IND_GW_APB_STATUS 0xD20CBC + +#define mmNIC1_QM0_GLBL_ERR_ADDR_LO 0xD20CD0 + +#define mmNIC1_QM0_GLBL_ERR_ADDR_HI 0xD20CD4 + +#define mmNIC1_QM0_GLBL_ERR_WDATA 0xD20CD8 + +#define mmNIC1_QM0_GLBL_MEM_INIT_BUSY 0xD20D00 + +#endif /* ASIC_REG_NIC1_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h new file mode 100644 index 000000000000..1b115ee6d6f0 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC1_QM1_REGS_H_ +#define ASIC_REG_NIC1_QM1_REGS_H_ + +/* + ***************************************** + * NIC1_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC1_QM1_GLBL_CFG0 0xD22000 + +#define mmNIC1_QM1_GLBL_CFG1 0xD22004 + +#define mmNIC1_QM1_GLBL_PROT 0xD22008 + +#define mmNIC1_QM1_GLBL_ERR_CFG 0xD2200C + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_0 0xD22010 + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_1 0xD22014 + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_2 0xD22018 + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_3 0xD2201C + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_4 0xD22020 + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0 0xD22024 + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1 0xD22028 + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2 0xD2202C + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3 0xD22030 + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4 0xD22034 + +#define mmNIC1_QM1_GLBL_STS0 0xD22038 + +#define mmNIC1_QM1_GLBL_STS1_0 0xD22040 + +#define mmNIC1_QM1_GLBL_STS1_1 0xD22044 + +#define mmNIC1_QM1_GLBL_STS1_2 0xD22048 + 
+#define mmNIC1_QM1_GLBL_STS1_3 0xD2204C + +#define mmNIC1_QM1_GLBL_STS1_4 0xD22050 + +#define mmNIC1_QM1_GLBL_MSG_EN_0 0xD22054 + +#define mmNIC1_QM1_GLBL_MSG_EN_1 0xD22058 + +#define mmNIC1_QM1_GLBL_MSG_EN_2 0xD2205C + +#define mmNIC1_QM1_GLBL_MSG_EN_3 0xD22060 + +#define mmNIC1_QM1_GLBL_MSG_EN_4 0xD22068 + +#define mmNIC1_QM1_PQ_BASE_LO_0 0xD22070 + +#define mmNIC1_QM1_PQ_BASE_LO_1 0xD22074 + +#define mmNIC1_QM1_PQ_BASE_LO_2 0xD22078 + +#define mmNIC1_QM1_PQ_BASE_LO_3 0xD2207C + +#define mmNIC1_QM1_PQ_BASE_HI_0 0xD22080 + +#define mmNIC1_QM1_PQ_BASE_HI_1 0xD22084 + +#define mmNIC1_QM1_PQ_BASE_HI_2 0xD22088 + +#define mmNIC1_QM1_PQ_BASE_HI_3 0xD2208C + +#define mmNIC1_QM1_PQ_SIZE_0 0xD22090 + +#define mmNIC1_QM1_PQ_SIZE_1 0xD22094 + +#define mmNIC1_QM1_PQ_SIZE_2 0xD22098 + +#define mmNIC1_QM1_PQ_SIZE_3 0xD2209C + +#define mmNIC1_QM1_PQ_PI_0 0xD220A0 + +#define mmNIC1_QM1_PQ_PI_1 0xD220A4 + +#define mmNIC1_QM1_PQ_PI_2 0xD220A8 + +#define mmNIC1_QM1_PQ_PI_3 0xD220AC + +#define mmNIC1_QM1_PQ_CI_0 0xD220B0 + +#define mmNIC1_QM1_PQ_CI_1 0xD220B4 + +#define mmNIC1_QM1_PQ_CI_2 0xD220B8 + +#define mmNIC1_QM1_PQ_CI_3 0xD220BC + +#define mmNIC1_QM1_PQ_CFG0_0 0xD220C0 + +#define mmNIC1_QM1_PQ_CFG0_1 0xD220C4 + +#define mmNIC1_QM1_PQ_CFG0_2 0xD220C8 + +#define mmNIC1_QM1_PQ_CFG0_3 0xD220CC + +#define mmNIC1_QM1_PQ_CFG1_0 0xD220D0 + +#define mmNIC1_QM1_PQ_CFG1_1 0xD220D4 + +#define mmNIC1_QM1_PQ_CFG1_2 0xD220D8 + +#define mmNIC1_QM1_PQ_CFG1_3 0xD220DC + +#define mmNIC1_QM1_PQ_ARUSER_31_11_0 0xD220E0 + +#define mmNIC1_QM1_PQ_ARUSER_31_11_1 0xD220E4 + +#define mmNIC1_QM1_PQ_ARUSER_31_11_2 0xD220E8 + +#define mmNIC1_QM1_PQ_ARUSER_31_11_3 0xD220EC + +#define mmNIC1_QM1_PQ_STS0_0 0xD220F0 + +#define mmNIC1_QM1_PQ_STS0_1 0xD220F4 + +#define mmNIC1_QM1_PQ_STS0_2 0xD220F8 + +#define mmNIC1_QM1_PQ_STS0_3 0xD220FC + +#define mmNIC1_QM1_PQ_STS1_0 0xD22100 + +#define mmNIC1_QM1_PQ_STS1_1 0xD22104 + +#define mmNIC1_QM1_PQ_STS1_2 0xD22108 + +#define mmNIC1_QM1_PQ_STS1_3 0xD2210C + +#define 
mmNIC1_QM1_CQ_CFG0_0 0xD22110 + +#define mmNIC1_QM1_CQ_CFG0_1 0xD22114 + +#define mmNIC1_QM1_CQ_CFG0_2 0xD22118 + +#define mmNIC1_QM1_CQ_CFG0_3 0xD2211C + +#define mmNIC1_QM1_CQ_CFG0_4 0xD22120 + +#define mmNIC1_QM1_CQ_CFG1_0 0xD22124 + +#define mmNIC1_QM1_CQ_CFG1_1 0xD22128 + +#define mmNIC1_QM1_CQ_CFG1_2 0xD2212C + +#define mmNIC1_QM1_CQ_CFG1_3 0xD22130 + +#define mmNIC1_QM1_CQ_CFG1_4 0xD22134 + +#define mmNIC1_QM1_CQ_ARUSER_31_11_0 0xD22138 + +#define mmNIC1_QM1_CQ_ARUSER_31_11_1 0xD2213C + +#define mmNIC1_QM1_CQ_ARUSER_31_11_2 0xD22140 + +#define mmNIC1_QM1_CQ_ARUSER_31_11_3 0xD22144 + +#define mmNIC1_QM1_CQ_ARUSER_31_11_4 0xD22148 + +#define mmNIC1_QM1_CQ_STS0_0 0xD2214C + +#define mmNIC1_QM1_CQ_STS0_1 0xD22150 + +#define mmNIC1_QM1_CQ_STS0_2 0xD22154 + +#define mmNIC1_QM1_CQ_STS0_3 0xD22158 + +#define mmNIC1_QM1_CQ_STS0_4 0xD2215C + +#define mmNIC1_QM1_CQ_STS1_0 0xD22160 + +#define mmNIC1_QM1_CQ_STS1_1 0xD22164 + +#define mmNIC1_QM1_CQ_STS1_2 0xD22168 + +#define mmNIC1_QM1_CQ_STS1_3 0xD2216C + +#define mmNIC1_QM1_CQ_STS1_4 0xD22170 + +#define mmNIC1_QM1_CQ_PTR_LO_0 0xD22174 + +#define mmNIC1_QM1_CQ_PTR_HI_0 0xD22178 + +#define mmNIC1_QM1_CQ_TSIZE_0 0xD2217C + +#define mmNIC1_QM1_CQ_CTL_0 0xD22180 + +#define mmNIC1_QM1_CQ_PTR_LO_1 0xD22184 + +#define mmNIC1_QM1_CQ_PTR_HI_1 0xD22188 + +#define mmNIC1_QM1_CQ_TSIZE_1 0xD2218C + +#define mmNIC1_QM1_CQ_CTL_1 0xD22190 + +#define mmNIC1_QM1_CQ_PTR_LO_2 0xD22194 + +#define mmNIC1_QM1_CQ_PTR_HI_2 0xD22198 + +#define mmNIC1_QM1_CQ_TSIZE_2 0xD2219C + +#define mmNIC1_QM1_CQ_CTL_2 0xD221A0 + +#define mmNIC1_QM1_CQ_PTR_LO_3 0xD221A4 + +#define mmNIC1_QM1_CQ_PTR_HI_3 0xD221A8 + +#define mmNIC1_QM1_CQ_TSIZE_3 0xD221AC + +#define mmNIC1_QM1_CQ_CTL_3 0xD221B0 + +#define mmNIC1_QM1_CQ_PTR_LO_4 0xD221B4 + +#define mmNIC1_QM1_CQ_PTR_HI_4 0xD221B8 + +#define mmNIC1_QM1_CQ_TSIZE_4 0xD221BC + +#define mmNIC1_QM1_CQ_CTL_4 0xD221C0 + +#define mmNIC1_QM1_CQ_PTR_LO_STS_0 0xD221C4 + +#define mmNIC1_QM1_CQ_PTR_LO_STS_1 0xD221C8 + +#define 
mmNIC1_QM1_CQ_PTR_LO_STS_2 0xD221CC + +#define mmNIC1_QM1_CQ_PTR_LO_STS_3 0xD221D0 + +#define mmNIC1_QM1_CQ_PTR_LO_STS_4 0xD221D4 + +#define mmNIC1_QM1_CQ_PTR_HI_STS_0 0xD221D8 + +#define mmNIC1_QM1_CQ_PTR_HI_STS_1 0xD221DC + +#define mmNIC1_QM1_CQ_PTR_HI_STS_2 0xD221E0 + +#define mmNIC1_QM1_CQ_PTR_HI_STS_3 0xD221E4 + +#define mmNIC1_QM1_CQ_PTR_HI_STS_4 0xD221E8 + +#define mmNIC1_QM1_CQ_TSIZE_STS_0 0xD221EC + +#define mmNIC1_QM1_CQ_TSIZE_STS_1 0xD221F0 + +#define mmNIC1_QM1_CQ_TSIZE_STS_2 0xD221F4 + +#define mmNIC1_QM1_CQ_TSIZE_STS_3 0xD221F8 + +#define mmNIC1_QM1_CQ_TSIZE_STS_4 0xD221FC + +#define mmNIC1_QM1_CQ_CTL_STS_0 0xD22200 + +#define mmNIC1_QM1_CQ_CTL_STS_1 0xD22204 + +#define mmNIC1_QM1_CQ_CTL_STS_2 0xD22208 + +#define mmNIC1_QM1_CQ_CTL_STS_3 0xD2220C + +#define mmNIC1_QM1_CQ_CTL_STS_4 0xD22210 + +#define mmNIC1_QM1_CQ_IFIFO_CNT_0 0xD22214 + +#define mmNIC1_QM1_CQ_IFIFO_CNT_1 0xD22218 + +#define mmNIC1_QM1_CQ_IFIFO_CNT_2 0xD2221C + +#define mmNIC1_QM1_CQ_IFIFO_CNT_3 0xD22220 + +#define mmNIC1_QM1_CQ_IFIFO_CNT_4 0xD22224 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_0 0xD22228 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_1 0xD2222C + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_2 0xD22230 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_3 0xD22234 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_4 0xD22238 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_0 0xD2223C + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_1 0xD22240 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_2 0xD22244 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_3 0xD22248 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_4 0xD2224C + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_0 0xD22250 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_1 0xD22254 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_2 0xD22258 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_3 0xD2225C + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_4 0xD22260 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_0 0xD22264 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_1 0xD22268 + +#define 
mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_2 0xD2226C + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_3 0xD22270 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_4 0xD22274 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_0 0xD22278 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_1 0xD2227C + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 0xD22280 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_3 0xD22284 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_4 0xD22288 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_0 0xD2228C + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_1 0xD22290 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_2 0xD22294 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_3 0xD22298 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_4 0xD2229C + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_0 0xD222A0 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_1 0xD222A4 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_2 0xD222A8 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_3 0xD222AC + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_4 0xD222B0 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_0 0xD222B4 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_1 0xD222B8 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_2 0xD222BC + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_3 0xD222C0 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_4 0xD222C4 + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_0 0xD222C8 + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_1 0xD222CC + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_2 0xD222D0 + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_3 0xD222D4 + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_4 0xD222D8 + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xD222E0 + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xD222E4 + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xD222E8 + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xD222EC + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xD222F0 + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xD222F4 + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xD222F8 + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xD222FC + +#define 
mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xD22300 + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xD22304 + +#define mmNIC1_QM1_CP_FENCE0_RDATA_0 0xD22308 + +#define mmNIC1_QM1_CP_FENCE0_RDATA_1 0xD2230C + +#define mmNIC1_QM1_CP_FENCE0_RDATA_2 0xD22310 + +#define mmNIC1_QM1_CP_FENCE0_RDATA_3 0xD22314 + +#define mmNIC1_QM1_CP_FENCE0_RDATA_4 0xD22318 + +#define mmNIC1_QM1_CP_FENCE1_RDATA_0 0xD2231C + +#define mmNIC1_QM1_CP_FENCE1_RDATA_1 0xD22320 + +#define mmNIC1_QM1_CP_FENCE1_RDATA_2 0xD22324 + +#define mmNIC1_QM1_CP_FENCE1_RDATA_3 0xD22328 + +#define mmNIC1_QM1_CP_FENCE1_RDATA_4 0xD2232C + +#define mmNIC1_QM1_CP_FENCE2_RDATA_0 0xD22330 + +#define mmNIC1_QM1_CP_FENCE2_RDATA_1 0xD22334 + +#define mmNIC1_QM1_CP_FENCE2_RDATA_2 0xD22338 + +#define mmNIC1_QM1_CP_FENCE2_RDATA_3 0xD2233C + +#define mmNIC1_QM1_CP_FENCE2_RDATA_4 0xD22340 + +#define mmNIC1_QM1_CP_FENCE3_RDATA_0 0xD22344 + +#define mmNIC1_QM1_CP_FENCE3_RDATA_1 0xD22348 + +#define mmNIC1_QM1_CP_FENCE3_RDATA_2 0xD2234C + +#define mmNIC1_QM1_CP_FENCE3_RDATA_3 0xD22350 + +#define mmNIC1_QM1_CP_FENCE3_RDATA_4 0xD22354 + +#define mmNIC1_QM1_CP_FENCE0_CNT_0 0xD22358 + +#define mmNIC1_QM1_CP_FENCE0_CNT_1 0xD2235C + +#define mmNIC1_QM1_CP_FENCE0_CNT_2 0xD22360 + +#define mmNIC1_QM1_CP_FENCE0_CNT_3 0xD22364 + +#define mmNIC1_QM1_CP_FENCE0_CNT_4 0xD22368 + +#define mmNIC1_QM1_CP_FENCE1_CNT_0 0xD2236C + +#define mmNIC1_QM1_CP_FENCE1_CNT_1 0xD22370 + +#define mmNIC1_QM1_CP_FENCE1_CNT_2 0xD22374 + +#define mmNIC1_QM1_CP_FENCE1_CNT_3 0xD22378 + +#define mmNIC1_QM1_CP_FENCE1_CNT_4 0xD2237C + +#define mmNIC1_QM1_CP_FENCE2_CNT_0 0xD22380 + +#define mmNIC1_QM1_CP_FENCE2_CNT_1 0xD22384 + +#define mmNIC1_QM1_CP_FENCE2_CNT_2 0xD22388 + +#define mmNIC1_QM1_CP_FENCE2_CNT_3 0xD2238C + +#define mmNIC1_QM1_CP_FENCE2_CNT_4 0xD22390 + +#define mmNIC1_QM1_CP_FENCE3_CNT_0 0xD22394 + +#define mmNIC1_QM1_CP_FENCE3_CNT_1 0xD22398 + +#define mmNIC1_QM1_CP_FENCE3_CNT_2 0xD2239C + +#define mmNIC1_QM1_CP_FENCE3_CNT_3 0xD223A0 + +#define 
mmNIC1_QM1_CP_FENCE3_CNT_4 0xD223A4 + +#define mmNIC1_QM1_CP_STS_0 0xD223A8 + +#define mmNIC1_QM1_CP_STS_1 0xD223AC + +#define mmNIC1_QM1_CP_STS_2 0xD223B0 + +#define mmNIC1_QM1_CP_STS_3 0xD223B4 + +#define mmNIC1_QM1_CP_STS_4 0xD223B8 + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_0 0xD223BC + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_1 0xD223C0 + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_2 0xD223C4 + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_3 0xD223C8 + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_4 0xD223CC + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_0 0xD223D0 + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_1 0xD223D4 + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_2 0xD223D8 + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_3 0xD223DC + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_4 0xD223E0 + +#define mmNIC1_QM1_CP_BARRIER_CFG_0 0xD223F4 + +#define mmNIC1_QM1_CP_BARRIER_CFG_1 0xD223F8 + +#define mmNIC1_QM1_CP_BARRIER_CFG_2 0xD223FC + +#define mmNIC1_QM1_CP_BARRIER_CFG_3 0xD22400 + +#define mmNIC1_QM1_CP_BARRIER_CFG_4 0xD22404 + +#define mmNIC1_QM1_CP_DBG_0_0 0xD22408 + +#define mmNIC1_QM1_CP_DBG_0_1 0xD2240C + +#define mmNIC1_QM1_CP_DBG_0_2 0xD22410 + +#define mmNIC1_QM1_CP_DBG_0_3 0xD22414 + +#define mmNIC1_QM1_CP_DBG_0_4 0xD22418 + +#define mmNIC1_QM1_CP_ARUSER_31_11_0 0xD2241C + +#define mmNIC1_QM1_CP_ARUSER_31_11_1 0xD22420 + +#define mmNIC1_QM1_CP_ARUSER_31_11_2 0xD22424 + +#define mmNIC1_QM1_CP_ARUSER_31_11_3 0xD22428 + +#define mmNIC1_QM1_CP_ARUSER_31_11_4 0xD2242C + +#define mmNIC1_QM1_CP_AWUSER_31_11_0 0xD22430 + +#define mmNIC1_QM1_CP_AWUSER_31_11_1 0xD22434 + +#define mmNIC1_QM1_CP_AWUSER_31_11_2 0xD22438 + +#define mmNIC1_QM1_CP_AWUSER_31_11_3 0xD2243C + +#define mmNIC1_QM1_CP_AWUSER_31_11_4 0xD22440 + +#define mmNIC1_QM1_ARB_CFG_0 0xD22A00 + +#define mmNIC1_QM1_ARB_CHOISE_Q_PUSH 0xD22A04 + +#define mmNIC1_QM1_ARB_WRR_WEIGHT_0 0xD22A08 + +#define mmNIC1_QM1_ARB_WRR_WEIGHT_1 0xD22A0C + +#define mmNIC1_QM1_ARB_WRR_WEIGHT_2 0xD22A10 + +#define mmNIC1_QM1_ARB_WRR_WEIGHT_3 0xD22A14 + +#define 
mmNIC1_QM1_ARB_CFG_1 0xD22A18 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_0 0xD22A20 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_1 0xD22A24 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_2 0xD22A28 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_3 0xD22A2C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_4 0xD22A30 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_5 0xD22A34 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_6 0xD22A38 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_7 0xD22A3C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_8 0xD22A40 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_9 0xD22A44 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_10 0xD22A48 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_11 0xD22A4C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_12 0xD22A50 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_13 0xD22A54 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_14 0xD22A58 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_15 0xD22A5C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_16 0xD22A60 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_17 0xD22A64 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_18 0xD22A68 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_19 0xD22A6C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_20 0xD22A70 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_21 0xD22A74 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_22 0xD22A78 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_23 0xD22A7C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 0xD22A80 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_25 0xD22A84 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_26 0xD22A88 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_27 0xD22A8C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_28 0xD22A90 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_29 0xD22A94 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_30 0xD22A98 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_31 0xD22A9C + +#define mmNIC1_QM1_ARB_MST_CRED_INC 0xD22AA0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xD22AA4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xD22AA8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xD22AAC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xD22AB0 + 
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xD22AB4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xD22AB8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xD22ABC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xD22AC0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xD22AC4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xD22AC8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xD22ACC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xD22AD0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xD22AD4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xD22AD8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xD22ADC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xD22AE0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xD22AE4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xD22AE8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xD22AEC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xD22AF0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xD22AF4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xD22AF8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xD22AFC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xD22B00 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xD22B04 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xD22B08 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xD22B0C + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xD22B10 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xD22B14 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xD22B18 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xD22B1C + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xD22B20 + +#define mmNIC1_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xD22B28 + +#define mmNIC1_QM1_ARB_MST_SLAVE_EN 0xD22B2C + +#define mmNIC1_QM1_ARB_MST_QUIET_PER 0xD22B34 + +#define mmNIC1_QM1_ARB_SLV_CHOISE_WDT 0xD22B38 + +#define mmNIC1_QM1_ARB_SLV_ID 0xD22B3C + +#define mmNIC1_QM1_ARB_MSG_MAX_INFLIGHT 0xD22B44 + +#define mmNIC1_QM1_ARB_MSG_AWUSER_31_11 0xD22B48 + 
+#define mmNIC1_QM1_ARB_MSG_AWUSER_SEC_PROP 0xD22B4C + +#define mmNIC1_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xD22B50 + +#define mmNIC1_QM1_ARB_BASE_LO 0xD22B54 + +#define mmNIC1_QM1_ARB_BASE_HI 0xD22B58 + +#define mmNIC1_QM1_ARB_STATE_STS 0xD22B80 + +#define mmNIC1_QM1_ARB_CHOISE_FULLNESS_STS 0xD22B84 + +#define mmNIC1_QM1_ARB_MSG_STS 0xD22B88 + +#define mmNIC1_QM1_ARB_SLV_CHOISE_Q_HEAD 0xD22B8C + +#define mmNIC1_QM1_ARB_ERR_CAUSE 0xD22B9C + +#define mmNIC1_QM1_ARB_ERR_MSG_EN 0xD22BA0 + +#define mmNIC1_QM1_ARB_ERR_STS_DRP 0xD22BA8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_0 0xD22BB0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_1 0xD22BB4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_2 0xD22BB8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_3 0xD22BBC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_4 0xD22BC0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_5 0xD22BC4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_6 0xD22BC8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_7 0xD22BCC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_8 0xD22BD0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_9 0xD22BD4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_10 0xD22BD8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_11 0xD22BDC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_12 0xD22BE0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_13 0xD22BE4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_14 0xD22BE8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_15 0xD22BEC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_16 0xD22BF0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_17 0xD22BF4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_18 0xD22BF8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_19 0xD22BFC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_20 0xD22C00 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_21 0xD22C04 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_22 0xD22C08 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_23 0xD22C0C + +#define mmNIC1_QM1_ARB_MST_CRED_STS_24 0xD22C10 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_25 0xD22C14 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_26 0xD22C18 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_27 0xD22C1C + +#define mmNIC1_QM1_ARB_MST_CRED_STS_28 
0xD22C20 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_29 0xD22C24 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_30 0xD22C28 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_31 0xD22C2C + +#define mmNIC1_QM1_CGM_CFG 0xD22C70 + +#define mmNIC1_QM1_CGM_STS 0xD22C74 + +#define mmNIC1_QM1_CGM_CFG1 0xD22C78 + +#define mmNIC1_QM1_LOCAL_RANGE_BASE 0xD22C80 + +#define mmNIC1_QM1_LOCAL_RANGE_SIZE 0xD22C84 + +#define mmNIC1_QM1_CSMR_STRICT_PRIO_CFG 0xD22C90 + +#define mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_1 0xD22C94 + +#define mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_0 0xD22C98 + +#define mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_1 0xD22C9C + +#define mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_0 0xD22CA0 + +#define mmNIC1_QM1_GLBL_AXCACHE 0xD22CA4 + +#define mmNIC1_QM1_IND_GW_APB_CFG 0xD22CB0 + +#define mmNIC1_QM1_IND_GW_APB_WDATA 0xD22CB4 + +#define mmNIC1_QM1_IND_GW_APB_RDATA 0xD22CB8 + +#define mmNIC1_QM1_IND_GW_APB_STATUS 0xD22CBC + +#define mmNIC1_QM1_GLBL_ERR_ADDR_LO 0xD22CD0 + +#define mmNIC1_QM1_GLBL_ERR_ADDR_HI 0xD22CD4 + +#define mmNIC1_QM1_GLBL_ERR_WDATA 0xD22CD8 + +#define mmNIC1_QM1_GLBL_MEM_INIT_BUSY 0xD22D00 + +#endif /* ASIC_REG_NIC1_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h new file mode 100644 index 000000000000..a89116a4586f --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC2_QM0_REGS_H_ +#define ASIC_REG_NIC2_QM0_REGS_H_ + +/* + ***************************************** + * NIC2_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC2_QM0_GLBL_CFG0 0xD60000 + +#define mmNIC2_QM0_GLBL_CFG1 0xD60004 + +#define mmNIC2_QM0_GLBL_PROT 0xD60008 + +#define mmNIC2_QM0_GLBL_ERR_CFG 0xD6000C + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_0 0xD60010 + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_1 0xD60014 + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_2 0xD60018 + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_3 0xD6001C + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_4 0xD60020 + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0 0xD60024 + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1 0xD60028 + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2 0xD6002C + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3 0xD60030 + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4 0xD60034 + +#define mmNIC2_QM0_GLBL_STS0 0xD60038 + +#define mmNIC2_QM0_GLBL_STS1_0 0xD60040 + +#define mmNIC2_QM0_GLBL_STS1_1 0xD60044 + +#define mmNIC2_QM0_GLBL_STS1_2 0xD60048 + +#define mmNIC2_QM0_GLBL_STS1_3 0xD6004C + +#define mmNIC2_QM0_GLBL_STS1_4 0xD60050 + +#define mmNIC2_QM0_GLBL_MSG_EN_0 0xD60054 + +#define mmNIC2_QM0_GLBL_MSG_EN_1 0xD60058 + +#define mmNIC2_QM0_GLBL_MSG_EN_2 0xD6005C + +#define mmNIC2_QM0_GLBL_MSG_EN_3 0xD60060 + +#define mmNIC2_QM0_GLBL_MSG_EN_4 0xD60068 + +#define mmNIC2_QM0_PQ_BASE_LO_0 0xD60070 + +#define mmNIC2_QM0_PQ_BASE_LO_1 0xD60074 + +#define mmNIC2_QM0_PQ_BASE_LO_2 0xD60078 + +#define mmNIC2_QM0_PQ_BASE_LO_3 0xD6007C + +#define mmNIC2_QM0_PQ_BASE_HI_0 0xD60080 + +#define mmNIC2_QM0_PQ_BASE_HI_1 0xD60084 + +#define mmNIC2_QM0_PQ_BASE_HI_2 0xD60088 + +#define mmNIC2_QM0_PQ_BASE_HI_3 0xD6008C + +#define mmNIC2_QM0_PQ_SIZE_0 0xD60090 + +#define mmNIC2_QM0_PQ_SIZE_1 0xD60094 + +#define 
mmNIC2_QM0_PQ_SIZE_2 0xD60098 + +#define mmNIC2_QM0_PQ_SIZE_3 0xD6009C + +#define mmNIC2_QM0_PQ_PI_0 0xD600A0 + +#define mmNIC2_QM0_PQ_PI_1 0xD600A4 + +#define mmNIC2_QM0_PQ_PI_2 0xD600A8 + +#define mmNIC2_QM0_PQ_PI_3 0xD600AC + +#define mmNIC2_QM0_PQ_CI_0 0xD600B0 + +#define mmNIC2_QM0_PQ_CI_1 0xD600B4 + +#define mmNIC2_QM0_PQ_CI_2 0xD600B8 + +#define mmNIC2_QM0_PQ_CI_3 0xD600BC + +#define mmNIC2_QM0_PQ_CFG0_0 0xD600C0 + +#define mmNIC2_QM0_PQ_CFG0_1 0xD600C4 + +#define mmNIC2_QM0_PQ_CFG0_2 0xD600C8 + +#define mmNIC2_QM0_PQ_CFG0_3 0xD600CC + +#define mmNIC2_QM0_PQ_CFG1_0 0xD600D0 + +#define mmNIC2_QM0_PQ_CFG1_1 0xD600D4 + +#define mmNIC2_QM0_PQ_CFG1_2 0xD600D8 + +#define mmNIC2_QM0_PQ_CFG1_3 0xD600DC + +#define mmNIC2_QM0_PQ_ARUSER_31_11_0 0xD600E0 + +#define mmNIC2_QM0_PQ_ARUSER_31_11_1 0xD600E4 + +#define mmNIC2_QM0_PQ_ARUSER_31_11_2 0xD600E8 + +#define mmNIC2_QM0_PQ_ARUSER_31_11_3 0xD600EC + +#define mmNIC2_QM0_PQ_STS0_0 0xD600F0 + +#define mmNIC2_QM0_PQ_STS0_1 0xD600F4 + +#define mmNIC2_QM0_PQ_STS0_2 0xD600F8 + +#define mmNIC2_QM0_PQ_STS0_3 0xD600FC + +#define mmNIC2_QM0_PQ_STS1_0 0xD60100 + +#define mmNIC2_QM0_PQ_STS1_1 0xD60104 + +#define mmNIC2_QM0_PQ_STS1_2 0xD60108 + +#define mmNIC2_QM0_PQ_STS1_3 0xD6010C + +#define mmNIC2_QM0_CQ_CFG0_0 0xD60110 + +#define mmNIC2_QM0_CQ_CFG0_1 0xD60114 + +#define mmNIC2_QM0_CQ_CFG0_2 0xD60118 + +#define mmNIC2_QM0_CQ_CFG0_3 0xD6011C + +#define mmNIC2_QM0_CQ_CFG0_4 0xD60120 + +#define mmNIC2_QM0_CQ_CFG1_0 0xD60124 + +#define mmNIC2_QM0_CQ_CFG1_1 0xD60128 + +#define mmNIC2_QM0_CQ_CFG1_2 0xD6012C + +#define mmNIC2_QM0_CQ_CFG1_3 0xD60130 + +#define mmNIC2_QM0_CQ_CFG1_4 0xD60134 + +#define mmNIC2_QM0_CQ_ARUSER_31_11_0 0xD60138 + +#define mmNIC2_QM0_CQ_ARUSER_31_11_1 0xD6013C + +#define mmNIC2_QM0_CQ_ARUSER_31_11_2 0xD60140 + +#define mmNIC2_QM0_CQ_ARUSER_31_11_3 0xD60144 + +#define mmNIC2_QM0_CQ_ARUSER_31_11_4 0xD60148 + +#define mmNIC2_QM0_CQ_STS0_0 0xD6014C + +#define mmNIC2_QM0_CQ_STS0_1 0xD60150 + +#define 
mmNIC2_QM0_CQ_STS0_2 0xD60154 + +#define mmNIC2_QM0_CQ_STS0_3 0xD60158 + +#define mmNIC2_QM0_CQ_STS0_4 0xD6015C + +#define mmNIC2_QM0_CQ_STS1_0 0xD60160 + +#define mmNIC2_QM0_CQ_STS1_1 0xD60164 + +#define mmNIC2_QM0_CQ_STS1_2 0xD60168 + +#define mmNIC2_QM0_CQ_STS1_3 0xD6016C + +#define mmNIC2_QM0_CQ_STS1_4 0xD60170 + +#define mmNIC2_QM0_CQ_PTR_LO_0 0xD60174 + +#define mmNIC2_QM0_CQ_PTR_HI_0 0xD60178 + +#define mmNIC2_QM0_CQ_TSIZE_0 0xD6017C + +#define mmNIC2_QM0_CQ_CTL_0 0xD60180 + +#define mmNIC2_QM0_CQ_PTR_LO_1 0xD60184 + +#define mmNIC2_QM0_CQ_PTR_HI_1 0xD60188 + +#define mmNIC2_QM0_CQ_TSIZE_1 0xD6018C + +#define mmNIC2_QM0_CQ_CTL_1 0xD60190 + +#define mmNIC2_QM0_CQ_PTR_LO_2 0xD60194 + +#define mmNIC2_QM0_CQ_PTR_HI_2 0xD60198 + +#define mmNIC2_QM0_CQ_TSIZE_2 0xD6019C + +#define mmNIC2_QM0_CQ_CTL_2 0xD601A0 + +#define mmNIC2_QM0_CQ_PTR_LO_3 0xD601A4 + +#define mmNIC2_QM0_CQ_PTR_HI_3 0xD601A8 + +#define mmNIC2_QM0_CQ_TSIZE_3 0xD601AC + +#define mmNIC2_QM0_CQ_CTL_3 0xD601B0 + +#define mmNIC2_QM0_CQ_PTR_LO_4 0xD601B4 + +#define mmNIC2_QM0_CQ_PTR_HI_4 0xD601B8 + +#define mmNIC2_QM0_CQ_TSIZE_4 0xD601BC + +#define mmNIC2_QM0_CQ_CTL_4 0xD601C0 + +#define mmNIC2_QM0_CQ_PTR_LO_STS_0 0xD601C4 + +#define mmNIC2_QM0_CQ_PTR_LO_STS_1 0xD601C8 + +#define mmNIC2_QM0_CQ_PTR_LO_STS_2 0xD601CC + +#define mmNIC2_QM0_CQ_PTR_LO_STS_3 0xD601D0 + +#define mmNIC2_QM0_CQ_PTR_LO_STS_4 0xD601D4 + +#define mmNIC2_QM0_CQ_PTR_HI_STS_0 0xD601D8 + +#define mmNIC2_QM0_CQ_PTR_HI_STS_1 0xD601DC + +#define mmNIC2_QM0_CQ_PTR_HI_STS_2 0xD601E0 + +#define mmNIC2_QM0_CQ_PTR_HI_STS_3 0xD601E4 + +#define mmNIC2_QM0_CQ_PTR_HI_STS_4 0xD601E8 + +#define mmNIC2_QM0_CQ_TSIZE_STS_0 0xD601EC + +#define mmNIC2_QM0_CQ_TSIZE_STS_1 0xD601F0 + +#define mmNIC2_QM0_CQ_TSIZE_STS_2 0xD601F4 + +#define mmNIC2_QM0_CQ_TSIZE_STS_3 0xD601F8 + +#define mmNIC2_QM0_CQ_TSIZE_STS_4 0xD601FC + +#define mmNIC2_QM0_CQ_CTL_STS_0 0xD60200 + +#define mmNIC2_QM0_CQ_CTL_STS_1 0xD60204 + +#define mmNIC2_QM0_CQ_CTL_STS_2 0xD60208 + +#define 
mmNIC2_QM0_CQ_CTL_STS_3 0xD6020C + +#define mmNIC2_QM0_CQ_CTL_STS_4 0xD60210 + +#define mmNIC2_QM0_CQ_IFIFO_CNT_0 0xD60214 + +#define mmNIC2_QM0_CQ_IFIFO_CNT_1 0xD60218 + +#define mmNIC2_QM0_CQ_IFIFO_CNT_2 0xD6021C + +#define mmNIC2_QM0_CQ_IFIFO_CNT_3 0xD60220 + +#define mmNIC2_QM0_CQ_IFIFO_CNT_4 0xD60224 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_0 0xD60228 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_1 0xD6022C + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_2 0xD60230 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_3 0xD60234 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_4 0xD60238 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_0 0xD6023C + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_1 0xD60240 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_2 0xD60244 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_3 0xD60248 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_4 0xD6024C + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_0 0xD60250 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_1 0xD60254 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_2 0xD60258 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_3 0xD6025C + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_4 0xD60260 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_0 0xD60264 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_1 0xD60268 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_2 0xD6026C + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_3 0xD60270 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_4 0xD60274 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_0 0xD60278 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_1 0xD6027C + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 0xD60280 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_3 0xD60284 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_4 0xD60288 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_0 0xD6028C + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_1 0xD60290 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_2 0xD60294 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_3 0xD60298 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_4 0xD6029C + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_0 0xD602A0 + +#define 
mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_1 0xD602A4 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_2 0xD602A8 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_3 0xD602AC + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_4 0xD602B0 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_0 0xD602B4 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_1 0xD602B8 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_2 0xD602BC + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_3 0xD602C0 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_4 0xD602C4 + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_0 0xD602C8 + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_1 0xD602CC + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_2 0xD602D0 + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_3 0xD602D4 + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_4 0xD602D8 + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xD602E0 + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xD602E4 + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xD602E8 + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xD602EC + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xD602F0 + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xD602F4 + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xD602F8 + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xD602FC + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xD60300 + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xD60304 + +#define mmNIC2_QM0_CP_FENCE0_RDATA_0 0xD60308 + +#define mmNIC2_QM0_CP_FENCE0_RDATA_1 0xD6030C + +#define mmNIC2_QM0_CP_FENCE0_RDATA_2 0xD60310 + +#define mmNIC2_QM0_CP_FENCE0_RDATA_3 0xD60314 + +#define mmNIC2_QM0_CP_FENCE0_RDATA_4 0xD60318 + +#define mmNIC2_QM0_CP_FENCE1_RDATA_0 0xD6031C + +#define mmNIC2_QM0_CP_FENCE1_RDATA_1 0xD60320 + +#define mmNIC2_QM0_CP_FENCE1_RDATA_2 0xD60324 + +#define mmNIC2_QM0_CP_FENCE1_RDATA_3 0xD60328 + +#define mmNIC2_QM0_CP_FENCE1_RDATA_4 0xD6032C + +#define mmNIC2_QM0_CP_FENCE2_RDATA_0 0xD60330 + +#define mmNIC2_QM0_CP_FENCE2_RDATA_1 0xD60334 + +#define mmNIC2_QM0_CP_FENCE2_RDATA_2 0xD60338 + +#define 
mmNIC2_QM0_CP_FENCE2_RDATA_3 0xD6033C + +#define mmNIC2_QM0_CP_FENCE2_RDATA_4 0xD60340 + +#define mmNIC2_QM0_CP_FENCE3_RDATA_0 0xD60344 + +#define mmNIC2_QM0_CP_FENCE3_RDATA_1 0xD60348 + +#define mmNIC2_QM0_CP_FENCE3_RDATA_2 0xD6034C + +#define mmNIC2_QM0_CP_FENCE3_RDATA_3 0xD60350 + +#define mmNIC2_QM0_CP_FENCE3_RDATA_4 0xD60354 + +#define mmNIC2_QM0_CP_FENCE0_CNT_0 0xD60358 + +#define mmNIC2_QM0_CP_FENCE0_CNT_1 0xD6035C + +#define mmNIC2_QM0_CP_FENCE0_CNT_2 0xD60360 + +#define mmNIC2_QM0_CP_FENCE0_CNT_3 0xD60364 + +#define mmNIC2_QM0_CP_FENCE0_CNT_4 0xD60368 + +#define mmNIC2_QM0_CP_FENCE1_CNT_0 0xD6036C + +#define mmNIC2_QM0_CP_FENCE1_CNT_1 0xD60370 + +#define mmNIC2_QM0_CP_FENCE1_CNT_2 0xD60374 + +#define mmNIC2_QM0_CP_FENCE1_CNT_3 0xD60378 + +#define mmNIC2_QM0_CP_FENCE1_CNT_4 0xD6037C + +#define mmNIC2_QM0_CP_FENCE2_CNT_0 0xD60380 + +#define mmNIC2_QM0_CP_FENCE2_CNT_1 0xD60384 + +#define mmNIC2_QM0_CP_FENCE2_CNT_2 0xD60388 + +#define mmNIC2_QM0_CP_FENCE2_CNT_3 0xD6038C + +#define mmNIC2_QM0_CP_FENCE2_CNT_4 0xD60390 + +#define mmNIC2_QM0_CP_FENCE3_CNT_0 0xD60394 + +#define mmNIC2_QM0_CP_FENCE3_CNT_1 0xD60398 + +#define mmNIC2_QM0_CP_FENCE3_CNT_2 0xD6039C + +#define mmNIC2_QM0_CP_FENCE3_CNT_3 0xD603A0 + +#define mmNIC2_QM0_CP_FENCE3_CNT_4 0xD603A4 + +#define mmNIC2_QM0_CP_STS_0 0xD603A8 + +#define mmNIC2_QM0_CP_STS_1 0xD603AC + +#define mmNIC2_QM0_CP_STS_2 0xD603B0 + +#define mmNIC2_QM0_CP_STS_3 0xD603B4 + +#define mmNIC2_QM0_CP_STS_4 0xD603B8 + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_0 0xD603BC + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_1 0xD603C0 + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_2 0xD603C4 + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_3 0xD603C8 + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_4 0xD603CC + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_0 0xD603D0 + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_1 0xD603D4 + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_2 0xD603D8 + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_3 0xD603DC + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_4 0xD603E0 + 
+#define mmNIC2_QM0_CP_BARRIER_CFG_0 0xD603F4 + +#define mmNIC2_QM0_CP_BARRIER_CFG_1 0xD603F8 + +#define mmNIC2_QM0_CP_BARRIER_CFG_2 0xD603FC + +#define mmNIC2_QM0_CP_BARRIER_CFG_3 0xD60400 + +#define mmNIC2_QM0_CP_BARRIER_CFG_4 0xD60404 + +#define mmNIC2_QM0_CP_DBG_0_0 0xD60408 + +#define mmNIC2_QM0_CP_DBG_0_1 0xD6040C + +#define mmNIC2_QM0_CP_DBG_0_2 0xD60410 + +#define mmNIC2_QM0_CP_DBG_0_3 0xD60414 + +#define mmNIC2_QM0_CP_DBG_0_4 0xD60418 + +#define mmNIC2_QM0_CP_ARUSER_31_11_0 0xD6041C + +#define mmNIC2_QM0_CP_ARUSER_31_11_1 0xD60420 + +#define mmNIC2_QM0_CP_ARUSER_31_11_2 0xD60424 + +#define mmNIC2_QM0_CP_ARUSER_31_11_3 0xD60428 + +#define mmNIC2_QM0_CP_ARUSER_31_11_4 0xD6042C + +#define mmNIC2_QM0_CP_AWUSER_31_11_0 0xD60430 + +#define mmNIC2_QM0_CP_AWUSER_31_11_1 0xD60434 + +#define mmNIC2_QM0_CP_AWUSER_31_11_2 0xD60438 + +#define mmNIC2_QM0_CP_AWUSER_31_11_3 0xD6043C + +#define mmNIC2_QM0_CP_AWUSER_31_11_4 0xD60440 + +#define mmNIC2_QM0_ARB_CFG_0 0xD60A00 + +#define mmNIC2_QM0_ARB_CHOISE_Q_PUSH 0xD60A04 + +#define mmNIC2_QM0_ARB_WRR_WEIGHT_0 0xD60A08 + +#define mmNIC2_QM0_ARB_WRR_WEIGHT_1 0xD60A0C + +#define mmNIC2_QM0_ARB_WRR_WEIGHT_2 0xD60A10 + +#define mmNIC2_QM0_ARB_WRR_WEIGHT_3 0xD60A14 + +#define mmNIC2_QM0_ARB_CFG_1 0xD60A18 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_0 0xD60A20 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_1 0xD60A24 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_2 0xD60A28 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_3 0xD60A2C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_4 0xD60A30 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_5 0xD60A34 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_6 0xD60A38 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_7 0xD60A3C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_8 0xD60A40 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_9 0xD60A44 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_10 0xD60A48 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_11 0xD60A4C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_12 0xD60A50 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_13 0xD60A54 + 
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_14 0xD60A58 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_15 0xD60A5C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_16 0xD60A60 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_17 0xD60A64 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_18 0xD60A68 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_19 0xD60A6C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_20 0xD60A70 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_21 0xD60A74 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_22 0xD60A78 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_23 0xD60A7C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 0xD60A80 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_25 0xD60A84 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_26 0xD60A88 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_27 0xD60A8C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_28 0xD60A90 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_29 0xD60A94 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_30 0xD60A98 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_31 0xD60A9C + +#define mmNIC2_QM0_ARB_MST_CRED_INC 0xD60AA0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xD60AA4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xD60AA8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xD60AAC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xD60AB0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xD60AB4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xD60AB8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xD60ABC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xD60AC0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xD60AC4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xD60AC8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xD60ACC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xD60AD0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xD60AD4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xD60AD8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xD60ADC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xD60AE0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xD60AE4 + 
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xD60AE8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xD60AEC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xD60AF0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xD60AF4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xD60AF8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xD60AFC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xD60B00 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xD60B04 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xD60B08 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xD60B0C + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xD60B10 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xD60B14 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xD60B18 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xD60B1C + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xD60B20 + +#define mmNIC2_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xD60B28 + +#define mmNIC2_QM0_ARB_MST_SLAVE_EN 0xD60B2C + +#define mmNIC2_QM0_ARB_MST_QUIET_PER 0xD60B34 + +#define mmNIC2_QM0_ARB_SLV_CHOISE_WDT 0xD60B38 + +#define mmNIC2_QM0_ARB_SLV_ID 0xD60B3C + +#define mmNIC2_QM0_ARB_MSG_MAX_INFLIGHT 0xD60B44 + +#define mmNIC2_QM0_ARB_MSG_AWUSER_31_11 0xD60B48 + +#define mmNIC2_QM0_ARB_MSG_AWUSER_SEC_PROP 0xD60B4C + +#define mmNIC2_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xD60B50 + +#define mmNIC2_QM0_ARB_BASE_LO 0xD60B54 + +#define mmNIC2_QM0_ARB_BASE_HI 0xD60B58 + +#define mmNIC2_QM0_ARB_STATE_STS 0xD60B80 + +#define mmNIC2_QM0_ARB_CHOISE_FULLNESS_STS 0xD60B84 + +#define mmNIC2_QM0_ARB_MSG_STS 0xD60B88 + +#define mmNIC2_QM0_ARB_SLV_CHOISE_Q_HEAD 0xD60B8C + +#define mmNIC2_QM0_ARB_ERR_CAUSE 0xD60B9C + +#define mmNIC2_QM0_ARB_ERR_MSG_EN 0xD60BA0 + +#define mmNIC2_QM0_ARB_ERR_STS_DRP 0xD60BA8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_0 0xD60BB0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_1 0xD60BB4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_2 0xD60BB8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_3 0xD60BBC + +#define 
mmNIC2_QM0_ARB_MST_CRED_STS_4 0xD60BC0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_5 0xD60BC4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_6 0xD60BC8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_7 0xD60BCC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_8 0xD60BD0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_9 0xD60BD4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_10 0xD60BD8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_11 0xD60BDC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_12 0xD60BE0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_13 0xD60BE4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_14 0xD60BE8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_15 0xD60BEC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_16 0xD60BF0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_17 0xD60BF4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_18 0xD60BF8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_19 0xD60BFC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_20 0xD60C00 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_21 0xD60C04 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_22 0xD60C08 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_23 0xD60C0C + +#define mmNIC2_QM0_ARB_MST_CRED_STS_24 0xD60C10 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_25 0xD60C14 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_26 0xD60C18 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_27 0xD60C1C + +#define mmNIC2_QM0_ARB_MST_CRED_STS_28 0xD60C20 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_29 0xD60C24 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_30 0xD60C28 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_31 0xD60C2C + +#define mmNIC2_QM0_CGM_CFG 0xD60C70 + +#define mmNIC2_QM0_CGM_STS 0xD60C74 + +#define mmNIC2_QM0_CGM_CFG1 0xD60C78 + +#define mmNIC2_QM0_LOCAL_RANGE_BASE 0xD60C80 + +#define mmNIC2_QM0_LOCAL_RANGE_SIZE 0xD60C84 + +#define mmNIC2_QM0_CSMR_STRICT_PRIO_CFG 0xD60C90 + +#define mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_1 0xD60C94 + +#define mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_0 0xD60C98 + +#define mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_1 0xD60C9C + +#define mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_0 0xD60CA0 + +#define mmNIC2_QM0_GLBL_AXCACHE 0xD60CA4 + +#define mmNIC2_QM0_IND_GW_APB_CFG 0xD60CB0 + +#define 
mmNIC2_QM0_IND_GW_APB_WDATA 0xD60CB4 + +#define mmNIC2_QM0_IND_GW_APB_RDATA 0xD60CB8 + +#define mmNIC2_QM0_IND_GW_APB_STATUS 0xD60CBC + +#define mmNIC2_QM0_GLBL_ERR_ADDR_LO 0xD60CD0 + +#define mmNIC2_QM0_GLBL_ERR_ADDR_HI 0xD60CD4 + +#define mmNIC2_QM0_GLBL_ERR_WDATA 0xD60CD8 + +#define mmNIC2_QM0_GLBL_MEM_INIT_BUSY 0xD60D00 + +#endif /* ASIC_REG_NIC2_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h new file mode 100644 index 000000000000..b7f091ddc89c --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC2_QM1_REGS_H_ +#define ASIC_REG_NIC2_QM1_REGS_H_ + +/* + ***************************************** + * NIC2_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC2_QM1_GLBL_CFG0 0xD62000 + +#define mmNIC2_QM1_GLBL_CFG1 0xD62004 + +#define mmNIC2_QM1_GLBL_PROT 0xD62008 + +#define mmNIC2_QM1_GLBL_ERR_CFG 0xD6200C + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_0 0xD62010 + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_1 0xD62014 + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_2 0xD62018 + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_3 0xD6201C + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_4 0xD62020 + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0 0xD62024 + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1 0xD62028 + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2 0xD6202C + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3 0xD62030 + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4 0xD62034 + +#define mmNIC2_QM1_GLBL_STS0 0xD62038 + +#define mmNIC2_QM1_GLBL_STS1_0 0xD62040 + +#define mmNIC2_QM1_GLBL_STS1_1 0xD62044 + +#define mmNIC2_QM1_GLBL_STS1_2 0xD62048 + 
+#define mmNIC2_QM1_GLBL_STS1_3 0xD6204C + +#define mmNIC2_QM1_GLBL_STS1_4 0xD62050 + +#define mmNIC2_QM1_GLBL_MSG_EN_0 0xD62054 + +#define mmNIC2_QM1_GLBL_MSG_EN_1 0xD62058 + +#define mmNIC2_QM1_GLBL_MSG_EN_2 0xD6205C + +#define mmNIC2_QM1_GLBL_MSG_EN_3 0xD62060 + +#define mmNIC2_QM1_GLBL_MSG_EN_4 0xD62068 + +#define mmNIC2_QM1_PQ_BASE_LO_0 0xD62070 + +#define mmNIC2_QM1_PQ_BASE_LO_1 0xD62074 + +#define mmNIC2_QM1_PQ_BASE_LO_2 0xD62078 + +#define mmNIC2_QM1_PQ_BASE_LO_3 0xD6207C + +#define mmNIC2_QM1_PQ_BASE_HI_0 0xD62080 + +#define mmNIC2_QM1_PQ_BASE_HI_1 0xD62084 + +#define mmNIC2_QM1_PQ_BASE_HI_2 0xD62088 + +#define mmNIC2_QM1_PQ_BASE_HI_3 0xD6208C + +#define mmNIC2_QM1_PQ_SIZE_0 0xD62090 + +#define mmNIC2_QM1_PQ_SIZE_1 0xD62094 + +#define mmNIC2_QM1_PQ_SIZE_2 0xD62098 + +#define mmNIC2_QM1_PQ_SIZE_3 0xD6209C + +#define mmNIC2_QM1_PQ_PI_0 0xD620A0 + +#define mmNIC2_QM1_PQ_PI_1 0xD620A4 + +#define mmNIC2_QM1_PQ_PI_2 0xD620A8 + +#define mmNIC2_QM1_PQ_PI_3 0xD620AC + +#define mmNIC2_QM1_PQ_CI_0 0xD620B0 + +#define mmNIC2_QM1_PQ_CI_1 0xD620B4 + +#define mmNIC2_QM1_PQ_CI_2 0xD620B8 + +#define mmNIC2_QM1_PQ_CI_3 0xD620BC + +#define mmNIC2_QM1_PQ_CFG0_0 0xD620C0 + +#define mmNIC2_QM1_PQ_CFG0_1 0xD620C4 + +#define mmNIC2_QM1_PQ_CFG0_2 0xD620C8 + +#define mmNIC2_QM1_PQ_CFG0_3 0xD620CC + +#define mmNIC2_QM1_PQ_CFG1_0 0xD620D0 + +#define mmNIC2_QM1_PQ_CFG1_1 0xD620D4 + +#define mmNIC2_QM1_PQ_CFG1_2 0xD620D8 + +#define mmNIC2_QM1_PQ_CFG1_3 0xD620DC + +#define mmNIC2_QM1_PQ_ARUSER_31_11_0 0xD620E0 + +#define mmNIC2_QM1_PQ_ARUSER_31_11_1 0xD620E4 + +#define mmNIC2_QM1_PQ_ARUSER_31_11_2 0xD620E8 + +#define mmNIC2_QM1_PQ_ARUSER_31_11_3 0xD620EC + +#define mmNIC2_QM1_PQ_STS0_0 0xD620F0 + +#define mmNIC2_QM1_PQ_STS0_1 0xD620F4 + +#define mmNIC2_QM1_PQ_STS0_2 0xD620F8 + +#define mmNIC2_QM1_PQ_STS0_3 0xD620FC + +#define mmNIC2_QM1_PQ_STS1_0 0xD62100 + +#define mmNIC2_QM1_PQ_STS1_1 0xD62104 + +#define mmNIC2_QM1_PQ_STS1_2 0xD62108 + +#define mmNIC2_QM1_PQ_STS1_3 0xD6210C + +#define 
mmNIC2_QM1_CQ_CFG0_0 0xD62110 + +#define mmNIC2_QM1_CQ_CFG0_1 0xD62114 + +#define mmNIC2_QM1_CQ_CFG0_2 0xD62118 + +#define mmNIC2_QM1_CQ_CFG0_3 0xD6211C + +#define mmNIC2_QM1_CQ_CFG0_4 0xD62120 + +#define mmNIC2_QM1_CQ_CFG1_0 0xD62124 + +#define mmNIC2_QM1_CQ_CFG1_1 0xD62128 + +#define mmNIC2_QM1_CQ_CFG1_2 0xD6212C + +#define mmNIC2_QM1_CQ_CFG1_3 0xD62130 + +#define mmNIC2_QM1_CQ_CFG1_4 0xD62134 + +#define mmNIC2_QM1_CQ_ARUSER_31_11_0 0xD62138 + +#define mmNIC2_QM1_CQ_ARUSER_31_11_1 0xD6213C + +#define mmNIC2_QM1_CQ_ARUSER_31_11_2 0xD62140 + +#define mmNIC2_QM1_CQ_ARUSER_31_11_3 0xD62144 + +#define mmNIC2_QM1_CQ_ARUSER_31_11_4 0xD62148 + +#define mmNIC2_QM1_CQ_STS0_0 0xD6214C + +#define mmNIC2_QM1_CQ_STS0_1 0xD62150 + +#define mmNIC2_QM1_CQ_STS0_2 0xD62154 + +#define mmNIC2_QM1_CQ_STS0_3 0xD62158 + +#define mmNIC2_QM1_CQ_STS0_4 0xD6215C + +#define mmNIC2_QM1_CQ_STS1_0 0xD62160 + +#define mmNIC2_QM1_CQ_STS1_1 0xD62164 + +#define mmNIC2_QM1_CQ_STS1_2 0xD62168 + +#define mmNIC2_QM1_CQ_STS1_3 0xD6216C + +#define mmNIC2_QM1_CQ_STS1_4 0xD62170 + +#define mmNIC2_QM1_CQ_PTR_LO_0 0xD62174 + +#define mmNIC2_QM1_CQ_PTR_HI_0 0xD62178 + +#define mmNIC2_QM1_CQ_TSIZE_0 0xD6217C + +#define mmNIC2_QM1_CQ_CTL_0 0xD62180 + +#define mmNIC2_QM1_CQ_PTR_LO_1 0xD62184 + +#define mmNIC2_QM1_CQ_PTR_HI_1 0xD62188 + +#define mmNIC2_QM1_CQ_TSIZE_1 0xD6218C + +#define mmNIC2_QM1_CQ_CTL_1 0xD62190 + +#define mmNIC2_QM1_CQ_PTR_LO_2 0xD62194 + +#define mmNIC2_QM1_CQ_PTR_HI_2 0xD62198 + +#define mmNIC2_QM1_CQ_TSIZE_2 0xD6219C + +#define mmNIC2_QM1_CQ_CTL_2 0xD621A0 + +#define mmNIC2_QM1_CQ_PTR_LO_3 0xD621A4 + +#define mmNIC2_QM1_CQ_PTR_HI_3 0xD621A8 + +#define mmNIC2_QM1_CQ_TSIZE_3 0xD621AC + +#define mmNIC2_QM1_CQ_CTL_3 0xD621B0 + +#define mmNIC2_QM1_CQ_PTR_LO_4 0xD621B4 + +#define mmNIC2_QM1_CQ_PTR_HI_4 0xD621B8 + +#define mmNIC2_QM1_CQ_TSIZE_4 0xD621BC + +#define mmNIC2_QM1_CQ_CTL_4 0xD621C0 + +#define mmNIC2_QM1_CQ_PTR_LO_STS_0 0xD621C4 + +#define mmNIC2_QM1_CQ_PTR_LO_STS_1 0xD621C8 + +#define 
mmNIC2_QM1_CQ_PTR_LO_STS_2 0xD621CC + +#define mmNIC2_QM1_CQ_PTR_LO_STS_3 0xD621D0 + +#define mmNIC2_QM1_CQ_PTR_LO_STS_4 0xD621D4 + +#define mmNIC2_QM1_CQ_PTR_HI_STS_0 0xD621D8 + +#define mmNIC2_QM1_CQ_PTR_HI_STS_1 0xD621DC + +#define mmNIC2_QM1_CQ_PTR_HI_STS_2 0xD621E0 + +#define mmNIC2_QM1_CQ_PTR_HI_STS_3 0xD621E4 + +#define mmNIC2_QM1_CQ_PTR_HI_STS_4 0xD621E8 + +#define mmNIC2_QM1_CQ_TSIZE_STS_0 0xD621EC + +#define mmNIC2_QM1_CQ_TSIZE_STS_1 0xD621F0 + +#define mmNIC2_QM1_CQ_TSIZE_STS_2 0xD621F4 + +#define mmNIC2_QM1_CQ_TSIZE_STS_3 0xD621F8 + +#define mmNIC2_QM1_CQ_TSIZE_STS_4 0xD621FC + +#define mmNIC2_QM1_CQ_CTL_STS_0 0xD62200 + +#define mmNIC2_QM1_CQ_CTL_STS_1 0xD62204 + +#define mmNIC2_QM1_CQ_CTL_STS_2 0xD62208 + +#define mmNIC2_QM1_CQ_CTL_STS_3 0xD6220C + +#define mmNIC2_QM1_CQ_CTL_STS_4 0xD62210 + +#define mmNIC2_QM1_CQ_IFIFO_CNT_0 0xD62214 + +#define mmNIC2_QM1_CQ_IFIFO_CNT_1 0xD62218 + +#define mmNIC2_QM1_CQ_IFIFO_CNT_2 0xD6221C + +#define mmNIC2_QM1_CQ_IFIFO_CNT_3 0xD62220 + +#define mmNIC2_QM1_CQ_IFIFO_CNT_4 0xD62224 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_0 0xD62228 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_1 0xD6222C + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_2 0xD62230 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_3 0xD62234 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_4 0xD62238 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_0 0xD6223C + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_1 0xD62240 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_2 0xD62244 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_3 0xD62248 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_4 0xD6224C + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_0 0xD62250 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_1 0xD62254 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_2 0xD62258 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_3 0xD6225C + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_4 0xD62260 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_0 0xD62264 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_1 0xD62268 + +#define 
mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_2 0xD6226C + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_3 0xD62270 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_4 0xD62274 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_0 0xD62278 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_1 0xD6227C + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 0xD62280 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_3 0xD62284 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_4 0xD62288 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_0 0xD6228C + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_1 0xD62290 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_2 0xD62294 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_3 0xD62298 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_4 0xD6229C + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_0 0xD622A0 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_1 0xD622A4 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_2 0xD622A8 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_3 0xD622AC + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_4 0xD622B0 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_0 0xD622B4 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_1 0xD622B8 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_2 0xD622BC + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_3 0xD622C0 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_4 0xD622C4 + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_0 0xD622C8 + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_1 0xD622CC + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_2 0xD622D0 + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_3 0xD622D4 + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_4 0xD622D8 + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xD622E0 + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xD622E4 + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xD622E8 + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xD622EC + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xD622F0 + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xD622F4 + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xD622F8 + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xD622FC + +#define 
mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xD62300 + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xD62304 + +#define mmNIC2_QM1_CP_FENCE0_RDATA_0 0xD62308 + +#define mmNIC2_QM1_CP_FENCE0_RDATA_1 0xD6230C + +#define mmNIC2_QM1_CP_FENCE0_RDATA_2 0xD62310 + +#define mmNIC2_QM1_CP_FENCE0_RDATA_3 0xD62314 + +#define mmNIC2_QM1_CP_FENCE0_RDATA_4 0xD62318 + +#define mmNIC2_QM1_CP_FENCE1_RDATA_0 0xD6231C + +#define mmNIC2_QM1_CP_FENCE1_RDATA_1 0xD62320 + +#define mmNIC2_QM1_CP_FENCE1_RDATA_2 0xD62324 + +#define mmNIC2_QM1_CP_FENCE1_RDATA_3 0xD62328 + +#define mmNIC2_QM1_CP_FENCE1_RDATA_4 0xD6232C + +#define mmNIC2_QM1_CP_FENCE2_RDATA_0 0xD62330 + +#define mmNIC2_QM1_CP_FENCE2_RDATA_1 0xD62334 + +#define mmNIC2_QM1_CP_FENCE2_RDATA_2 0xD62338 + +#define mmNIC2_QM1_CP_FENCE2_RDATA_3 0xD6233C + +#define mmNIC2_QM1_CP_FENCE2_RDATA_4 0xD62340 + +#define mmNIC2_QM1_CP_FENCE3_RDATA_0 0xD62344 + +#define mmNIC2_QM1_CP_FENCE3_RDATA_1 0xD62348 + +#define mmNIC2_QM1_CP_FENCE3_RDATA_2 0xD6234C + +#define mmNIC2_QM1_CP_FENCE3_RDATA_3 0xD62350 + +#define mmNIC2_QM1_CP_FENCE3_RDATA_4 0xD62354 + +#define mmNIC2_QM1_CP_FENCE0_CNT_0 0xD62358 + +#define mmNIC2_QM1_CP_FENCE0_CNT_1 0xD6235C + +#define mmNIC2_QM1_CP_FENCE0_CNT_2 0xD62360 + +#define mmNIC2_QM1_CP_FENCE0_CNT_3 0xD62364 + +#define mmNIC2_QM1_CP_FENCE0_CNT_4 0xD62368 + +#define mmNIC2_QM1_CP_FENCE1_CNT_0 0xD6236C + +#define mmNIC2_QM1_CP_FENCE1_CNT_1 0xD62370 + +#define mmNIC2_QM1_CP_FENCE1_CNT_2 0xD62374 + +#define mmNIC2_QM1_CP_FENCE1_CNT_3 0xD62378 + +#define mmNIC2_QM1_CP_FENCE1_CNT_4 0xD6237C + +#define mmNIC2_QM1_CP_FENCE2_CNT_0 0xD62380 + +#define mmNIC2_QM1_CP_FENCE2_CNT_1 0xD62384 + +#define mmNIC2_QM1_CP_FENCE2_CNT_2 0xD62388 + +#define mmNIC2_QM1_CP_FENCE2_CNT_3 0xD6238C + +#define mmNIC2_QM1_CP_FENCE2_CNT_4 0xD62390 + +#define mmNIC2_QM1_CP_FENCE3_CNT_0 0xD62394 + +#define mmNIC2_QM1_CP_FENCE3_CNT_1 0xD62398 + +#define mmNIC2_QM1_CP_FENCE3_CNT_2 0xD6239C + +#define mmNIC2_QM1_CP_FENCE3_CNT_3 0xD623A0 + +#define 
mmNIC2_QM1_CP_FENCE3_CNT_4 0xD623A4 + +#define mmNIC2_QM1_CP_STS_0 0xD623A8 + +#define mmNIC2_QM1_CP_STS_1 0xD623AC + +#define mmNIC2_QM1_CP_STS_2 0xD623B0 + +#define mmNIC2_QM1_CP_STS_3 0xD623B4 + +#define mmNIC2_QM1_CP_STS_4 0xD623B8 + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_0 0xD623BC + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_1 0xD623C0 + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_2 0xD623C4 + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_3 0xD623C8 + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_4 0xD623CC + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_0 0xD623D0 + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_1 0xD623D4 + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_2 0xD623D8 + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_3 0xD623DC + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_4 0xD623E0 + +#define mmNIC2_QM1_CP_BARRIER_CFG_0 0xD623F4 + +#define mmNIC2_QM1_CP_BARRIER_CFG_1 0xD623F8 + +#define mmNIC2_QM1_CP_BARRIER_CFG_2 0xD623FC + +#define mmNIC2_QM1_CP_BARRIER_CFG_3 0xD62400 + +#define mmNIC2_QM1_CP_BARRIER_CFG_4 0xD62404 + +#define mmNIC2_QM1_CP_DBG_0_0 0xD62408 + +#define mmNIC2_QM1_CP_DBG_0_1 0xD6240C + +#define mmNIC2_QM1_CP_DBG_0_2 0xD62410 + +#define mmNIC2_QM1_CP_DBG_0_3 0xD62414 + +#define mmNIC2_QM1_CP_DBG_0_4 0xD62418 + +#define mmNIC2_QM1_CP_ARUSER_31_11_0 0xD6241C + +#define mmNIC2_QM1_CP_ARUSER_31_11_1 0xD62420 + +#define mmNIC2_QM1_CP_ARUSER_31_11_2 0xD62424 + +#define mmNIC2_QM1_CP_ARUSER_31_11_3 0xD62428 + +#define mmNIC2_QM1_CP_ARUSER_31_11_4 0xD6242C + +#define mmNIC2_QM1_CP_AWUSER_31_11_0 0xD62430 + +#define mmNIC2_QM1_CP_AWUSER_31_11_1 0xD62434 + +#define mmNIC2_QM1_CP_AWUSER_31_11_2 0xD62438 + +#define mmNIC2_QM1_CP_AWUSER_31_11_3 0xD6243C + +#define mmNIC2_QM1_CP_AWUSER_31_11_4 0xD62440 + +#define mmNIC2_QM1_ARB_CFG_0 0xD62A00 + +#define mmNIC2_QM1_ARB_CHOISE_Q_PUSH 0xD62A04 + +#define mmNIC2_QM1_ARB_WRR_WEIGHT_0 0xD62A08 + +#define mmNIC2_QM1_ARB_WRR_WEIGHT_1 0xD62A0C + +#define mmNIC2_QM1_ARB_WRR_WEIGHT_2 0xD62A10 + +#define mmNIC2_QM1_ARB_WRR_WEIGHT_3 0xD62A14 + +#define 
mmNIC2_QM1_ARB_CFG_1 0xD62A18 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_0 0xD62A20 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_1 0xD62A24 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_2 0xD62A28 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_3 0xD62A2C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_4 0xD62A30 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_5 0xD62A34 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_6 0xD62A38 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_7 0xD62A3C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_8 0xD62A40 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_9 0xD62A44 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_10 0xD62A48 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_11 0xD62A4C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_12 0xD62A50 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_13 0xD62A54 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_14 0xD62A58 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_15 0xD62A5C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_16 0xD62A60 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_17 0xD62A64 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_18 0xD62A68 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_19 0xD62A6C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_20 0xD62A70 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_21 0xD62A74 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_22 0xD62A78 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_23 0xD62A7C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 0xD62A80 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_25 0xD62A84 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_26 0xD62A88 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_27 0xD62A8C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_28 0xD62A90 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_29 0xD62A94 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_30 0xD62A98 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_31 0xD62A9C + +#define mmNIC2_QM1_ARB_MST_CRED_INC 0xD62AA0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xD62AA4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xD62AA8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xD62AAC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xD62AB0 + 
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xD62AB4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xD62AB8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xD62ABC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xD62AC0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xD62AC4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xD62AC8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xD62ACC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xD62AD0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xD62AD4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xD62AD8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xD62ADC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xD62AE0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xD62AE4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xD62AE8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xD62AEC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xD62AF0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xD62AF4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xD62AF8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xD62AFC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xD62B00 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xD62B04 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xD62B08 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xD62B0C + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xD62B10 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xD62B14 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xD62B18 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xD62B1C + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xD62B20 + +#define mmNIC2_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xD62B28 + +#define mmNIC2_QM1_ARB_MST_SLAVE_EN 0xD62B2C + +#define mmNIC2_QM1_ARB_MST_QUIET_PER 0xD62B34 + +#define mmNIC2_QM1_ARB_SLV_CHOISE_WDT 0xD62B38 + +#define mmNIC2_QM1_ARB_SLV_ID 0xD62B3C + +#define mmNIC2_QM1_ARB_MSG_MAX_INFLIGHT 0xD62B44 + +#define mmNIC2_QM1_ARB_MSG_AWUSER_31_11 0xD62B48 + 
+#define mmNIC2_QM1_ARB_MSG_AWUSER_SEC_PROP 0xD62B4C + +#define mmNIC2_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xD62B50 + +#define mmNIC2_QM1_ARB_BASE_LO 0xD62B54 + +#define mmNIC2_QM1_ARB_BASE_HI 0xD62B58 + +#define mmNIC2_QM1_ARB_STATE_STS 0xD62B80 + +#define mmNIC2_QM1_ARB_CHOISE_FULLNESS_STS 0xD62B84 + +#define mmNIC2_QM1_ARB_MSG_STS 0xD62B88 + +#define mmNIC2_QM1_ARB_SLV_CHOISE_Q_HEAD 0xD62B8C + +#define mmNIC2_QM1_ARB_ERR_CAUSE 0xD62B9C + +#define mmNIC2_QM1_ARB_ERR_MSG_EN 0xD62BA0 + +#define mmNIC2_QM1_ARB_ERR_STS_DRP 0xD62BA8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_0 0xD62BB0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_1 0xD62BB4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_2 0xD62BB8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_3 0xD62BBC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_4 0xD62BC0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_5 0xD62BC4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_6 0xD62BC8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_7 0xD62BCC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_8 0xD62BD0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_9 0xD62BD4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_10 0xD62BD8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_11 0xD62BDC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_12 0xD62BE0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_13 0xD62BE4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_14 0xD62BE8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_15 0xD62BEC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_16 0xD62BF0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_17 0xD62BF4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_18 0xD62BF8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_19 0xD62BFC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_20 0xD62C00 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_21 0xD62C04 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_22 0xD62C08 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_23 0xD62C0C + +#define mmNIC2_QM1_ARB_MST_CRED_STS_24 0xD62C10 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_25 0xD62C14 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_26 0xD62C18 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_27 0xD62C1C + +#define mmNIC2_QM1_ARB_MST_CRED_STS_28 
0xD62C20 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_29 0xD62C24 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_30 0xD62C28 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_31 0xD62C2C + +#define mmNIC2_QM1_CGM_CFG 0xD62C70 + +#define mmNIC2_QM1_CGM_STS 0xD62C74 + +#define mmNIC2_QM1_CGM_CFG1 0xD62C78 + +#define mmNIC2_QM1_LOCAL_RANGE_BASE 0xD62C80 + +#define mmNIC2_QM1_LOCAL_RANGE_SIZE 0xD62C84 + +#define mmNIC2_QM1_CSMR_STRICT_PRIO_CFG 0xD62C90 + +#define mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_1 0xD62C94 + +#define mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_0 0xD62C98 + +#define mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_1 0xD62C9C + +#define mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_0 0xD62CA0 + +#define mmNIC2_QM1_GLBL_AXCACHE 0xD62CA4 + +#define mmNIC2_QM1_IND_GW_APB_CFG 0xD62CB0 + +#define mmNIC2_QM1_IND_GW_APB_WDATA 0xD62CB4 + +#define mmNIC2_QM1_IND_GW_APB_RDATA 0xD62CB8 + +#define mmNIC2_QM1_IND_GW_APB_STATUS 0xD62CBC + +#define mmNIC2_QM1_GLBL_ERR_ADDR_LO 0xD62CD0 + +#define mmNIC2_QM1_GLBL_ERR_ADDR_HI 0xD62CD4 + +#define mmNIC2_QM1_GLBL_ERR_WDATA 0xD62CD8 + +#define mmNIC2_QM1_GLBL_MEM_INIT_BUSY 0xD62D00 + +#endif /* ASIC_REG_NIC2_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h new file mode 100644 index 000000000000..4712cc62b009 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC3_QM0_REGS_H_ +#define ASIC_REG_NIC3_QM0_REGS_H_ + +/* + ***************************************** + * NIC3_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC3_QM0_GLBL_CFG0 0xDA0000 + +#define mmNIC3_QM0_GLBL_CFG1 0xDA0004 + +#define mmNIC3_QM0_GLBL_PROT 0xDA0008 + +#define mmNIC3_QM0_GLBL_ERR_CFG 0xDA000C + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_0 0xDA0010 + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_1 0xDA0014 + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_2 0xDA0018 + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_3 0xDA001C + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_4 0xDA0020 + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0 0xDA0024 + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1 0xDA0028 + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2 0xDA002C + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3 0xDA0030 + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4 0xDA0034 + +#define mmNIC3_QM0_GLBL_STS0 0xDA0038 + +#define mmNIC3_QM0_GLBL_STS1_0 0xDA0040 + +#define mmNIC3_QM0_GLBL_STS1_1 0xDA0044 + +#define mmNIC3_QM0_GLBL_STS1_2 0xDA0048 + +#define mmNIC3_QM0_GLBL_STS1_3 0xDA004C + +#define mmNIC3_QM0_GLBL_STS1_4 0xDA0050 + +#define mmNIC3_QM0_GLBL_MSG_EN_0 0xDA0054 + +#define mmNIC3_QM0_GLBL_MSG_EN_1 0xDA0058 + +#define mmNIC3_QM0_GLBL_MSG_EN_2 0xDA005C + +#define mmNIC3_QM0_GLBL_MSG_EN_3 0xDA0060 + +#define mmNIC3_QM0_GLBL_MSG_EN_4 0xDA0068 + +#define mmNIC3_QM0_PQ_BASE_LO_0 0xDA0070 + +#define mmNIC3_QM0_PQ_BASE_LO_1 0xDA0074 + +#define mmNIC3_QM0_PQ_BASE_LO_2 0xDA0078 + +#define mmNIC3_QM0_PQ_BASE_LO_3 0xDA007C + +#define mmNIC3_QM0_PQ_BASE_HI_0 0xDA0080 + +#define mmNIC3_QM0_PQ_BASE_HI_1 0xDA0084 + +#define mmNIC3_QM0_PQ_BASE_HI_2 0xDA0088 + +#define mmNIC3_QM0_PQ_BASE_HI_3 0xDA008C + +#define mmNIC3_QM0_PQ_SIZE_0 0xDA0090 + +#define mmNIC3_QM0_PQ_SIZE_1 0xDA0094 + +#define 
mmNIC3_QM0_PQ_SIZE_2 0xDA0098 + +#define mmNIC3_QM0_PQ_SIZE_3 0xDA009C + +#define mmNIC3_QM0_PQ_PI_0 0xDA00A0 + +#define mmNIC3_QM0_PQ_PI_1 0xDA00A4 + +#define mmNIC3_QM0_PQ_PI_2 0xDA00A8 + +#define mmNIC3_QM0_PQ_PI_3 0xDA00AC + +#define mmNIC3_QM0_PQ_CI_0 0xDA00B0 + +#define mmNIC3_QM0_PQ_CI_1 0xDA00B4 + +#define mmNIC3_QM0_PQ_CI_2 0xDA00B8 + +#define mmNIC3_QM0_PQ_CI_3 0xDA00BC + +#define mmNIC3_QM0_PQ_CFG0_0 0xDA00C0 + +#define mmNIC3_QM0_PQ_CFG0_1 0xDA00C4 + +#define mmNIC3_QM0_PQ_CFG0_2 0xDA00C8 + +#define mmNIC3_QM0_PQ_CFG0_3 0xDA00CC + +#define mmNIC3_QM0_PQ_CFG1_0 0xDA00D0 + +#define mmNIC3_QM0_PQ_CFG1_1 0xDA00D4 + +#define mmNIC3_QM0_PQ_CFG1_2 0xDA00D8 + +#define mmNIC3_QM0_PQ_CFG1_3 0xDA00DC + +#define mmNIC3_QM0_PQ_ARUSER_31_11_0 0xDA00E0 + +#define mmNIC3_QM0_PQ_ARUSER_31_11_1 0xDA00E4 + +#define mmNIC3_QM0_PQ_ARUSER_31_11_2 0xDA00E8 + +#define mmNIC3_QM0_PQ_ARUSER_31_11_3 0xDA00EC + +#define mmNIC3_QM0_PQ_STS0_0 0xDA00F0 + +#define mmNIC3_QM0_PQ_STS0_1 0xDA00F4 + +#define mmNIC3_QM0_PQ_STS0_2 0xDA00F8 + +#define mmNIC3_QM0_PQ_STS0_3 0xDA00FC + +#define mmNIC3_QM0_PQ_STS1_0 0xDA0100 + +#define mmNIC3_QM0_PQ_STS1_1 0xDA0104 + +#define mmNIC3_QM0_PQ_STS1_2 0xDA0108 + +#define mmNIC3_QM0_PQ_STS1_3 0xDA010C + +#define mmNIC3_QM0_CQ_CFG0_0 0xDA0110 + +#define mmNIC3_QM0_CQ_CFG0_1 0xDA0114 + +#define mmNIC3_QM0_CQ_CFG0_2 0xDA0118 + +#define mmNIC3_QM0_CQ_CFG0_3 0xDA011C + +#define mmNIC3_QM0_CQ_CFG0_4 0xDA0120 + +#define mmNIC3_QM0_CQ_CFG1_0 0xDA0124 + +#define mmNIC3_QM0_CQ_CFG1_1 0xDA0128 + +#define mmNIC3_QM0_CQ_CFG1_2 0xDA012C + +#define mmNIC3_QM0_CQ_CFG1_3 0xDA0130 + +#define mmNIC3_QM0_CQ_CFG1_4 0xDA0134 + +#define mmNIC3_QM0_CQ_ARUSER_31_11_0 0xDA0138 + +#define mmNIC3_QM0_CQ_ARUSER_31_11_1 0xDA013C + +#define mmNIC3_QM0_CQ_ARUSER_31_11_2 0xDA0140 + +#define mmNIC3_QM0_CQ_ARUSER_31_11_3 0xDA0144 + +#define mmNIC3_QM0_CQ_ARUSER_31_11_4 0xDA0148 + +#define mmNIC3_QM0_CQ_STS0_0 0xDA014C + +#define mmNIC3_QM0_CQ_STS0_1 0xDA0150 + +#define 
mmNIC3_QM0_CQ_STS0_2 0xDA0154 + +#define mmNIC3_QM0_CQ_STS0_3 0xDA0158 + +#define mmNIC3_QM0_CQ_STS0_4 0xDA015C + +#define mmNIC3_QM0_CQ_STS1_0 0xDA0160 + +#define mmNIC3_QM0_CQ_STS1_1 0xDA0164 + +#define mmNIC3_QM0_CQ_STS1_2 0xDA0168 + +#define mmNIC3_QM0_CQ_STS1_3 0xDA016C + +#define mmNIC3_QM0_CQ_STS1_4 0xDA0170 + +#define mmNIC3_QM0_CQ_PTR_LO_0 0xDA0174 + +#define mmNIC3_QM0_CQ_PTR_HI_0 0xDA0178 + +#define mmNIC3_QM0_CQ_TSIZE_0 0xDA017C + +#define mmNIC3_QM0_CQ_CTL_0 0xDA0180 + +#define mmNIC3_QM0_CQ_PTR_LO_1 0xDA0184 + +#define mmNIC3_QM0_CQ_PTR_HI_1 0xDA0188 + +#define mmNIC3_QM0_CQ_TSIZE_1 0xDA018C + +#define mmNIC3_QM0_CQ_CTL_1 0xDA0190 + +#define mmNIC3_QM0_CQ_PTR_LO_2 0xDA0194 + +#define mmNIC3_QM0_CQ_PTR_HI_2 0xDA0198 + +#define mmNIC3_QM0_CQ_TSIZE_2 0xDA019C + +#define mmNIC3_QM0_CQ_CTL_2 0xDA01A0 + +#define mmNIC3_QM0_CQ_PTR_LO_3 0xDA01A4 + +#define mmNIC3_QM0_CQ_PTR_HI_3 0xDA01A8 + +#define mmNIC3_QM0_CQ_TSIZE_3 0xDA01AC + +#define mmNIC3_QM0_CQ_CTL_3 0xDA01B0 + +#define mmNIC3_QM0_CQ_PTR_LO_4 0xDA01B4 + +#define mmNIC3_QM0_CQ_PTR_HI_4 0xDA01B8 + +#define mmNIC3_QM0_CQ_TSIZE_4 0xDA01BC + +#define mmNIC3_QM0_CQ_CTL_4 0xDA01C0 + +#define mmNIC3_QM0_CQ_PTR_LO_STS_0 0xDA01C4 + +#define mmNIC3_QM0_CQ_PTR_LO_STS_1 0xDA01C8 + +#define mmNIC3_QM0_CQ_PTR_LO_STS_2 0xDA01CC + +#define mmNIC3_QM0_CQ_PTR_LO_STS_3 0xDA01D0 + +#define mmNIC3_QM0_CQ_PTR_LO_STS_4 0xDA01D4 + +#define mmNIC3_QM0_CQ_PTR_HI_STS_0 0xDA01D8 + +#define mmNIC3_QM0_CQ_PTR_HI_STS_1 0xDA01DC + +#define mmNIC3_QM0_CQ_PTR_HI_STS_2 0xDA01E0 + +#define mmNIC3_QM0_CQ_PTR_HI_STS_3 0xDA01E4 + +#define mmNIC3_QM0_CQ_PTR_HI_STS_4 0xDA01E8 + +#define mmNIC3_QM0_CQ_TSIZE_STS_0 0xDA01EC + +#define mmNIC3_QM0_CQ_TSIZE_STS_1 0xDA01F0 + +#define mmNIC3_QM0_CQ_TSIZE_STS_2 0xDA01F4 + +#define mmNIC3_QM0_CQ_TSIZE_STS_3 0xDA01F8 + +#define mmNIC3_QM0_CQ_TSIZE_STS_4 0xDA01FC + +#define mmNIC3_QM0_CQ_CTL_STS_0 0xDA0200 + +#define mmNIC3_QM0_CQ_CTL_STS_1 0xDA0204 + +#define mmNIC3_QM0_CQ_CTL_STS_2 0xDA0208 + +#define 
mmNIC3_QM0_CQ_CTL_STS_3 0xDA020C + +#define mmNIC3_QM0_CQ_CTL_STS_4 0xDA0210 + +#define mmNIC3_QM0_CQ_IFIFO_CNT_0 0xDA0214 + +#define mmNIC3_QM0_CQ_IFIFO_CNT_1 0xDA0218 + +#define mmNIC3_QM0_CQ_IFIFO_CNT_2 0xDA021C + +#define mmNIC3_QM0_CQ_IFIFO_CNT_3 0xDA0220 + +#define mmNIC3_QM0_CQ_IFIFO_CNT_4 0xDA0224 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_0 0xDA0228 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_1 0xDA022C + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_2 0xDA0230 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_3 0xDA0234 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_4 0xDA0238 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_0 0xDA023C + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_1 0xDA0240 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_2 0xDA0244 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_3 0xDA0248 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_4 0xDA024C + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_0 0xDA0250 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_1 0xDA0254 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_2 0xDA0258 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_3 0xDA025C + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_4 0xDA0260 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_0 0xDA0264 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_1 0xDA0268 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_2 0xDA026C + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_3 0xDA0270 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_4 0xDA0274 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_0 0xDA0278 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_1 0xDA027C + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 0xDA0280 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_3 0xDA0284 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_4 0xDA0288 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_0 0xDA028C + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_1 0xDA0290 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_2 0xDA0294 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_3 0xDA0298 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_4 0xDA029C + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_0 0xDA02A0 + +#define 
mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_1 0xDA02A4 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_2 0xDA02A8 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_3 0xDA02AC + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_4 0xDA02B0 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_0 0xDA02B4 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_1 0xDA02B8 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_2 0xDA02BC + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_3 0xDA02C0 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_4 0xDA02C4 + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_0 0xDA02C8 + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_1 0xDA02CC + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_2 0xDA02D0 + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_3 0xDA02D4 + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_4 0xDA02D8 + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xDA02E0 + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xDA02E4 + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xDA02E8 + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xDA02EC + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xDA02F0 + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xDA02F4 + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xDA02F8 + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xDA02FC + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xDA0300 + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xDA0304 + +#define mmNIC3_QM0_CP_FENCE0_RDATA_0 0xDA0308 + +#define mmNIC3_QM0_CP_FENCE0_RDATA_1 0xDA030C + +#define mmNIC3_QM0_CP_FENCE0_RDATA_2 0xDA0310 + +#define mmNIC3_QM0_CP_FENCE0_RDATA_3 0xDA0314 + +#define mmNIC3_QM0_CP_FENCE0_RDATA_4 0xDA0318 + +#define mmNIC3_QM0_CP_FENCE1_RDATA_0 0xDA031C + +#define mmNIC3_QM0_CP_FENCE1_RDATA_1 0xDA0320 + +#define mmNIC3_QM0_CP_FENCE1_RDATA_2 0xDA0324 + +#define mmNIC3_QM0_CP_FENCE1_RDATA_3 0xDA0328 + +#define mmNIC3_QM0_CP_FENCE1_RDATA_4 0xDA032C + +#define mmNIC3_QM0_CP_FENCE2_RDATA_0 0xDA0330 + +#define mmNIC3_QM0_CP_FENCE2_RDATA_1 0xDA0334 + +#define mmNIC3_QM0_CP_FENCE2_RDATA_2 0xDA0338 + +#define 
mmNIC3_QM0_CP_FENCE2_RDATA_3 0xDA033C + +#define mmNIC3_QM0_CP_FENCE2_RDATA_4 0xDA0340 + +#define mmNIC3_QM0_CP_FENCE3_RDATA_0 0xDA0344 + +#define mmNIC3_QM0_CP_FENCE3_RDATA_1 0xDA0348 + +#define mmNIC3_QM0_CP_FENCE3_RDATA_2 0xDA034C + +#define mmNIC3_QM0_CP_FENCE3_RDATA_3 0xDA0350 + +#define mmNIC3_QM0_CP_FENCE3_RDATA_4 0xDA0354 + +#define mmNIC3_QM0_CP_FENCE0_CNT_0 0xDA0358 + +#define mmNIC3_QM0_CP_FENCE0_CNT_1 0xDA035C + +#define mmNIC3_QM0_CP_FENCE0_CNT_2 0xDA0360 + +#define mmNIC3_QM0_CP_FENCE0_CNT_3 0xDA0364 + +#define mmNIC3_QM0_CP_FENCE0_CNT_4 0xDA0368 + +#define mmNIC3_QM0_CP_FENCE1_CNT_0 0xDA036C + +#define mmNIC3_QM0_CP_FENCE1_CNT_1 0xDA0370 + +#define mmNIC3_QM0_CP_FENCE1_CNT_2 0xDA0374 + +#define mmNIC3_QM0_CP_FENCE1_CNT_3 0xDA0378 + +#define mmNIC3_QM0_CP_FENCE1_CNT_4 0xDA037C + +#define mmNIC3_QM0_CP_FENCE2_CNT_0 0xDA0380 + +#define mmNIC3_QM0_CP_FENCE2_CNT_1 0xDA0384 + +#define mmNIC3_QM0_CP_FENCE2_CNT_2 0xDA0388 + +#define mmNIC3_QM0_CP_FENCE2_CNT_3 0xDA038C + +#define mmNIC3_QM0_CP_FENCE2_CNT_4 0xDA0390 + +#define mmNIC3_QM0_CP_FENCE3_CNT_0 0xDA0394 + +#define mmNIC3_QM0_CP_FENCE3_CNT_1 0xDA0398 + +#define mmNIC3_QM0_CP_FENCE3_CNT_2 0xDA039C + +#define mmNIC3_QM0_CP_FENCE3_CNT_3 0xDA03A0 + +#define mmNIC3_QM0_CP_FENCE3_CNT_4 0xDA03A4 + +#define mmNIC3_QM0_CP_STS_0 0xDA03A8 + +#define mmNIC3_QM0_CP_STS_1 0xDA03AC + +#define mmNIC3_QM0_CP_STS_2 0xDA03B0 + +#define mmNIC3_QM0_CP_STS_3 0xDA03B4 + +#define mmNIC3_QM0_CP_STS_4 0xDA03B8 + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_0 0xDA03BC + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_1 0xDA03C0 + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_2 0xDA03C4 + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_3 0xDA03C8 + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_4 0xDA03CC + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_0 0xDA03D0 + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_1 0xDA03D4 + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_2 0xDA03D8 + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_3 0xDA03DC + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_4 0xDA03E0 + 
+#define mmNIC3_QM0_CP_BARRIER_CFG_0 0xDA03F4 + +#define mmNIC3_QM0_CP_BARRIER_CFG_1 0xDA03F8 + +#define mmNIC3_QM0_CP_BARRIER_CFG_2 0xDA03FC + +#define mmNIC3_QM0_CP_BARRIER_CFG_3 0xDA0400 + +#define mmNIC3_QM0_CP_BARRIER_CFG_4 0xDA0404 + +#define mmNIC3_QM0_CP_DBG_0_0 0xDA0408 + +#define mmNIC3_QM0_CP_DBG_0_1 0xDA040C + +#define mmNIC3_QM0_CP_DBG_0_2 0xDA0410 + +#define mmNIC3_QM0_CP_DBG_0_3 0xDA0414 + +#define mmNIC3_QM0_CP_DBG_0_4 0xDA0418 + +#define mmNIC3_QM0_CP_ARUSER_31_11_0 0xDA041C + +#define mmNIC3_QM0_CP_ARUSER_31_11_1 0xDA0420 + +#define mmNIC3_QM0_CP_ARUSER_31_11_2 0xDA0424 + +#define mmNIC3_QM0_CP_ARUSER_31_11_3 0xDA0428 + +#define mmNIC3_QM0_CP_ARUSER_31_11_4 0xDA042C + +#define mmNIC3_QM0_CP_AWUSER_31_11_0 0xDA0430 + +#define mmNIC3_QM0_CP_AWUSER_31_11_1 0xDA0434 + +#define mmNIC3_QM0_CP_AWUSER_31_11_2 0xDA0438 + +#define mmNIC3_QM0_CP_AWUSER_31_11_3 0xDA043C + +#define mmNIC3_QM0_CP_AWUSER_31_11_4 0xDA0440 + +#define mmNIC3_QM0_ARB_CFG_0 0xDA0A00 + +#define mmNIC3_QM0_ARB_CHOISE_Q_PUSH 0xDA0A04 + +#define mmNIC3_QM0_ARB_WRR_WEIGHT_0 0xDA0A08 + +#define mmNIC3_QM0_ARB_WRR_WEIGHT_1 0xDA0A0C + +#define mmNIC3_QM0_ARB_WRR_WEIGHT_2 0xDA0A10 + +#define mmNIC3_QM0_ARB_WRR_WEIGHT_3 0xDA0A14 + +#define mmNIC3_QM0_ARB_CFG_1 0xDA0A18 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_0 0xDA0A20 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_1 0xDA0A24 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_2 0xDA0A28 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_3 0xDA0A2C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_4 0xDA0A30 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_5 0xDA0A34 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_6 0xDA0A38 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_7 0xDA0A3C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_8 0xDA0A40 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_9 0xDA0A44 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_10 0xDA0A48 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_11 0xDA0A4C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_12 0xDA0A50 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_13 0xDA0A54 + 
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_14 0xDA0A58 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_15 0xDA0A5C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_16 0xDA0A60 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_17 0xDA0A64 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_18 0xDA0A68 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_19 0xDA0A6C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_20 0xDA0A70 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_21 0xDA0A74 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_22 0xDA0A78 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_23 0xDA0A7C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 0xDA0A80 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_25 0xDA0A84 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_26 0xDA0A88 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_27 0xDA0A8C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_28 0xDA0A90 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_29 0xDA0A94 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_30 0xDA0A98 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_31 0xDA0A9C + +#define mmNIC3_QM0_ARB_MST_CRED_INC 0xDA0AA0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xDA0AA4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xDA0AA8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xDA0AAC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xDA0AB0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xDA0AB4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xDA0AB8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xDA0ABC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xDA0AC0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xDA0AC4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xDA0AC8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xDA0ACC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xDA0AD0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xDA0AD4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xDA0AD8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xDA0ADC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xDA0AE0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xDA0AE4 + 
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xDA0AE8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xDA0AEC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xDA0AF0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xDA0AF4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xDA0AF8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xDA0AFC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xDA0B00 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xDA0B04 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xDA0B08 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xDA0B0C + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xDA0B10 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xDA0B14 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xDA0B18 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xDA0B1C + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xDA0B20 + +#define mmNIC3_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xDA0B28 + +#define mmNIC3_QM0_ARB_MST_SLAVE_EN 0xDA0B2C + +#define mmNIC3_QM0_ARB_MST_QUIET_PER 0xDA0B34 + +#define mmNIC3_QM0_ARB_SLV_CHOISE_WDT 0xDA0B38 + +#define mmNIC3_QM0_ARB_SLV_ID 0xDA0B3C + +#define mmNIC3_QM0_ARB_MSG_MAX_INFLIGHT 0xDA0B44 + +#define mmNIC3_QM0_ARB_MSG_AWUSER_31_11 0xDA0B48 + +#define mmNIC3_QM0_ARB_MSG_AWUSER_SEC_PROP 0xDA0B4C + +#define mmNIC3_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xDA0B50 + +#define mmNIC3_QM0_ARB_BASE_LO 0xDA0B54 + +#define mmNIC3_QM0_ARB_BASE_HI 0xDA0B58 + +#define mmNIC3_QM0_ARB_STATE_STS 0xDA0B80 + +#define mmNIC3_QM0_ARB_CHOISE_FULLNESS_STS 0xDA0B84 + +#define mmNIC3_QM0_ARB_MSG_STS 0xDA0B88 + +#define mmNIC3_QM0_ARB_SLV_CHOISE_Q_HEAD 0xDA0B8C + +#define mmNIC3_QM0_ARB_ERR_CAUSE 0xDA0B9C + +#define mmNIC3_QM0_ARB_ERR_MSG_EN 0xDA0BA0 + +#define mmNIC3_QM0_ARB_ERR_STS_DRP 0xDA0BA8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_0 0xDA0BB0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_1 0xDA0BB4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_2 0xDA0BB8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_3 0xDA0BBC + +#define 
mmNIC3_QM0_ARB_MST_CRED_STS_4 0xDA0BC0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_5 0xDA0BC4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_6 0xDA0BC8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_7 0xDA0BCC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_8 0xDA0BD0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_9 0xDA0BD4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_10 0xDA0BD8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_11 0xDA0BDC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_12 0xDA0BE0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_13 0xDA0BE4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_14 0xDA0BE8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_15 0xDA0BEC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_16 0xDA0BF0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_17 0xDA0BF4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_18 0xDA0BF8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_19 0xDA0BFC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_20 0xDA0C00 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_21 0xDA0C04 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_22 0xDA0C08 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_23 0xDA0C0C + +#define mmNIC3_QM0_ARB_MST_CRED_STS_24 0xDA0C10 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_25 0xDA0C14 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_26 0xDA0C18 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_27 0xDA0C1C + +#define mmNIC3_QM0_ARB_MST_CRED_STS_28 0xDA0C20 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_29 0xDA0C24 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_30 0xDA0C28 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_31 0xDA0C2C + +#define mmNIC3_QM0_CGM_CFG 0xDA0C70 + +#define mmNIC3_QM0_CGM_STS 0xDA0C74 + +#define mmNIC3_QM0_CGM_CFG1 0xDA0C78 + +#define mmNIC3_QM0_LOCAL_RANGE_BASE 0xDA0C80 + +#define mmNIC3_QM0_LOCAL_RANGE_SIZE 0xDA0C84 + +#define mmNIC3_QM0_CSMR_STRICT_PRIO_CFG 0xDA0C90 + +#define mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_1 0xDA0C94 + +#define mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_0 0xDA0C98 + +#define mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_1 0xDA0C9C + +#define mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_0 0xDA0CA0 + +#define mmNIC3_QM0_GLBL_AXCACHE 0xDA0CA4 + +#define mmNIC3_QM0_IND_GW_APB_CFG 0xDA0CB0 + +#define 
mmNIC3_QM0_IND_GW_APB_WDATA 0xDA0CB4 + +#define mmNIC3_QM0_IND_GW_APB_RDATA 0xDA0CB8 + +#define mmNIC3_QM0_IND_GW_APB_STATUS 0xDA0CBC + +#define mmNIC3_QM0_GLBL_ERR_ADDR_LO 0xDA0CD0 + +#define mmNIC3_QM0_GLBL_ERR_ADDR_HI 0xDA0CD4 + +#define mmNIC3_QM0_GLBL_ERR_WDATA 0xDA0CD8 + +#define mmNIC3_QM0_GLBL_MEM_INIT_BUSY 0xDA0D00 + +#endif /* ASIC_REG_NIC3_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h new file mode 100644 index 000000000000..7fa040f65004 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC3_QM1_REGS_H_ +#define ASIC_REG_NIC3_QM1_REGS_H_ + +/* + ***************************************** + * NIC3_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC3_QM1_GLBL_CFG0 0xDA2000 + +#define mmNIC3_QM1_GLBL_CFG1 0xDA2004 + +#define mmNIC3_QM1_GLBL_PROT 0xDA2008 + +#define mmNIC3_QM1_GLBL_ERR_CFG 0xDA200C + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_0 0xDA2010 + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_1 0xDA2014 + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_2 0xDA2018 + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_3 0xDA201C + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_4 0xDA2020 + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0 0xDA2024 + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1 0xDA2028 + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2 0xDA202C + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3 0xDA2030 + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4 0xDA2034 + +#define mmNIC3_QM1_GLBL_STS0 0xDA2038 + +#define mmNIC3_QM1_GLBL_STS1_0 0xDA2040 + +#define mmNIC3_QM1_GLBL_STS1_1 0xDA2044 + +#define mmNIC3_QM1_GLBL_STS1_2 0xDA2048 + 
+#define mmNIC3_QM1_GLBL_STS1_3 0xDA204C + +#define mmNIC3_QM1_GLBL_STS1_4 0xDA2050 + +#define mmNIC3_QM1_GLBL_MSG_EN_0 0xDA2054 + +#define mmNIC3_QM1_GLBL_MSG_EN_1 0xDA2058 + +#define mmNIC3_QM1_GLBL_MSG_EN_2 0xDA205C + +#define mmNIC3_QM1_GLBL_MSG_EN_3 0xDA2060 + +#define mmNIC3_QM1_GLBL_MSG_EN_4 0xDA2068 + +#define mmNIC3_QM1_PQ_BASE_LO_0 0xDA2070 + +#define mmNIC3_QM1_PQ_BASE_LO_1 0xDA2074 + +#define mmNIC3_QM1_PQ_BASE_LO_2 0xDA2078 + +#define mmNIC3_QM1_PQ_BASE_LO_3 0xDA207C + +#define mmNIC3_QM1_PQ_BASE_HI_0 0xDA2080 + +#define mmNIC3_QM1_PQ_BASE_HI_1 0xDA2084 + +#define mmNIC3_QM1_PQ_BASE_HI_2 0xDA2088 + +#define mmNIC3_QM1_PQ_BASE_HI_3 0xDA208C + +#define mmNIC3_QM1_PQ_SIZE_0 0xDA2090 + +#define mmNIC3_QM1_PQ_SIZE_1 0xDA2094 + +#define mmNIC3_QM1_PQ_SIZE_2 0xDA2098 + +#define mmNIC3_QM1_PQ_SIZE_3 0xDA209C + +#define mmNIC3_QM1_PQ_PI_0 0xDA20A0 + +#define mmNIC3_QM1_PQ_PI_1 0xDA20A4 + +#define mmNIC3_QM1_PQ_PI_2 0xDA20A8 + +#define mmNIC3_QM1_PQ_PI_3 0xDA20AC + +#define mmNIC3_QM1_PQ_CI_0 0xDA20B0 + +#define mmNIC3_QM1_PQ_CI_1 0xDA20B4 + +#define mmNIC3_QM1_PQ_CI_2 0xDA20B8 + +#define mmNIC3_QM1_PQ_CI_3 0xDA20BC + +#define mmNIC3_QM1_PQ_CFG0_0 0xDA20C0 + +#define mmNIC3_QM1_PQ_CFG0_1 0xDA20C4 + +#define mmNIC3_QM1_PQ_CFG0_2 0xDA20C8 + +#define mmNIC3_QM1_PQ_CFG0_3 0xDA20CC + +#define mmNIC3_QM1_PQ_CFG1_0 0xDA20D0 + +#define mmNIC3_QM1_PQ_CFG1_1 0xDA20D4 + +#define mmNIC3_QM1_PQ_CFG1_2 0xDA20D8 + +#define mmNIC3_QM1_PQ_CFG1_3 0xDA20DC + +#define mmNIC3_QM1_PQ_ARUSER_31_11_0 0xDA20E0 + +#define mmNIC3_QM1_PQ_ARUSER_31_11_1 0xDA20E4 + +#define mmNIC3_QM1_PQ_ARUSER_31_11_2 0xDA20E8 + +#define mmNIC3_QM1_PQ_ARUSER_31_11_3 0xDA20EC + +#define mmNIC3_QM1_PQ_STS0_0 0xDA20F0 + +#define mmNIC3_QM1_PQ_STS0_1 0xDA20F4 + +#define mmNIC3_QM1_PQ_STS0_2 0xDA20F8 + +#define mmNIC3_QM1_PQ_STS0_3 0xDA20FC + +#define mmNIC3_QM1_PQ_STS1_0 0xDA2100 + +#define mmNIC3_QM1_PQ_STS1_1 0xDA2104 + +#define mmNIC3_QM1_PQ_STS1_2 0xDA2108 + +#define mmNIC3_QM1_PQ_STS1_3 0xDA210C + +#define 
mmNIC3_QM1_CQ_CFG0_0 0xDA2110 + +#define mmNIC3_QM1_CQ_CFG0_1 0xDA2114 + +#define mmNIC3_QM1_CQ_CFG0_2 0xDA2118 + +#define mmNIC3_QM1_CQ_CFG0_3 0xDA211C + +#define mmNIC3_QM1_CQ_CFG0_4 0xDA2120 + +#define mmNIC3_QM1_CQ_CFG1_0 0xDA2124 + +#define mmNIC3_QM1_CQ_CFG1_1 0xDA2128 + +#define mmNIC3_QM1_CQ_CFG1_2 0xDA212C + +#define mmNIC3_QM1_CQ_CFG1_3 0xDA2130 + +#define mmNIC3_QM1_CQ_CFG1_4 0xDA2134 + +#define mmNIC3_QM1_CQ_ARUSER_31_11_0 0xDA2138 + +#define mmNIC3_QM1_CQ_ARUSER_31_11_1 0xDA213C + +#define mmNIC3_QM1_CQ_ARUSER_31_11_2 0xDA2140 + +#define mmNIC3_QM1_CQ_ARUSER_31_11_3 0xDA2144 + +#define mmNIC3_QM1_CQ_ARUSER_31_11_4 0xDA2148 + +#define mmNIC3_QM1_CQ_STS0_0 0xDA214C + +#define mmNIC3_QM1_CQ_STS0_1 0xDA2150 + +#define mmNIC3_QM1_CQ_STS0_2 0xDA2154 + +#define mmNIC3_QM1_CQ_STS0_3 0xDA2158 + +#define mmNIC3_QM1_CQ_STS0_4 0xDA215C + +#define mmNIC3_QM1_CQ_STS1_0 0xDA2160 + +#define mmNIC3_QM1_CQ_STS1_1 0xDA2164 + +#define mmNIC3_QM1_CQ_STS1_2 0xDA2168 + +#define mmNIC3_QM1_CQ_STS1_3 0xDA216C + +#define mmNIC3_QM1_CQ_STS1_4 0xDA2170 + +#define mmNIC3_QM1_CQ_PTR_LO_0 0xDA2174 + +#define mmNIC3_QM1_CQ_PTR_HI_0 0xDA2178 + +#define mmNIC3_QM1_CQ_TSIZE_0 0xDA217C + +#define mmNIC3_QM1_CQ_CTL_0 0xDA2180 + +#define mmNIC3_QM1_CQ_PTR_LO_1 0xDA2184 + +#define mmNIC3_QM1_CQ_PTR_HI_1 0xDA2188 + +#define mmNIC3_QM1_CQ_TSIZE_1 0xDA218C + +#define mmNIC3_QM1_CQ_CTL_1 0xDA2190 + +#define mmNIC3_QM1_CQ_PTR_LO_2 0xDA2194 + +#define mmNIC3_QM1_CQ_PTR_HI_2 0xDA2198 + +#define mmNIC3_QM1_CQ_TSIZE_2 0xDA219C + +#define mmNIC3_QM1_CQ_CTL_2 0xDA21A0 + +#define mmNIC3_QM1_CQ_PTR_LO_3 0xDA21A4 + +#define mmNIC3_QM1_CQ_PTR_HI_3 0xDA21A8 + +#define mmNIC3_QM1_CQ_TSIZE_3 0xDA21AC + +#define mmNIC3_QM1_CQ_CTL_3 0xDA21B0 + +#define mmNIC3_QM1_CQ_PTR_LO_4 0xDA21B4 + +#define mmNIC3_QM1_CQ_PTR_HI_4 0xDA21B8 + +#define mmNIC3_QM1_CQ_TSIZE_4 0xDA21BC + +#define mmNIC3_QM1_CQ_CTL_4 0xDA21C0 + +#define mmNIC3_QM1_CQ_PTR_LO_STS_0 0xDA21C4 + +#define mmNIC3_QM1_CQ_PTR_LO_STS_1 0xDA21C8 + +#define 
mmNIC3_QM1_CQ_PTR_LO_STS_2 0xDA21CC + +#define mmNIC3_QM1_CQ_PTR_LO_STS_3 0xDA21D0 + +#define mmNIC3_QM1_CQ_PTR_LO_STS_4 0xDA21D4 + +#define mmNIC3_QM1_CQ_PTR_HI_STS_0 0xDA21D8 + +#define mmNIC3_QM1_CQ_PTR_HI_STS_1 0xDA21DC + +#define mmNIC3_QM1_CQ_PTR_HI_STS_2 0xDA21E0 + +#define mmNIC3_QM1_CQ_PTR_HI_STS_3 0xDA21E4 + +#define mmNIC3_QM1_CQ_PTR_HI_STS_4 0xDA21E8 + +#define mmNIC3_QM1_CQ_TSIZE_STS_0 0xDA21EC + +#define mmNIC3_QM1_CQ_TSIZE_STS_1 0xDA21F0 + +#define mmNIC3_QM1_CQ_TSIZE_STS_2 0xDA21F4 + +#define mmNIC3_QM1_CQ_TSIZE_STS_3 0xDA21F8 + +#define mmNIC3_QM1_CQ_TSIZE_STS_4 0xDA21FC + +#define mmNIC3_QM1_CQ_CTL_STS_0 0xDA2200 + +#define mmNIC3_QM1_CQ_CTL_STS_1 0xDA2204 + +#define mmNIC3_QM1_CQ_CTL_STS_2 0xDA2208 + +#define mmNIC3_QM1_CQ_CTL_STS_3 0xDA220C + +#define mmNIC3_QM1_CQ_CTL_STS_4 0xDA2210 + +#define mmNIC3_QM1_CQ_IFIFO_CNT_0 0xDA2214 + +#define mmNIC3_QM1_CQ_IFIFO_CNT_1 0xDA2218 + +#define mmNIC3_QM1_CQ_IFIFO_CNT_2 0xDA221C + +#define mmNIC3_QM1_CQ_IFIFO_CNT_3 0xDA2220 + +#define mmNIC3_QM1_CQ_IFIFO_CNT_4 0xDA2224 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_0 0xDA2228 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_1 0xDA222C + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_2 0xDA2230 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_3 0xDA2234 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_4 0xDA2238 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_0 0xDA223C + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_1 0xDA2240 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_2 0xDA2244 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_3 0xDA2248 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_4 0xDA224C + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_0 0xDA2250 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_1 0xDA2254 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_2 0xDA2258 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_3 0xDA225C + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_4 0xDA2260 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_0 0xDA2264 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_1 0xDA2268 + +#define 
mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_2 0xDA226C + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_3 0xDA2270 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_4 0xDA2274 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_0 0xDA2278 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_1 0xDA227C + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 0xDA2280 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_3 0xDA2284 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_4 0xDA2288 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_0 0xDA228C + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_1 0xDA2290 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_2 0xDA2294 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_3 0xDA2298 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_4 0xDA229C + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_0 0xDA22A0 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_1 0xDA22A4 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_2 0xDA22A8 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_3 0xDA22AC + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_4 0xDA22B0 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_0 0xDA22B4 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_1 0xDA22B8 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_2 0xDA22BC + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_3 0xDA22C0 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_4 0xDA22C4 + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_0 0xDA22C8 + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_1 0xDA22CC + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_2 0xDA22D0 + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_3 0xDA22D4 + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_4 0xDA22D8 + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xDA22E0 + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xDA22E4 + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xDA22E8 + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xDA22EC + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xDA22F0 + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xDA22F4 + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xDA22F8 + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xDA22FC + +#define 
mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xDA2300 + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xDA2304 + +#define mmNIC3_QM1_CP_FENCE0_RDATA_0 0xDA2308 + +#define mmNIC3_QM1_CP_FENCE0_RDATA_1 0xDA230C + +#define mmNIC3_QM1_CP_FENCE0_RDATA_2 0xDA2310 + +#define mmNIC3_QM1_CP_FENCE0_RDATA_3 0xDA2314 + +#define mmNIC3_QM1_CP_FENCE0_RDATA_4 0xDA2318 + +#define mmNIC3_QM1_CP_FENCE1_RDATA_0 0xDA231C + +#define mmNIC3_QM1_CP_FENCE1_RDATA_1 0xDA2320 + +#define mmNIC3_QM1_CP_FENCE1_RDATA_2 0xDA2324 + +#define mmNIC3_QM1_CP_FENCE1_RDATA_3 0xDA2328 + +#define mmNIC3_QM1_CP_FENCE1_RDATA_4 0xDA232C + +#define mmNIC3_QM1_CP_FENCE2_RDATA_0 0xDA2330 + +#define mmNIC3_QM1_CP_FENCE2_RDATA_1 0xDA2334 + +#define mmNIC3_QM1_CP_FENCE2_RDATA_2 0xDA2338 + +#define mmNIC3_QM1_CP_FENCE2_RDATA_3 0xDA233C + +#define mmNIC3_QM1_CP_FENCE2_RDATA_4 0xDA2340 + +#define mmNIC3_QM1_CP_FENCE3_RDATA_0 0xDA2344 + +#define mmNIC3_QM1_CP_FENCE3_RDATA_1 0xDA2348 + +#define mmNIC3_QM1_CP_FENCE3_RDATA_2 0xDA234C + +#define mmNIC3_QM1_CP_FENCE3_RDATA_3 0xDA2350 + +#define mmNIC3_QM1_CP_FENCE3_RDATA_4 0xDA2354 + +#define mmNIC3_QM1_CP_FENCE0_CNT_0 0xDA2358 + +#define mmNIC3_QM1_CP_FENCE0_CNT_1 0xDA235C + +#define mmNIC3_QM1_CP_FENCE0_CNT_2 0xDA2360 + +#define mmNIC3_QM1_CP_FENCE0_CNT_3 0xDA2364 + +#define mmNIC3_QM1_CP_FENCE0_CNT_4 0xDA2368 + +#define mmNIC3_QM1_CP_FENCE1_CNT_0 0xDA236C + +#define mmNIC3_QM1_CP_FENCE1_CNT_1 0xDA2370 + +#define mmNIC3_QM1_CP_FENCE1_CNT_2 0xDA2374 + +#define mmNIC3_QM1_CP_FENCE1_CNT_3 0xDA2378 + +#define mmNIC3_QM1_CP_FENCE1_CNT_4 0xDA237C + +#define mmNIC3_QM1_CP_FENCE2_CNT_0 0xDA2380 + +#define mmNIC3_QM1_CP_FENCE2_CNT_1 0xDA2384 + +#define mmNIC3_QM1_CP_FENCE2_CNT_2 0xDA2388 + +#define mmNIC3_QM1_CP_FENCE2_CNT_3 0xDA238C + +#define mmNIC3_QM1_CP_FENCE2_CNT_4 0xDA2390 + +#define mmNIC3_QM1_CP_FENCE3_CNT_0 0xDA2394 + +#define mmNIC3_QM1_CP_FENCE3_CNT_1 0xDA2398 + +#define mmNIC3_QM1_CP_FENCE3_CNT_2 0xDA239C + +#define mmNIC3_QM1_CP_FENCE3_CNT_3 0xDA23A0 + +#define 
mmNIC3_QM1_CP_FENCE3_CNT_4 0xDA23A4 + +#define mmNIC3_QM1_CP_STS_0 0xDA23A8 + +#define mmNIC3_QM1_CP_STS_1 0xDA23AC + +#define mmNIC3_QM1_CP_STS_2 0xDA23B0 + +#define mmNIC3_QM1_CP_STS_3 0xDA23B4 + +#define mmNIC3_QM1_CP_STS_4 0xDA23B8 + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_0 0xDA23BC + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_1 0xDA23C0 + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_2 0xDA23C4 + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_3 0xDA23C8 + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_4 0xDA23CC + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_0 0xDA23D0 + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_1 0xDA23D4 + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_2 0xDA23D8 + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_3 0xDA23DC + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_4 0xDA23E0 + +#define mmNIC3_QM1_CP_BARRIER_CFG_0 0xDA23F4 + +#define mmNIC3_QM1_CP_BARRIER_CFG_1 0xDA23F8 + +#define mmNIC3_QM1_CP_BARRIER_CFG_2 0xDA23FC + +#define mmNIC3_QM1_CP_BARRIER_CFG_3 0xDA2400 + +#define mmNIC3_QM1_CP_BARRIER_CFG_4 0xDA2404 + +#define mmNIC3_QM1_CP_DBG_0_0 0xDA2408 + +#define mmNIC3_QM1_CP_DBG_0_1 0xDA240C + +#define mmNIC3_QM1_CP_DBG_0_2 0xDA2410 + +#define mmNIC3_QM1_CP_DBG_0_3 0xDA2414 + +#define mmNIC3_QM1_CP_DBG_0_4 0xDA2418 + +#define mmNIC3_QM1_CP_ARUSER_31_11_0 0xDA241C + +#define mmNIC3_QM1_CP_ARUSER_31_11_1 0xDA2420 + +#define mmNIC3_QM1_CP_ARUSER_31_11_2 0xDA2424 + +#define mmNIC3_QM1_CP_ARUSER_31_11_3 0xDA2428 + +#define mmNIC3_QM1_CP_ARUSER_31_11_4 0xDA242C + +#define mmNIC3_QM1_CP_AWUSER_31_11_0 0xDA2430 + +#define mmNIC3_QM1_CP_AWUSER_31_11_1 0xDA2434 + +#define mmNIC3_QM1_CP_AWUSER_31_11_2 0xDA2438 + +#define mmNIC3_QM1_CP_AWUSER_31_11_3 0xDA243C + +#define mmNIC3_QM1_CP_AWUSER_31_11_4 0xDA2440 + +#define mmNIC3_QM1_ARB_CFG_0 0xDA2A00 + +#define mmNIC3_QM1_ARB_CHOISE_Q_PUSH 0xDA2A04 + +#define mmNIC3_QM1_ARB_WRR_WEIGHT_0 0xDA2A08 + +#define mmNIC3_QM1_ARB_WRR_WEIGHT_1 0xDA2A0C + +#define mmNIC3_QM1_ARB_WRR_WEIGHT_2 0xDA2A10 + +#define mmNIC3_QM1_ARB_WRR_WEIGHT_3 0xDA2A14 + +#define 
mmNIC3_QM1_ARB_CFG_1 0xDA2A18 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_0 0xDA2A20 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_1 0xDA2A24 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_2 0xDA2A28 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_3 0xDA2A2C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_4 0xDA2A30 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_5 0xDA2A34 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_6 0xDA2A38 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_7 0xDA2A3C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_8 0xDA2A40 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_9 0xDA2A44 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_10 0xDA2A48 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_11 0xDA2A4C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_12 0xDA2A50 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_13 0xDA2A54 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_14 0xDA2A58 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_15 0xDA2A5C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_16 0xDA2A60 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_17 0xDA2A64 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_18 0xDA2A68 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_19 0xDA2A6C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_20 0xDA2A70 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_21 0xDA2A74 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_22 0xDA2A78 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_23 0xDA2A7C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 0xDA2A80 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_25 0xDA2A84 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_26 0xDA2A88 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_27 0xDA2A8C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_28 0xDA2A90 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_29 0xDA2A94 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_30 0xDA2A98 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_31 0xDA2A9C + +#define mmNIC3_QM1_ARB_MST_CRED_INC 0xDA2AA0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xDA2AA4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xDA2AA8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xDA2AAC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xDA2AB0 + 
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xDA2AB4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xDA2AB8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xDA2ABC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xDA2AC0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xDA2AC4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xDA2AC8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xDA2ACC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xDA2AD0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xDA2AD4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xDA2AD8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xDA2ADC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xDA2AE0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xDA2AE4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xDA2AE8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xDA2AEC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xDA2AF0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xDA2AF4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xDA2AF8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xDA2AFC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xDA2B00 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xDA2B04 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xDA2B08 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xDA2B0C + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xDA2B10 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xDA2B14 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xDA2B18 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xDA2B1C + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xDA2B20 + +#define mmNIC3_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xDA2B28 + +#define mmNIC3_QM1_ARB_MST_SLAVE_EN 0xDA2B2C + +#define mmNIC3_QM1_ARB_MST_QUIET_PER 0xDA2B34 + +#define mmNIC3_QM1_ARB_SLV_CHOISE_WDT 0xDA2B38 + +#define mmNIC3_QM1_ARB_SLV_ID 0xDA2B3C + +#define mmNIC3_QM1_ARB_MSG_MAX_INFLIGHT 0xDA2B44 + +#define mmNIC3_QM1_ARB_MSG_AWUSER_31_11 0xDA2B48 + 
+#define mmNIC3_QM1_ARB_MSG_AWUSER_SEC_PROP 0xDA2B4C + +#define mmNIC3_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xDA2B50 + +#define mmNIC3_QM1_ARB_BASE_LO 0xDA2B54 + +#define mmNIC3_QM1_ARB_BASE_HI 0xDA2B58 + +#define mmNIC3_QM1_ARB_STATE_STS 0xDA2B80 + +#define mmNIC3_QM1_ARB_CHOISE_FULLNESS_STS 0xDA2B84 + +#define mmNIC3_QM1_ARB_MSG_STS 0xDA2B88 + +#define mmNIC3_QM1_ARB_SLV_CHOISE_Q_HEAD 0xDA2B8C + +#define mmNIC3_QM1_ARB_ERR_CAUSE 0xDA2B9C + +#define mmNIC3_QM1_ARB_ERR_MSG_EN 0xDA2BA0 + +#define mmNIC3_QM1_ARB_ERR_STS_DRP 0xDA2BA8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_0 0xDA2BB0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_1 0xDA2BB4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_2 0xDA2BB8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_3 0xDA2BBC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_4 0xDA2BC0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_5 0xDA2BC4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_6 0xDA2BC8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_7 0xDA2BCC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_8 0xDA2BD0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_9 0xDA2BD4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_10 0xDA2BD8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_11 0xDA2BDC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_12 0xDA2BE0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_13 0xDA2BE4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_14 0xDA2BE8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_15 0xDA2BEC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_16 0xDA2BF0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_17 0xDA2BF4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_18 0xDA2BF8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_19 0xDA2BFC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_20 0xDA2C00 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_21 0xDA2C04 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_22 0xDA2C08 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_23 0xDA2C0C + +#define mmNIC3_QM1_ARB_MST_CRED_STS_24 0xDA2C10 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_25 0xDA2C14 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_26 0xDA2C18 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_27 0xDA2C1C + +#define mmNIC3_QM1_ARB_MST_CRED_STS_28 
0xDA2C20 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_29 0xDA2C24 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_30 0xDA2C28 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_31 0xDA2C2C + +#define mmNIC3_QM1_CGM_CFG 0xDA2C70 + +#define mmNIC3_QM1_CGM_STS 0xDA2C74 + +#define mmNIC3_QM1_CGM_CFG1 0xDA2C78 + +#define mmNIC3_QM1_LOCAL_RANGE_BASE 0xDA2C80 + +#define mmNIC3_QM1_LOCAL_RANGE_SIZE 0xDA2C84 + +#define mmNIC3_QM1_CSMR_STRICT_PRIO_CFG 0xDA2C90 + +#define mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_1 0xDA2C94 + +#define mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_0 0xDA2C98 + +#define mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_1 0xDA2C9C + +#define mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_0 0xDA2CA0 + +#define mmNIC3_QM1_GLBL_AXCACHE 0xDA2CA4 + +#define mmNIC3_QM1_IND_GW_APB_CFG 0xDA2CB0 + +#define mmNIC3_QM1_IND_GW_APB_WDATA 0xDA2CB4 + +#define mmNIC3_QM1_IND_GW_APB_RDATA 0xDA2CB8 + +#define mmNIC3_QM1_IND_GW_APB_STATUS 0xDA2CBC + +#define mmNIC3_QM1_GLBL_ERR_ADDR_LO 0xDA2CD0 + +#define mmNIC3_QM1_GLBL_ERR_ADDR_HI 0xDA2CD4 + +#define mmNIC3_QM1_GLBL_ERR_WDATA 0xDA2CD8 + +#define mmNIC3_QM1_GLBL_MEM_INIT_BUSY 0xDA2D00 + +#endif /* ASIC_REG_NIC3_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h new file mode 100644 index 000000000000..99d5319672dd --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC4_QM0_REGS_H_ +#define ASIC_REG_NIC4_QM0_REGS_H_ + +/* + ***************************************** + * NIC4_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC4_QM0_GLBL_CFG0 0xDE0000 + +#define mmNIC4_QM0_GLBL_CFG1 0xDE0004 + +#define mmNIC4_QM0_GLBL_PROT 0xDE0008 + +#define mmNIC4_QM0_GLBL_ERR_CFG 0xDE000C + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_0 0xDE0010 + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_1 0xDE0014 + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_2 0xDE0018 + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_3 0xDE001C + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_4 0xDE0020 + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0 0xDE0024 + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1 0xDE0028 + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2 0xDE002C + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3 0xDE0030 + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4 0xDE0034 + +#define mmNIC4_QM0_GLBL_STS0 0xDE0038 + +#define mmNIC4_QM0_GLBL_STS1_0 0xDE0040 + +#define mmNIC4_QM0_GLBL_STS1_1 0xDE0044 + +#define mmNIC4_QM0_GLBL_STS1_2 0xDE0048 + +#define mmNIC4_QM0_GLBL_STS1_3 0xDE004C + +#define mmNIC4_QM0_GLBL_STS1_4 0xDE0050 + +#define mmNIC4_QM0_GLBL_MSG_EN_0 0xDE0054 + +#define mmNIC4_QM0_GLBL_MSG_EN_1 0xDE0058 + +#define mmNIC4_QM0_GLBL_MSG_EN_2 0xDE005C + +#define mmNIC4_QM0_GLBL_MSG_EN_3 0xDE0060 + +#define mmNIC4_QM0_GLBL_MSG_EN_4 0xDE0068 + +#define mmNIC4_QM0_PQ_BASE_LO_0 0xDE0070 + +#define mmNIC4_QM0_PQ_BASE_LO_1 0xDE0074 + +#define mmNIC4_QM0_PQ_BASE_LO_2 0xDE0078 + +#define mmNIC4_QM0_PQ_BASE_LO_3 0xDE007C + +#define mmNIC4_QM0_PQ_BASE_HI_0 0xDE0080 + +#define mmNIC4_QM0_PQ_BASE_HI_1 0xDE0084 + +#define mmNIC4_QM0_PQ_BASE_HI_2 0xDE0088 + +#define mmNIC4_QM0_PQ_BASE_HI_3 0xDE008C + +#define mmNIC4_QM0_PQ_SIZE_0 0xDE0090 + +#define mmNIC4_QM0_PQ_SIZE_1 0xDE0094 + +#define 
mmNIC4_QM0_PQ_SIZE_2 0xDE0098 + +#define mmNIC4_QM0_PQ_SIZE_3 0xDE009C + +#define mmNIC4_QM0_PQ_PI_0 0xDE00A0 + +#define mmNIC4_QM0_PQ_PI_1 0xDE00A4 + +#define mmNIC4_QM0_PQ_PI_2 0xDE00A8 + +#define mmNIC4_QM0_PQ_PI_3 0xDE00AC + +#define mmNIC4_QM0_PQ_CI_0 0xDE00B0 + +#define mmNIC4_QM0_PQ_CI_1 0xDE00B4 + +#define mmNIC4_QM0_PQ_CI_2 0xDE00B8 + +#define mmNIC4_QM0_PQ_CI_3 0xDE00BC + +#define mmNIC4_QM0_PQ_CFG0_0 0xDE00C0 + +#define mmNIC4_QM0_PQ_CFG0_1 0xDE00C4 + +#define mmNIC4_QM0_PQ_CFG0_2 0xDE00C8 + +#define mmNIC4_QM0_PQ_CFG0_3 0xDE00CC + +#define mmNIC4_QM0_PQ_CFG1_0 0xDE00D0 + +#define mmNIC4_QM0_PQ_CFG1_1 0xDE00D4 + +#define mmNIC4_QM0_PQ_CFG1_2 0xDE00D8 + +#define mmNIC4_QM0_PQ_CFG1_3 0xDE00DC + +#define mmNIC4_QM0_PQ_ARUSER_31_11_0 0xDE00E0 + +#define mmNIC4_QM0_PQ_ARUSER_31_11_1 0xDE00E4 + +#define mmNIC4_QM0_PQ_ARUSER_31_11_2 0xDE00E8 + +#define mmNIC4_QM0_PQ_ARUSER_31_11_3 0xDE00EC + +#define mmNIC4_QM0_PQ_STS0_0 0xDE00F0 + +#define mmNIC4_QM0_PQ_STS0_1 0xDE00F4 + +#define mmNIC4_QM0_PQ_STS0_2 0xDE00F8 + +#define mmNIC4_QM0_PQ_STS0_3 0xDE00FC + +#define mmNIC4_QM0_PQ_STS1_0 0xDE0100 + +#define mmNIC4_QM0_PQ_STS1_1 0xDE0104 + +#define mmNIC4_QM0_PQ_STS1_2 0xDE0108 + +#define mmNIC4_QM0_PQ_STS1_3 0xDE010C + +#define mmNIC4_QM0_CQ_CFG0_0 0xDE0110 + +#define mmNIC4_QM0_CQ_CFG0_1 0xDE0114 + +#define mmNIC4_QM0_CQ_CFG0_2 0xDE0118 + +#define mmNIC4_QM0_CQ_CFG0_3 0xDE011C + +#define mmNIC4_QM0_CQ_CFG0_4 0xDE0120 + +#define mmNIC4_QM0_CQ_CFG1_0 0xDE0124 + +#define mmNIC4_QM0_CQ_CFG1_1 0xDE0128 + +#define mmNIC4_QM0_CQ_CFG1_2 0xDE012C + +#define mmNIC4_QM0_CQ_CFG1_3 0xDE0130 + +#define mmNIC4_QM0_CQ_CFG1_4 0xDE0134 + +#define mmNIC4_QM0_CQ_ARUSER_31_11_0 0xDE0138 + +#define mmNIC4_QM0_CQ_ARUSER_31_11_1 0xDE013C + +#define mmNIC4_QM0_CQ_ARUSER_31_11_2 0xDE0140 + +#define mmNIC4_QM0_CQ_ARUSER_31_11_3 0xDE0144 + +#define mmNIC4_QM0_CQ_ARUSER_31_11_4 0xDE0148 + +#define mmNIC4_QM0_CQ_STS0_0 0xDE014C + +#define mmNIC4_QM0_CQ_STS0_1 0xDE0150 + +#define 
mmNIC4_QM0_CQ_STS0_2 0xDE0154 + +#define mmNIC4_QM0_CQ_STS0_3 0xDE0158 + +#define mmNIC4_QM0_CQ_STS0_4 0xDE015C + +#define mmNIC4_QM0_CQ_STS1_0 0xDE0160 + +#define mmNIC4_QM0_CQ_STS1_1 0xDE0164 + +#define mmNIC4_QM0_CQ_STS1_2 0xDE0168 + +#define mmNIC4_QM0_CQ_STS1_3 0xDE016C + +#define mmNIC4_QM0_CQ_STS1_4 0xDE0170 + +#define mmNIC4_QM0_CQ_PTR_LO_0 0xDE0174 + +#define mmNIC4_QM0_CQ_PTR_HI_0 0xDE0178 + +#define mmNIC4_QM0_CQ_TSIZE_0 0xDE017C + +#define mmNIC4_QM0_CQ_CTL_0 0xDE0180 + +#define mmNIC4_QM0_CQ_PTR_LO_1 0xDE0184 + +#define mmNIC4_QM0_CQ_PTR_HI_1 0xDE0188 + +#define mmNIC4_QM0_CQ_TSIZE_1 0xDE018C + +#define mmNIC4_QM0_CQ_CTL_1 0xDE0190 + +#define mmNIC4_QM0_CQ_PTR_LO_2 0xDE0194 + +#define mmNIC4_QM0_CQ_PTR_HI_2 0xDE0198 + +#define mmNIC4_QM0_CQ_TSIZE_2 0xDE019C + +#define mmNIC4_QM0_CQ_CTL_2 0xDE01A0 + +#define mmNIC4_QM0_CQ_PTR_LO_3 0xDE01A4 + +#define mmNIC4_QM0_CQ_PTR_HI_3 0xDE01A8 + +#define mmNIC4_QM0_CQ_TSIZE_3 0xDE01AC + +#define mmNIC4_QM0_CQ_CTL_3 0xDE01B0 + +#define mmNIC4_QM0_CQ_PTR_LO_4 0xDE01B4 + +#define mmNIC4_QM0_CQ_PTR_HI_4 0xDE01B8 + +#define mmNIC4_QM0_CQ_TSIZE_4 0xDE01BC + +#define mmNIC4_QM0_CQ_CTL_4 0xDE01C0 + +#define mmNIC4_QM0_CQ_PTR_LO_STS_0 0xDE01C4 + +#define mmNIC4_QM0_CQ_PTR_LO_STS_1 0xDE01C8 + +#define mmNIC4_QM0_CQ_PTR_LO_STS_2 0xDE01CC + +#define mmNIC4_QM0_CQ_PTR_LO_STS_3 0xDE01D0 + +#define mmNIC4_QM0_CQ_PTR_LO_STS_4 0xDE01D4 + +#define mmNIC4_QM0_CQ_PTR_HI_STS_0 0xDE01D8 + +#define mmNIC4_QM0_CQ_PTR_HI_STS_1 0xDE01DC + +#define mmNIC4_QM0_CQ_PTR_HI_STS_2 0xDE01E0 + +#define mmNIC4_QM0_CQ_PTR_HI_STS_3 0xDE01E4 + +#define mmNIC4_QM0_CQ_PTR_HI_STS_4 0xDE01E8 + +#define mmNIC4_QM0_CQ_TSIZE_STS_0 0xDE01EC + +#define mmNIC4_QM0_CQ_TSIZE_STS_1 0xDE01F0 + +#define mmNIC4_QM0_CQ_TSIZE_STS_2 0xDE01F4 + +#define mmNIC4_QM0_CQ_TSIZE_STS_3 0xDE01F8 + +#define mmNIC4_QM0_CQ_TSIZE_STS_4 0xDE01FC + +#define mmNIC4_QM0_CQ_CTL_STS_0 0xDE0200 + +#define mmNIC4_QM0_CQ_CTL_STS_1 0xDE0204 + +#define mmNIC4_QM0_CQ_CTL_STS_2 0xDE0208 + +#define 
mmNIC4_QM0_CQ_CTL_STS_3 0xDE020C + +#define mmNIC4_QM0_CQ_CTL_STS_4 0xDE0210 + +#define mmNIC4_QM0_CQ_IFIFO_CNT_0 0xDE0214 + +#define mmNIC4_QM0_CQ_IFIFO_CNT_1 0xDE0218 + +#define mmNIC4_QM0_CQ_IFIFO_CNT_2 0xDE021C + +#define mmNIC4_QM0_CQ_IFIFO_CNT_3 0xDE0220 + +#define mmNIC4_QM0_CQ_IFIFO_CNT_4 0xDE0224 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_0 0xDE0228 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_1 0xDE022C + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_2 0xDE0230 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_3 0xDE0234 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_4 0xDE0238 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_0 0xDE023C + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_1 0xDE0240 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_2 0xDE0244 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_3 0xDE0248 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_4 0xDE024C + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_0 0xDE0250 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_1 0xDE0254 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_2 0xDE0258 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_3 0xDE025C + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_4 0xDE0260 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_0 0xDE0264 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_1 0xDE0268 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_2 0xDE026C + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_3 0xDE0270 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_4 0xDE0274 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_0 0xDE0278 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_1 0xDE027C + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 0xDE0280 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_3 0xDE0284 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_4 0xDE0288 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_0 0xDE028C + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_1 0xDE0290 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_2 0xDE0294 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_3 0xDE0298 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_4 0xDE029C + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_0 0xDE02A0 + +#define 
mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_1 0xDE02A4 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_2 0xDE02A8 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_3 0xDE02AC + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_4 0xDE02B0 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_0 0xDE02B4 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_1 0xDE02B8 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_2 0xDE02BC + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_3 0xDE02C0 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_4 0xDE02C4 + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_0 0xDE02C8 + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_1 0xDE02CC + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_2 0xDE02D0 + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_3 0xDE02D4 + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_4 0xDE02D8 + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xDE02E0 + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xDE02E4 + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xDE02E8 + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xDE02EC + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xDE02F0 + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xDE02F4 + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xDE02F8 + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xDE02FC + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xDE0300 + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xDE0304 + +#define mmNIC4_QM0_CP_FENCE0_RDATA_0 0xDE0308 + +#define mmNIC4_QM0_CP_FENCE0_RDATA_1 0xDE030C + +#define mmNIC4_QM0_CP_FENCE0_RDATA_2 0xDE0310 + +#define mmNIC4_QM0_CP_FENCE0_RDATA_3 0xDE0314 + +#define mmNIC4_QM0_CP_FENCE0_RDATA_4 0xDE0318 + +#define mmNIC4_QM0_CP_FENCE1_RDATA_0 0xDE031C + +#define mmNIC4_QM0_CP_FENCE1_RDATA_1 0xDE0320 + +#define mmNIC4_QM0_CP_FENCE1_RDATA_2 0xDE0324 + +#define mmNIC4_QM0_CP_FENCE1_RDATA_3 0xDE0328 + +#define mmNIC4_QM0_CP_FENCE1_RDATA_4 0xDE032C + +#define mmNIC4_QM0_CP_FENCE2_RDATA_0 0xDE0330 + +#define mmNIC4_QM0_CP_FENCE2_RDATA_1 0xDE0334 + +#define mmNIC4_QM0_CP_FENCE2_RDATA_2 0xDE0338 + +#define 
mmNIC4_QM0_CP_FENCE2_RDATA_3 0xDE033C + +#define mmNIC4_QM0_CP_FENCE2_RDATA_4 0xDE0340 + +#define mmNIC4_QM0_CP_FENCE3_RDATA_0 0xDE0344 + +#define mmNIC4_QM0_CP_FENCE3_RDATA_1 0xDE0348 + +#define mmNIC4_QM0_CP_FENCE3_RDATA_2 0xDE034C + +#define mmNIC4_QM0_CP_FENCE3_RDATA_3 0xDE0350 + +#define mmNIC4_QM0_CP_FENCE3_RDATA_4 0xDE0354 + +#define mmNIC4_QM0_CP_FENCE0_CNT_0 0xDE0358 + +#define mmNIC4_QM0_CP_FENCE0_CNT_1 0xDE035C + +#define mmNIC4_QM0_CP_FENCE0_CNT_2 0xDE0360 + +#define mmNIC4_QM0_CP_FENCE0_CNT_3 0xDE0364 + +#define mmNIC4_QM0_CP_FENCE0_CNT_4 0xDE0368 + +#define mmNIC4_QM0_CP_FENCE1_CNT_0 0xDE036C + +#define mmNIC4_QM0_CP_FENCE1_CNT_1 0xDE0370 + +#define mmNIC4_QM0_CP_FENCE1_CNT_2 0xDE0374 + +#define mmNIC4_QM0_CP_FENCE1_CNT_3 0xDE0378 + +#define mmNIC4_QM0_CP_FENCE1_CNT_4 0xDE037C + +#define mmNIC4_QM0_CP_FENCE2_CNT_0 0xDE0380 + +#define mmNIC4_QM0_CP_FENCE2_CNT_1 0xDE0384 + +#define mmNIC4_QM0_CP_FENCE2_CNT_2 0xDE0388 + +#define mmNIC4_QM0_CP_FENCE2_CNT_3 0xDE038C + +#define mmNIC4_QM0_CP_FENCE2_CNT_4 0xDE0390 + +#define mmNIC4_QM0_CP_FENCE3_CNT_0 0xDE0394 + +#define mmNIC4_QM0_CP_FENCE3_CNT_1 0xDE0398 + +#define mmNIC4_QM0_CP_FENCE3_CNT_2 0xDE039C + +#define mmNIC4_QM0_CP_FENCE3_CNT_3 0xDE03A0 + +#define mmNIC4_QM0_CP_FENCE3_CNT_4 0xDE03A4 + +#define mmNIC4_QM0_CP_STS_0 0xDE03A8 + +#define mmNIC4_QM0_CP_STS_1 0xDE03AC + +#define mmNIC4_QM0_CP_STS_2 0xDE03B0 + +#define mmNIC4_QM0_CP_STS_3 0xDE03B4 + +#define mmNIC4_QM0_CP_STS_4 0xDE03B8 + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_0 0xDE03BC + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_1 0xDE03C0 + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_2 0xDE03C4 + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_3 0xDE03C8 + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_4 0xDE03CC + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_0 0xDE03D0 + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_1 0xDE03D4 + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_2 0xDE03D8 + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_3 0xDE03DC + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_4 0xDE03E0 + 
+#define mmNIC4_QM0_CP_BARRIER_CFG_0 0xDE03F4 + +#define mmNIC4_QM0_CP_BARRIER_CFG_1 0xDE03F8 + +#define mmNIC4_QM0_CP_BARRIER_CFG_2 0xDE03FC + +#define mmNIC4_QM0_CP_BARRIER_CFG_3 0xDE0400 + +#define mmNIC4_QM0_CP_BARRIER_CFG_4 0xDE0404 + +#define mmNIC4_QM0_CP_DBG_0_0 0xDE0408 + +#define mmNIC4_QM0_CP_DBG_0_1 0xDE040C + +#define mmNIC4_QM0_CP_DBG_0_2 0xDE0410 + +#define mmNIC4_QM0_CP_DBG_0_3 0xDE0414 + +#define mmNIC4_QM0_CP_DBG_0_4 0xDE0418 + +#define mmNIC4_QM0_CP_ARUSER_31_11_0 0xDE041C + +#define mmNIC4_QM0_CP_ARUSER_31_11_1 0xDE0420 + +#define mmNIC4_QM0_CP_ARUSER_31_11_2 0xDE0424 + +#define mmNIC4_QM0_CP_ARUSER_31_11_3 0xDE0428 + +#define mmNIC4_QM0_CP_ARUSER_31_11_4 0xDE042C + +#define mmNIC4_QM0_CP_AWUSER_31_11_0 0xDE0430 + +#define mmNIC4_QM0_CP_AWUSER_31_11_1 0xDE0434 + +#define mmNIC4_QM0_CP_AWUSER_31_11_2 0xDE0438 + +#define mmNIC4_QM0_CP_AWUSER_31_11_3 0xDE043C + +#define mmNIC4_QM0_CP_AWUSER_31_11_4 0xDE0440 + +#define mmNIC4_QM0_ARB_CFG_0 0xDE0A00 + +#define mmNIC4_QM0_ARB_CHOISE_Q_PUSH 0xDE0A04 + +#define mmNIC4_QM0_ARB_WRR_WEIGHT_0 0xDE0A08 + +#define mmNIC4_QM0_ARB_WRR_WEIGHT_1 0xDE0A0C + +#define mmNIC4_QM0_ARB_WRR_WEIGHT_2 0xDE0A10 + +#define mmNIC4_QM0_ARB_WRR_WEIGHT_3 0xDE0A14 + +#define mmNIC4_QM0_ARB_CFG_1 0xDE0A18 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_0 0xDE0A20 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_1 0xDE0A24 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_2 0xDE0A28 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_3 0xDE0A2C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_4 0xDE0A30 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_5 0xDE0A34 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_6 0xDE0A38 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_7 0xDE0A3C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_8 0xDE0A40 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_9 0xDE0A44 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_10 0xDE0A48 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_11 0xDE0A4C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_12 0xDE0A50 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_13 0xDE0A54 + 
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_14 0xDE0A58 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_15 0xDE0A5C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_16 0xDE0A60 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_17 0xDE0A64 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_18 0xDE0A68 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_19 0xDE0A6C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_20 0xDE0A70 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_21 0xDE0A74 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_22 0xDE0A78 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_23 0xDE0A7C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 0xDE0A80 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_25 0xDE0A84 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_26 0xDE0A88 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_27 0xDE0A8C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_28 0xDE0A90 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_29 0xDE0A94 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_30 0xDE0A98 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_31 0xDE0A9C + +#define mmNIC4_QM0_ARB_MST_CRED_INC 0xDE0AA0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xDE0AA4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xDE0AA8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xDE0AAC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xDE0AB0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xDE0AB4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xDE0AB8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xDE0ABC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xDE0AC0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xDE0AC4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xDE0AC8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xDE0ACC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xDE0AD0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xDE0AD4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xDE0AD8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xDE0ADC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xDE0AE0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xDE0AE4 + 
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xDE0AE8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xDE0AEC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xDE0AF0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xDE0AF4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xDE0AF8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xDE0AFC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xDE0B00 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xDE0B04 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xDE0B08 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xDE0B0C + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xDE0B10 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xDE0B14 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xDE0B18 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xDE0B1C + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xDE0B20 + +#define mmNIC4_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xDE0B28 + +#define mmNIC4_QM0_ARB_MST_SLAVE_EN 0xDE0B2C + +#define mmNIC4_QM0_ARB_MST_QUIET_PER 0xDE0B34 + +#define mmNIC4_QM0_ARB_SLV_CHOISE_WDT 0xDE0B38 + +#define mmNIC4_QM0_ARB_SLV_ID 0xDE0B3C + +#define mmNIC4_QM0_ARB_MSG_MAX_INFLIGHT 0xDE0B44 + +#define mmNIC4_QM0_ARB_MSG_AWUSER_31_11 0xDE0B48 + +#define mmNIC4_QM0_ARB_MSG_AWUSER_SEC_PROP 0xDE0B4C + +#define mmNIC4_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xDE0B50 + +#define mmNIC4_QM0_ARB_BASE_LO 0xDE0B54 + +#define mmNIC4_QM0_ARB_BASE_HI 0xDE0B58 + +#define mmNIC4_QM0_ARB_STATE_STS 0xDE0B80 + +#define mmNIC4_QM0_ARB_CHOISE_FULLNESS_STS 0xDE0B84 + +#define mmNIC4_QM0_ARB_MSG_STS 0xDE0B88 + +#define mmNIC4_QM0_ARB_SLV_CHOISE_Q_HEAD 0xDE0B8C + +#define mmNIC4_QM0_ARB_ERR_CAUSE 0xDE0B9C + +#define mmNIC4_QM0_ARB_ERR_MSG_EN 0xDE0BA0 + +#define mmNIC4_QM0_ARB_ERR_STS_DRP 0xDE0BA8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_0 0xDE0BB0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_1 0xDE0BB4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_2 0xDE0BB8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_3 0xDE0BBC + +#define 
mmNIC4_QM0_ARB_MST_CRED_STS_4 0xDE0BC0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_5 0xDE0BC4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_6 0xDE0BC8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_7 0xDE0BCC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_8 0xDE0BD0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_9 0xDE0BD4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_10 0xDE0BD8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_11 0xDE0BDC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_12 0xDE0BE0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_13 0xDE0BE4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_14 0xDE0BE8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_15 0xDE0BEC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_16 0xDE0BF0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_17 0xDE0BF4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_18 0xDE0BF8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_19 0xDE0BFC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_20 0xDE0C00 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_21 0xDE0C04 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_22 0xDE0C08 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_23 0xDE0C0C + +#define mmNIC4_QM0_ARB_MST_CRED_STS_24 0xDE0C10 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_25 0xDE0C14 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_26 0xDE0C18 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_27 0xDE0C1C + +#define mmNIC4_QM0_ARB_MST_CRED_STS_28 0xDE0C20 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_29 0xDE0C24 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_30 0xDE0C28 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_31 0xDE0C2C + +#define mmNIC4_QM0_CGM_CFG 0xDE0C70 + +#define mmNIC4_QM0_CGM_STS 0xDE0C74 + +#define mmNIC4_QM0_CGM_CFG1 0xDE0C78 + +#define mmNIC4_QM0_LOCAL_RANGE_BASE 0xDE0C80 + +#define mmNIC4_QM0_LOCAL_RANGE_SIZE 0xDE0C84 + +#define mmNIC4_QM0_CSMR_STRICT_PRIO_CFG 0xDE0C90 + +#define mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_1 0xDE0C94 + +#define mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_0 0xDE0C98 + +#define mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_1 0xDE0C9C + +#define mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_0 0xDE0CA0 + +#define mmNIC4_QM0_GLBL_AXCACHE 0xDE0CA4 + +#define mmNIC4_QM0_IND_GW_APB_CFG 0xDE0CB0 + +#define 
mmNIC4_QM0_IND_GW_APB_WDATA 0xDE0CB4 + +#define mmNIC4_QM0_IND_GW_APB_RDATA 0xDE0CB8 + +#define mmNIC4_QM0_IND_GW_APB_STATUS 0xDE0CBC + +#define mmNIC4_QM0_GLBL_ERR_ADDR_LO 0xDE0CD0 + +#define mmNIC4_QM0_GLBL_ERR_ADDR_HI 0xDE0CD4 + +#define mmNIC4_QM0_GLBL_ERR_WDATA 0xDE0CD8 + +#define mmNIC4_QM0_GLBL_MEM_INIT_BUSY 0xDE0D00 + +#endif /* ASIC_REG_NIC4_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h new file mode 100644 index 000000000000..34b21b21da52 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC4_QM1_REGS_H_ +#define ASIC_REG_NIC4_QM1_REGS_H_ + +/* + ***************************************** + * NIC4_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC4_QM1_GLBL_CFG0 0xDE2000 + +#define mmNIC4_QM1_GLBL_CFG1 0xDE2004 + +#define mmNIC4_QM1_GLBL_PROT 0xDE2008 + +#define mmNIC4_QM1_GLBL_ERR_CFG 0xDE200C + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_0 0xDE2010 + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_1 0xDE2014 + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_2 0xDE2018 + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_3 0xDE201C + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_4 0xDE2020 + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0 0xDE2024 + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1 0xDE2028 + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2 0xDE202C + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3 0xDE2030 + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4 0xDE2034 + +#define mmNIC4_QM1_GLBL_STS0 0xDE2038 + +#define mmNIC4_QM1_GLBL_STS1_0 0xDE2040 + +#define mmNIC4_QM1_GLBL_STS1_1 0xDE2044 + +#define mmNIC4_QM1_GLBL_STS1_2 0xDE2048 + 
+#define mmNIC4_QM1_GLBL_STS1_3 0xDE204C + +#define mmNIC4_QM1_GLBL_STS1_4 0xDE2050 + +#define mmNIC4_QM1_GLBL_MSG_EN_0 0xDE2054 + +#define mmNIC4_QM1_GLBL_MSG_EN_1 0xDE2058 + +#define mmNIC4_QM1_GLBL_MSG_EN_2 0xDE205C + +#define mmNIC4_QM1_GLBL_MSG_EN_3 0xDE2060 + +#define mmNIC4_QM1_GLBL_MSG_EN_4 0xDE2068 + +#define mmNIC4_QM1_PQ_BASE_LO_0 0xDE2070 + +#define mmNIC4_QM1_PQ_BASE_LO_1 0xDE2074 + +#define mmNIC4_QM1_PQ_BASE_LO_2 0xDE2078 + +#define mmNIC4_QM1_PQ_BASE_LO_3 0xDE207C + +#define mmNIC4_QM1_PQ_BASE_HI_0 0xDE2080 + +#define mmNIC4_QM1_PQ_BASE_HI_1 0xDE2084 + +#define mmNIC4_QM1_PQ_BASE_HI_2 0xDE2088 + +#define mmNIC4_QM1_PQ_BASE_HI_3 0xDE208C + +#define mmNIC4_QM1_PQ_SIZE_0 0xDE2090 + +#define mmNIC4_QM1_PQ_SIZE_1 0xDE2094 + +#define mmNIC4_QM1_PQ_SIZE_2 0xDE2098 + +#define mmNIC4_QM1_PQ_SIZE_3 0xDE209C + +#define mmNIC4_QM1_PQ_PI_0 0xDE20A0 + +#define mmNIC4_QM1_PQ_PI_1 0xDE20A4 + +#define mmNIC4_QM1_PQ_PI_2 0xDE20A8 + +#define mmNIC4_QM1_PQ_PI_3 0xDE20AC + +#define mmNIC4_QM1_PQ_CI_0 0xDE20B0 + +#define mmNIC4_QM1_PQ_CI_1 0xDE20B4 + +#define mmNIC4_QM1_PQ_CI_2 0xDE20B8 + +#define mmNIC4_QM1_PQ_CI_3 0xDE20BC + +#define mmNIC4_QM1_PQ_CFG0_0 0xDE20C0 + +#define mmNIC4_QM1_PQ_CFG0_1 0xDE20C4 + +#define mmNIC4_QM1_PQ_CFG0_2 0xDE20C8 + +#define mmNIC4_QM1_PQ_CFG0_3 0xDE20CC + +#define mmNIC4_QM1_PQ_CFG1_0 0xDE20D0 + +#define mmNIC4_QM1_PQ_CFG1_1 0xDE20D4 + +#define mmNIC4_QM1_PQ_CFG1_2 0xDE20D8 + +#define mmNIC4_QM1_PQ_CFG1_3 0xDE20DC + +#define mmNIC4_QM1_PQ_ARUSER_31_11_0 0xDE20E0 + +#define mmNIC4_QM1_PQ_ARUSER_31_11_1 0xDE20E4 + +#define mmNIC4_QM1_PQ_ARUSER_31_11_2 0xDE20E8 + +#define mmNIC4_QM1_PQ_ARUSER_31_11_3 0xDE20EC + +#define mmNIC4_QM1_PQ_STS0_0 0xDE20F0 + +#define mmNIC4_QM1_PQ_STS0_1 0xDE20F4 + +#define mmNIC4_QM1_PQ_STS0_2 0xDE20F8 + +#define mmNIC4_QM1_PQ_STS0_3 0xDE20FC + +#define mmNIC4_QM1_PQ_STS1_0 0xDE2100 + +#define mmNIC4_QM1_PQ_STS1_1 0xDE2104 + +#define mmNIC4_QM1_PQ_STS1_2 0xDE2108 + +#define mmNIC4_QM1_PQ_STS1_3 0xDE210C + +#define 
mmNIC4_QM1_CQ_CFG0_0 0xDE2110 + +#define mmNIC4_QM1_CQ_CFG0_1 0xDE2114 + +#define mmNIC4_QM1_CQ_CFG0_2 0xDE2118 + +#define mmNIC4_QM1_CQ_CFG0_3 0xDE211C + +#define mmNIC4_QM1_CQ_CFG0_4 0xDE2120 + +#define mmNIC4_QM1_CQ_CFG1_0 0xDE2124 + +#define mmNIC4_QM1_CQ_CFG1_1 0xDE2128 + +#define mmNIC4_QM1_CQ_CFG1_2 0xDE212C + +#define mmNIC4_QM1_CQ_CFG1_3 0xDE2130 + +#define mmNIC4_QM1_CQ_CFG1_4 0xDE2134 + +#define mmNIC4_QM1_CQ_ARUSER_31_11_0 0xDE2138 + +#define mmNIC4_QM1_CQ_ARUSER_31_11_1 0xDE213C + +#define mmNIC4_QM1_CQ_ARUSER_31_11_2 0xDE2140 + +#define mmNIC4_QM1_CQ_ARUSER_31_11_3 0xDE2144 + +#define mmNIC4_QM1_CQ_ARUSER_31_11_4 0xDE2148 + +#define mmNIC4_QM1_CQ_STS0_0 0xDE214C + +#define mmNIC4_QM1_CQ_STS0_1 0xDE2150 + +#define mmNIC4_QM1_CQ_STS0_2 0xDE2154 + +#define mmNIC4_QM1_CQ_STS0_3 0xDE2158 + +#define mmNIC4_QM1_CQ_STS0_4 0xDE215C + +#define mmNIC4_QM1_CQ_STS1_0 0xDE2160 + +#define mmNIC4_QM1_CQ_STS1_1 0xDE2164 + +#define mmNIC4_QM1_CQ_STS1_2 0xDE2168 + +#define mmNIC4_QM1_CQ_STS1_3 0xDE216C + +#define mmNIC4_QM1_CQ_STS1_4 0xDE2170 + +#define mmNIC4_QM1_CQ_PTR_LO_0 0xDE2174 + +#define mmNIC4_QM1_CQ_PTR_HI_0 0xDE2178 + +#define mmNIC4_QM1_CQ_TSIZE_0 0xDE217C + +#define mmNIC4_QM1_CQ_CTL_0 0xDE2180 + +#define mmNIC4_QM1_CQ_PTR_LO_1 0xDE2184 + +#define mmNIC4_QM1_CQ_PTR_HI_1 0xDE2188 + +#define mmNIC4_QM1_CQ_TSIZE_1 0xDE218C + +#define mmNIC4_QM1_CQ_CTL_1 0xDE2190 + +#define mmNIC4_QM1_CQ_PTR_LO_2 0xDE2194 + +#define mmNIC4_QM1_CQ_PTR_HI_2 0xDE2198 + +#define mmNIC4_QM1_CQ_TSIZE_2 0xDE219C + +#define mmNIC4_QM1_CQ_CTL_2 0xDE21A0 + +#define mmNIC4_QM1_CQ_PTR_LO_3 0xDE21A4 + +#define mmNIC4_QM1_CQ_PTR_HI_3 0xDE21A8 + +#define mmNIC4_QM1_CQ_TSIZE_3 0xDE21AC + +#define mmNIC4_QM1_CQ_CTL_3 0xDE21B0 + +#define mmNIC4_QM1_CQ_PTR_LO_4 0xDE21B4 + +#define mmNIC4_QM1_CQ_PTR_HI_4 0xDE21B8 + +#define mmNIC4_QM1_CQ_TSIZE_4 0xDE21BC + +#define mmNIC4_QM1_CQ_CTL_4 0xDE21C0 + +#define mmNIC4_QM1_CQ_PTR_LO_STS_0 0xDE21C4 + +#define mmNIC4_QM1_CQ_PTR_LO_STS_1 0xDE21C8 + +#define 
mmNIC4_QM1_CQ_PTR_LO_STS_2 0xDE21CC + +#define mmNIC4_QM1_CQ_PTR_LO_STS_3 0xDE21D0 + +#define mmNIC4_QM1_CQ_PTR_LO_STS_4 0xDE21D4 + +#define mmNIC4_QM1_CQ_PTR_HI_STS_0 0xDE21D8 + +#define mmNIC4_QM1_CQ_PTR_HI_STS_1 0xDE21DC + +#define mmNIC4_QM1_CQ_PTR_HI_STS_2 0xDE21E0 + +#define mmNIC4_QM1_CQ_PTR_HI_STS_3 0xDE21E4 + +#define mmNIC4_QM1_CQ_PTR_HI_STS_4 0xDE21E8 + +#define mmNIC4_QM1_CQ_TSIZE_STS_0 0xDE21EC + +#define mmNIC4_QM1_CQ_TSIZE_STS_1 0xDE21F0 + +#define mmNIC4_QM1_CQ_TSIZE_STS_2 0xDE21F4 + +#define mmNIC4_QM1_CQ_TSIZE_STS_3 0xDE21F8 + +#define mmNIC4_QM1_CQ_TSIZE_STS_4 0xDE21FC + +#define mmNIC4_QM1_CQ_CTL_STS_0 0xDE2200 + +#define mmNIC4_QM1_CQ_CTL_STS_1 0xDE2204 + +#define mmNIC4_QM1_CQ_CTL_STS_2 0xDE2208 + +#define mmNIC4_QM1_CQ_CTL_STS_3 0xDE220C + +#define mmNIC4_QM1_CQ_CTL_STS_4 0xDE2210 + +#define mmNIC4_QM1_CQ_IFIFO_CNT_0 0xDE2214 + +#define mmNIC4_QM1_CQ_IFIFO_CNT_1 0xDE2218 + +#define mmNIC4_QM1_CQ_IFIFO_CNT_2 0xDE221C + +#define mmNIC4_QM1_CQ_IFIFO_CNT_3 0xDE2220 + +#define mmNIC4_QM1_CQ_IFIFO_CNT_4 0xDE2224 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_0 0xDE2228 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_1 0xDE222C + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_2 0xDE2230 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_3 0xDE2234 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_4 0xDE2238 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_0 0xDE223C + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_1 0xDE2240 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_2 0xDE2244 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_3 0xDE2248 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_4 0xDE224C + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_0 0xDE2250 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_1 0xDE2254 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_2 0xDE2258 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_3 0xDE225C + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_4 0xDE2260 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_0 0xDE2264 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_1 0xDE2268 + +#define 
mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_2 0xDE226C + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_3 0xDE2270 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_4 0xDE2274 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_0 0xDE2278 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_1 0xDE227C + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 0xDE2280 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_3 0xDE2284 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_4 0xDE2288 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_0 0xDE228C + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_1 0xDE2290 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_2 0xDE2294 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_3 0xDE2298 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_4 0xDE229C + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_0 0xDE22A0 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_1 0xDE22A4 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_2 0xDE22A8 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_3 0xDE22AC + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_4 0xDE22B0 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_0 0xDE22B4 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_1 0xDE22B8 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_2 0xDE22BC + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_3 0xDE22C0 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_4 0xDE22C4 + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_0 0xDE22C8 + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_1 0xDE22CC + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_2 0xDE22D0 + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_3 0xDE22D4 + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_4 0xDE22D8 + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xDE22E0 + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xDE22E4 + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xDE22E8 + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xDE22EC + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xDE22F0 + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xDE22F4 + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xDE22F8 + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xDE22FC + +#define 
mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xDE2300 + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xDE2304 + +#define mmNIC4_QM1_CP_FENCE0_RDATA_0 0xDE2308 + +#define mmNIC4_QM1_CP_FENCE0_RDATA_1 0xDE230C + +#define mmNIC4_QM1_CP_FENCE0_RDATA_2 0xDE2310 + +#define mmNIC4_QM1_CP_FENCE0_RDATA_3 0xDE2314 + +#define mmNIC4_QM1_CP_FENCE0_RDATA_4 0xDE2318 + +#define mmNIC4_QM1_CP_FENCE1_RDATA_0 0xDE231C + +#define mmNIC4_QM1_CP_FENCE1_RDATA_1 0xDE2320 + +#define mmNIC4_QM1_CP_FENCE1_RDATA_2 0xDE2324 + +#define mmNIC4_QM1_CP_FENCE1_RDATA_3 0xDE2328 + +#define mmNIC4_QM1_CP_FENCE1_RDATA_4 0xDE232C + +#define mmNIC4_QM1_CP_FENCE2_RDATA_0 0xDE2330 + +#define mmNIC4_QM1_CP_FENCE2_RDATA_1 0xDE2334 + +#define mmNIC4_QM1_CP_FENCE2_RDATA_2 0xDE2338 + +#define mmNIC4_QM1_CP_FENCE2_RDATA_3 0xDE233C + +#define mmNIC4_QM1_CP_FENCE2_RDATA_4 0xDE2340 + +#define mmNIC4_QM1_CP_FENCE3_RDATA_0 0xDE2344 + +#define mmNIC4_QM1_CP_FENCE3_RDATA_1 0xDE2348 + +#define mmNIC4_QM1_CP_FENCE3_RDATA_2 0xDE234C + +#define mmNIC4_QM1_CP_FENCE3_RDATA_3 0xDE2350 + +#define mmNIC4_QM1_CP_FENCE3_RDATA_4 0xDE2354 + +#define mmNIC4_QM1_CP_FENCE0_CNT_0 0xDE2358 + +#define mmNIC4_QM1_CP_FENCE0_CNT_1 0xDE235C + +#define mmNIC4_QM1_CP_FENCE0_CNT_2 0xDE2360 + +#define mmNIC4_QM1_CP_FENCE0_CNT_3 0xDE2364 + +#define mmNIC4_QM1_CP_FENCE0_CNT_4 0xDE2368 + +#define mmNIC4_QM1_CP_FENCE1_CNT_0 0xDE236C + +#define mmNIC4_QM1_CP_FENCE1_CNT_1 0xDE2370 + +#define mmNIC4_QM1_CP_FENCE1_CNT_2 0xDE2374 + +#define mmNIC4_QM1_CP_FENCE1_CNT_3 0xDE2378 + +#define mmNIC4_QM1_CP_FENCE1_CNT_4 0xDE237C + +#define mmNIC4_QM1_CP_FENCE2_CNT_0 0xDE2380 + +#define mmNIC4_QM1_CP_FENCE2_CNT_1 0xDE2384 + +#define mmNIC4_QM1_CP_FENCE2_CNT_2 0xDE2388 + +#define mmNIC4_QM1_CP_FENCE2_CNT_3 0xDE238C + +#define mmNIC4_QM1_CP_FENCE2_CNT_4 0xDE2390 + +#define mmNIC4_QM1_CP_FENCE3_CNT_0 0xDE2394 + +#define mmNIC4_QM1_CP_FENCE3_CNT_1 0xDE2398 + +#define mmNIC4_QM1_CP_FENCE3_CNT_2 0xDE239C + +#define mmNIC4_QM1_CP_FENCE3_CNT_3 0xDE23A0 + +#define 
mmNIC4_QM1_CP_FENCE3_CNT_4 0xDE23A4 + +#define mmNIC4_QM1_CP_STS_0 0xDE23A8 + +#define mmNIC4_QM1_CP_STS_1 0xDE23AC + +#define mmNIC4_QM1_CP_STS_2 0xDE23B0 + +#define mmNIC4_QM1_CP_STS_3 0xDE23B4 + +#define mmNIC4_QM1_CP_STS_4 0xDE23B8 + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_0 0xDE23BC + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_1 0xDE23C0 + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_2 0xDE23C4 + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_3 0xDE23C8 + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_4 0xDE23CC + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_0 0xDE23D0 + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_1 0xDE23D4 + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_2 0xDE23D8 + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_3 0xDE23DC + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_4 0xDE23E0 + +#define mmNIC4_QM1_CP_BARRIER_CFG_0 0xDE23F4 + +#define mmNIC4_QM1_CP_BARRIER_CFG_1 0xDE23F8 + +#define mmNIC4_QM1_CP_BARRIER_CFG_2 0xDE23FC + +#define mmNIC4_QM1_CP_BARRIER_CFG_3 0xDE2400 + +#define mmNIC4_QM1_CP_BARRIER_CFG_4 0xDE2404 + +#define mmNIC4_QM1_CP_DBG_0_0 0xDE2408 + +#define mmNIC4_QM1_CP_DBG_0_1 0xDE240C + +#define mmNIC4_QM1_CP_DBG_0_2 0xDE2410 + +#define mmNIC4_QM1_CP_DBG_0_3 0xDE2414 + +#define mmNIC4_QM1_CP_DBG_0_4 0xDE2418 + +#define mmNIC4_QM1_CP_ARUSER_31_11_0 0xDE241C + +#define mmNIC4_QM1_CP_ARUSER_31_11_1 0xDE2420 + +#define mmNIC4_QM1_CP_ARUSER_31_11_2 0xDE2424 + +#define mmNIC4_QM1_CP_ARUSER_31_11_3 0xDE2428 + +#define mmNIC4_QM1_CP_ARUSER_31_11_4 0xDE242C + +#define mmNIC4_QM1_CP_AWUSER_31_11_0 0xDE2430 + +#define mmNIC4_QM1_CP_AWUSER_31_11_1 0xDE2434 + +#define mmNIC4_QM1_CP_AWUSER_31_11_2 0xDE2438 + +#define mmNIC4_QM1_CP_AWUSER_31_11_3 0xDE243C + +#define mmNIC4_QM1_CP_AWUSER_31_11_4 0xDE2440 + +#define mmNIC4_QM1_ARB_CFG_0 0xDE2A00 + +#define mmNIC4_QM1_ARB_CHOISE_Q_PUSH 0xDE2A04 + +#define mmNIC4_QM1_ARB_WRR_WEIGHT_0 0xDE2A08 + +#define mmNIC4_QM1_ARB_WRR_WEIGHT_1 0xDE2A0C + +#define mmNIC4_QM1_ARB_WRR_WEIGHT_2 0xDE2A10 + +#define mmNIC4_QM1_ARB_WRR_WEIGHT_3 0xDE2A14 + +#define 
mmNIC4_QM1_ARB_CFG_1 0xDE2A18 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_0 0xDE2A20 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_1 0xDE2A24 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_2 0xDE2A28 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_3 0xDE2A2C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_4 0xDE2A30 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_5 0xDE2A34 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_6 0xDE2A38 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_7 0xDE2A3C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_8 0xDE2A40 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_9 0xDE2A44 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_10 0xDE2A48 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_11 0xDE2A4C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_12 0xDE2A50 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_13 0xDE2A54 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_14 0xDE2A58 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_15 0xDE2A5C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_16 0xDE2A60 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_17 0xDE2A64 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_18 0xDE2A68 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_19 0xDE2A6C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_20 0xDE2A70 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_21 0xDE2A74 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_22 0xDE2A78 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_23 0xDE2A7C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 0xDE2A80 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_25 0xDE2A84 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_26 0xDE2A88 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_27 0xDE2A8C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_28 0xDE2A90 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_29 0xDE2A94 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_30 0xDE2A98 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_31 0xDE2A9C + +#define mmNIC4_QM1_ARB_MST_CRED_INC 0xDE2AA0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xDE2AA4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xDE2AA8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xDE2AAC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xDE2AB0 + 
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xDE2AB4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xDE2AB8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xDE2ABC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xDE2AC0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xDE2AC4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xDE2AC8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xDE2ACC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xDE2AD0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xDE2AD4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xDE2AD8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xDE2ADC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xDE2AE0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xDE2AE4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xDE2AE8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xDE2AEC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xDE2AF0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xDE2AF4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xDE2AF8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xDE2AFC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xDE2B00 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xDE2B04 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xDE2B08 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xDE2B0C + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xDE2B10 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xDE2B14 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xDE2B18 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xDE2B1C + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xDE2B20 + +#define mmNIC4_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xDE2B28 + +#define mmNIC4_QM1_ARB_MST_SLAVE_EN 0xDE2B2C + +#define mmNIC4_QM1_ARB_MST_QUIET_PER 0xDE2B34 + +#define mmNIC4_QM1_ARB_SLV_CHOISE_WDT 0xDE2B38 + +#define mmNIC4_QM1_ARB_SLV_ID 0xDE2B3C + +#define mmNIC4_QM1_ARB_MSG_MAX_INFLIGHT 0xDE2B44 + +#define mmNIC4_QM1_ARB_MSG_AWUSER_31_11 0xDE2B48 + 
+#define mmNIC4_QM1_ARB_MSG_AWUSER_SEC_PROP 0xDE2B4C + +#define mmNIC4_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xDE2B50 + +#define mmNIC4_QM1_ARB_BASE_LO 0xDE2B54 + +#define mmNIC4_QM1_ARB_BASE_HI 0xDE2B58 + +#define mmNIC4_QM1_ARB_STATE_STS 0xDE2B80 + +#define mmNIC4_QM1_ARB_CHOISE_FULLNESS_STS 0xDE2B84 + +#define mmNIC4_QM1_ARB_MSG_STS 0xDE2B88 + +#define mmNIC4_QM1_ARB_SLV_CHOISE_Q_HEAD 0xDE2B8C + +#define mmNIC4_QM1_ARB_ERR_CAUSE 0xDE2B9C + +#define mmNIC4_QM1_ARB_ERR_MSG_EN 0xDE2BA0 + +#define mmNIC4_QM1_ARB_ERR_STS_DRP 0xDE2BA8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_0 0xDE2BB0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_1 0xDE2BB4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_2 0xDE2BB8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_3 0xDE2BBC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_4 0xDE2BC0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_5 0xDE2BC4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_6 0xDE2BC8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_7 0xDE2BCC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_8 0xDE2BD0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_9 0xDE2BD4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_10 0xDE2BD8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_11 0xDE2BDC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_12 0xDE2BE0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_13 0xDE2BE4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_14 0xDE2BE8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_15 0xDE2BEC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_16 0xDE2BF0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_17 0xDE2BF4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_18 0xDE2BF8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_19 0xDE2BFC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_20 0xDE2C00 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_21 0xDE2C04 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_22 0xDE2C08 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_23 0xDE2C0C + +#define mmNIC4_QM1_ARB_MST_CRED_STS_24 0xDE2C10 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_25 0xDE2C14 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_26 0xDE2C18 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_27 0xDE2C1C + +#define mmNIC4_QM1_ARB_MST_CRED_STS_28 
0xDE2C20 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_29 0xDE2C24 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_30 0xDE2C28 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_31 0xDE2C2C + +#define mmNIC4_QM1_CGM_CFG 0xDE2C70 + +#define mmNIC4_QM1_CGM_STS 0xDE2C74 + +#define mmNIC4_QM1_CGM_CFG1 0xDE2C78 + +#define mmNIC4_QM1_LOCAL_RANGE_BASE 0xDE2C80 + +#define mmNIC4_QM1_LOCAL_RANGE_SIZE 0xDE2C84 + +#define mmNIC4_QM1_CSMR_STRICT_PRIO_CFG 0xDE2C90 + +#define mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_1 0xDE2C94 + +#define mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_0 0xDE2C98 + +#define mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_1 0xDE2C9C + +#define mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_0 0xDE2CA0 + +#define mmNIC4_QM1_GLBL_AXCACHE 0xDE2CA4 + +#define mmNIC4_QM1_IND_GW_APB_CFG 0xDE2CB0 + +#define mmNIC4_QM1_IND_GW_APB_WDATA 0xDE2CB4 + +#define mmNIC4_QM1_IND_GW_APB_RDATA 0xDE2CB8 + +#define mmNIC4_QM1_IND_GW_APB_STATUS 0xDE2CBC + +#define mmNIC4_QM1_GLBL_ERR_ADDR_LO 0xDE2CD0 + +#define mmNIC4_QM1_GLBL_ERR_ADDR_HI 0xDE2CD4 + +#define mmNIC4_QM1_GLBL_ERR_WDATA 0xDE2CD8 + +#define mmNIC4_QM1_GLBL_MEM_INIT_BUSY 0xDE2D00 + +#endif /* ASIC_REG_NIC4_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h deleted file mode 100644 index 687e2255cb19..000000000000 --- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * - * Copyright 2016-2018 HabanaLabs, Ltd. - * All Rights Reserved. 
- * - */ - -/************************************ - ** This is an auto-generated file ** - ** DO NOT EDIT BELOW ** - ************************************/ - -#ifndef ASIC_REG_PSOC_HBM_PLL_REGS_H_ -#define ASIC_REG_PSOC_HBM_PLL_REGS_H_ - -/* - ***************************************** - * PSOC_HBM_PLL (Prototype: PLL) - ***************************************** - */ - -#define mmPSOC_HBM_PLL_NR 0xC74100 - -#define mmPSOC_HBM_PLL_NF 0xC74104 - -#define mmPSOC_HBM_PLL_OD 0xC74108 - -#define mmPSOC_HBM_PLL_NB 0xC7410C - -#define mmPSOC_HBM_PLL_CFG 0xC74110 - -#define mmPSOC_HBM_PLL_LOSE_MASK 0xC74120 - -#define mmPSOC_HBM_PLL_LOCK_INTR 0xC74128 - -#define mmPSOC_HBM_PLL_LOCK_BYPASS 0xC7412C - -#define mmPSOC_HBM_PLL_DATA_CHNG 0xC74130 - -#define mmPSOC_HBM_PLL_RST 0xC74134 - -#define mmPSOC_HBM_PLL_SLIP_WD_CNTR 0xC74150 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_0 0xC74200 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_1 0xC74204 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_2 0xC74208 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_3 0xC7420C - -#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_0 0xC74220 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_1 0xC74224 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_2 0xC74228 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_3 0xC7422C - -#define mmPSOC_HBM_PLL_DIV_SEL_0 0xC74280 - -#define mmPSOC_HBM_PLL_DIV_SEL_1 0xC74284 - -#define mmPSOC_HBM_PLL_DIV_SEL_2 0xC74288 - -#define mmPSOC_HBM_PLL_DIV_SEL_3 0xC7428C - -#define mmPSOC_HBM_PLL_DIV_EN_0 0xC742A0 - -#define mmPSOC_HBM_PLL_DIV_EN_1 0xC742A4 - -#define mmPSOC_HBM_PLL_DIV_EN_2 0xC742A8 - -#define mmPSOC_HBM_PLL_DIV_EN_3 0xC742AC - -#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_0 0xC742C0 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_1 0xC742C4 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_2 0xC742C8 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_3 0xC742CC - -#define mmPSOC_HBM_PLL_CLK_GATER 0xC74300 - -#define mmPSOC_HBM_PLL_CLK_RLX_0 0xC74310 - -#define mmPSOC_HBM_PLL_CLK_RLX_1 0xC74314 - -#define mmPSOC_HBM_PLL_CLK_RLX_2 0xC74318 - -#define 
mmPSOC_HBM_PLL_CLK_RLX_3 0xC7431C - -#define mmPSOC_HBM_PLL_REF_CNTR_PERIOD 0xC74400 - -#define mmPSOC_HBM_PLL_REF_LOW_THRESHOLD 0xC74410 - -#define mmPSOC_HBM_PLL_REF_HIGH_THRESHOLD 0xC74420 - -#define mmPSOC_HBM_PLL_PLL_NOT_STABLE 0xC74430 - -#define mmPSOC_HBM_PLL_FREQ_CALC_EN 0xC74440 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_CFG 0xC74500 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_0 0xC74510 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_1 0xC74514 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_2 0xC74518 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_3 0xC7451C - -#endif /* ASIC_REG_PSOC_HBM_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h deleted file mode 100644 index 3dc9bb4542dd..000000000000 --- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * - * Copyright 2016-2018 HabanaLabs, Ltd. - * All Rights Reserved. - * - */ - -/************************************ - ** This is an auto-generated file ** - ** DO NOT EDIT BELOW ** - ************************************/ - -#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_ -#define ASIC_REG_PSOC_PCI_PLL_REGS_H_ - -/* - ***************************************** - * PSOC_PCI_PLL (Prototype: PLL) - ***************************************** - */ - -#define mmPSOC_PCI_PLL_NR 0xC72100 - -#define mmPSOC_PCI_PLL_NF 0xC72104 - -#define mmPSOC_PCI_PLL_OD 0xC72108 - -#define mmPSOC_PCI_PLL_NB 0xC7210C - -#define mmPSOC_PCI_PLL_CFG 0xC72110 - -#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120 - -#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128 - -#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C - -#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130 - -#define mmPSOC_PCI_PLL_RST 0xC72134 - -#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208 - -#define 
mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C - -#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C - -#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280 - -#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284 - -#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288 - -#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C - -#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0 - -#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4 - -#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8 - -#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC - -#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC - -#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300 - -#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310 - -#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314 - -#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318 - -#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C - -#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400 - -#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410 - -#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420 - -#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430 - -#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_CFG 0xC72500 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_0 0xC72510 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_1 0xC72514 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_2 0xC72518 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_3 0xC7251C - -#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h index 8aadc6357da1..25acd9e87e20 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h @@ -8,6 +8,8 @@ #ifndef GAUDI_FW_IF_H #define GAUDI_FW_IF_H +#include <linux/types.h> + #define GAUDI_EVENT_QUEUE_MSI_IDX 8 #define 
GAUDI_NIC_PORT1_MSI_IDX 10 #define GAUDI_NIC_PORT3_MSI_IDX 12 @@ -28,7 +30,30 @@ enum gaudi_pll_index { MESH_PLL, MME_PLL, TPC_PLL, - IF_PLL + IF_PLL, + PLL_MAX +}; + +enum gaudi_nic_axi_error { + RXB, + RXE, + TXS, + TXE, + QPC_RESP, + NON_AXI_ERR, +}; + +/* + * struct eq_nic_sei_event - describes an AXI error cause. + * @axi_error_cause: one of the events defined in enum gaudi_nic_axi_error. + * @id: can be either 0 or 1, to further describe unit with interrupt cause + * (i.e. TXE0 or TXE1). + * @pad[6]: padding structure to 64bit. + */ +struct eq_nic_sei_event { + __u8 axi_error_cause; + __u8 id; + __u8 pad[6]; }; #define GAUDI_PLL_FREQ_LOW 200000000 /* 200 MHz */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h index 46aed13f16b1..b9b90d079e23 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h @@ -41,6 +41,11 @@ (FIELD_PREP(TPC0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \ (FIELD_PREP(TPC0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F))) +#define NIC_QMAN_ENABLE (\ + (FIELD_PREP(NIC0_QM0_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_CFG0_CQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_CFG0_CP_EN_MASK, 0xF))) + #define QMAN_UPPER_CP_CGM_PWR_GATE_EN (\ (FIELD_PREP(DMA0_QM_CGM_CFG_IDLE_TH_MASK, 0x20)) | \ (FIELD_PREP(DMA0_QM_CGM_CFG_G2F_TH_MASK, 0xA)) | \ @@ -93,6 +98,16 @@ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F))) +#define NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0xF))) + +#define NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0xF)) | \ + 
(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF))) + #define QMAN_CGM1_PWR_GATE_EN (FIELD_PREP(DMA0_QM_CGM_CFG1_MASK_TH_MASK, 0xA)) /* RESET registers configuration */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h index 977fb341a6e7..137afedf5f15 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h @@ -12,6 +12,8 @@ * PSOC scratch-pad registers */ #define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 +#define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 +#define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 #define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 #define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 #define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h index 0fa80fe9f6cc..daf8d8cd14be 100644 --- a/drivers/misc/habanalabs/include/goya/goya_fw_if.h +++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h @@ -22,7 +22,8 @@ enum goya_pll_index { MME_PLL, PCI_PLL, EMMC_PLL, - TPC_PLL + TPC_PLL, + PLL_MAX }; #define GOYA_PLL_FREQ_LOW 50000000 /* 50 MHz */ diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h index e56124265a05..f3ab282cafa4 100644 --- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h +++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h @@ -22,6 +22,8 @@ #define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 #define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 #define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 +#define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 +#define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 #define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 #define mmCPU_CMD_STATUS_TO_HOST 
mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 #define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c index cc93569e601c..989d7d129469 100644 --- a/drivers/misc/hisi_hikey_usb.c +++ b/drivers/misc/hisi_hikey_usb.c @@ -168,10 +168,7 @@ static int hisi_hikey_usb_parse_kirin970(struct platform_device *pdev, hisi_hikey_usb->reset = devm_gpiod_get(&pdev->dev, "hub_reset_en_gpio", GPIOD_OUT_HIGH); - if (IS_ERR(hisi_hikey_usb->reset)) - return PTR_ERR(hisi_hikey_usb->reset); - - return 0; + return PTR_ERR_OR_ZERO(hisi_hikey_usb->reset); } static int hisi_hikey_usb_probe(struct platform_device *pdev) diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c index c12406f610d5..703d20e83ebd 100644 --- a/drivers/misc/isl29003.c +++ b/drivers/misc/isl29003.c @@ -127,13 +127,13 @@ static int isl29003_set_resolution(struct i2c_client *client, int res) static int isl29003_get_mode(struct i2c_client *client) { return __isl29003_read_reg(client, ISL29003_REG_COMMAND, - ISL29003_RES_MASK, ISL29003_RES_SHIFT); + ISL29003_MODE_MASK, ISL29003_MODE_SHIFT); } static int isl29003_set_mode(struct i2c_client *client, int mode) { return __isl29003_write_reg(client, ISL29003_REG_COMMAND, - ISL29003_RES_MASK, ISL29003_RES_SHIFT, mode); + ISL29003_MODE_MASK, ISL29003_MODE_SHIFT, mode); } /* power_state */ diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 1c4c7aca0026..1ef7888a12b5 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -17,7 +17,7 @@ KCOV_INSTRUMENT_rodata.o := n OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ - --rename-section .text=.rodata,alloc,readonly,load + --rename-section .noinstr.text=.rodata,alloc,readonly,load targets += rodata.o rodata_objcopy.o $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE $(call if_changed,objcopy) diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c index 58d180af72cf..baacb876d1d9 100644 --- 
a/drivers/misc/lkdtm/rodata.c +++ b/drivers/misc/lkdtm/rodata.c @@ -5,7 +5,7 @@ */ #include "lkdtm.h" -void notrace lkdtm_rodata_do_nothing(void) +void noinstr lkdtm_rodata_do_nothing(void) { /* Does nothing. We just want an architecture agnostic "return". */ } diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 4e30fa98fe7d..d8e760b11ae3 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -33,6 +33,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; #define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \ 0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04) +#define MEI_UUID_PAVP UUID_LE(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \ + 0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1) + #define MEI_UUID_ANY NULL_UUID_LE /** @@ -148,7 +151,7 @@ static int mei_osver(struct mei_cl_device *cldev) os_ver = (struct mei_os_ver *)fwcaps->data; os_ver->os_type = OSTYPE_LINUX; - return __mei_cl_send(cldev->cl, buf, size, mode); + return __mei_cl_send(cldev->cl, buf, size, 0, mode); } #define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ @@ -169,7 +172,7 @@ static int mei_fwver(struct mei_cl_device *cldev) req.hdr.group_id = MKHI_GEN_GROUP_ID; req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD; - ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), + ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, MEI_CL_IO_TX_BLOCKING); if (ret < 0) { dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n"); @@ -177,7 +180,7 @@ static int mei_fwver(struct mei_cl_device *cldev) } ret = 0; - bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0, + bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0, MKHI_RCV_TIMEOUT); if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) { /* @@ -324,13 +327,15 @@ static int mei_nfc_if_version(struct mei_cl *cl, }; struct mei_nfc_reply *reply = NULL; size_t if_version_length; + u8 vtag; int bytes_recv, ret; bus = cl->dev; WARN_ON(mutex_is_locked(&bus->device_lock)); - ret = 
__mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), MEI_CL_IO_TX_BLOCKING); + ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0, + MEI_CL_IO_TX_BLOCKING); if (ret < 0) { dev_err(bus->dev, "Could not send IF version cmd\n"); return ret; @@ -344,7 +349,8 @@ static int mei_nfc_if_version(struct mei_cl *cl, return -ENOMEM; ret = 0; - bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0); + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag, + 0, 0); if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) { dev_err(bus->dev, "Could not read IF version\n"); ret = -EIO; @@ -488,6 +494,7 @@ static struct mei_fixup { MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), MEI_FIXUP(MEI_UUID_HDCP, whitelist), MEI_FIXUP(MEI_UUID_ANY, vt_support), + MEI_FIXUP(MEI_UUID_PAVP, whitelist), }; /** diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 9cdaa7f3af23..2907db260fba 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -26,11 +26,12 @@ * @cl: host client * @buf: buffer to send * @length: buffer length + * @vtag: virtual tag * @mode: sending mode * * Return: written size bytes or < 0 on error */ -ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, +ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag, unsigned int mode) { struct mei_device *bus; @@ -86,6 +87,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, rets = -ENOMEM; goto out; } + cb->vtag = vtag; cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL); cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING); @@ -106,11 +108,12 @@ out: * @buf: buffer to receive * @length: buffer length * @mode: io mode + * @vtag: virtual tag * @timeout: recv timeout, 0 for infinite timeout * * Return: read size in bytes of < 0 on error */ -ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, +ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, unsigned int mode, unsigned long timeout) { struct mei_device *bus; @@ 
-196,6 +199,8 @@ copy: r_length = min_t(size_t, length, cb->buf_idx); memcpy(buf, cb->buf.data, r_length); rets = r_length; + if (vtag) + *vtag = cb->vtag; free: mei_cl_del_rd_completed(cl, cb); @@ -206,40 +211,87 @@ out: } /** - * mei_cldev_send - me device send (write) + * mei_cldev_send_vtag - me device send with vtag (write) * * @cldev: me client device * @buf: buffer to send * @length: buffer length + * @vtag: virtual tag * - * Return: written size in bytes or < 0 on error + * Return: + * * written size in bytes + * * < 0 on error */ -ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length) + +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 vtag) { struct mei_cl *cl = cldev->cl; - return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING); + return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING); } -EXPORT_SYMBOL_GPL(mei_cldev_send); +EXPORT_SYMBOL_GPL(mei_cldev_send_vtag); /** - * mei_cldev_recv_nonblock - non block client receive (read) + * mei_cldev_recv_vtag - client receive with vtag (read) * * @cldev: me client device * @buf: buffer to receive * @length: buffer length + * @vtag: virtual tag * - * Return: read size in bytes of < 0 on error - * -EAGAIN if function will block. 
+ * Return: + * * read size in bytes + * * < 0 on error */ -ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, - size_t length) + +ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 *vtag) { struct mei_cl *cl = cldev->cl; - return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0); + return __mei_cl_recv(cl, buf, length, vtag, 0, 0); } -EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); +EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag); + +/** + * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read) + * + * @cldev: me client device + * @buf: buffer to receive + * @length: buffer length + * @vtag: virtual tag + * + * Return: + * * read size in bytes + * * -EAGAIN if function will block. + * * < 0 on other error + */ +ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, + size_t length, u8 *vtag) +{ + struct mei_cl *cl = cldev->cl; + + return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0); +} +EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag); + +/** + * mei_cldev_send - me device send (write) + * + * @cldev: me client device + * @buf: buffer to send + * @length: buffer length + * + * Return: + * * written size in bytes + * * < 0 on error + */ +ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length) +{ + return mei_cldev_send_vtag(cldev, buf, length, 0); +} +EXPORT_SYMBOL_GPL(mei_cldev_send); /** * mei_cldev_recv - client receive (read) @@ -252,13 +304,28 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); */ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) { - struct mei_cl *cl = cldev->cl; - - return __mei_cl_recv(cl, buf, length, 0, 0); + return mei_cldev_recv_vtag(cldev, buf, length, NULL); } EXPORT_SYMBOL_GPL(mei_cldev_recv); /** + * mei_cldev_recv_nonblock - non block client receive (read) + * + * @cldev: me client device + * @buf: buffer to receive + * @length: buffer length + * + * Return: read size in bytes of < 0 on 
error + * -EAGAIN if function will block. + */ +ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, + size_t length) +{ + return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL); +} +EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); + +/** * mei_cl_bus_rx_work - dispatch rx event for a bus device * * @work: work @@ -276,7 +343,8 @@ static void mei_cl_bus_rx_work(struct work_struct *work) cldev->rx_cb(cldev); mutex_lock(&bus->device_lock); - mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); + if (mei_cl_is_connected(cldev->cl)) + mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); mutex_unlock(&bus->device_lock); } @@ -364,10 +432,16 @@ int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb) INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work); mutex_lock(&bus->device_lock); - ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); + if (mei_cl_is_connected(cldev->cl)) + ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); + else + ret = -ENODEV; mutex_unlock(&bus->device_lock); - if (ret && ret != -EBUSY) + if (ret && ret != -EBUSY) { + cancel_work_sync(&cldev->rx_work); + cldev->rx_cb = NULL; return ret; + } return 0; } @@ -401,8 +475,11 @@ int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, mutex_lock(&bus->device_lock); ret = mei_cl_notify_request(cldev->cl, NULL, 1); mutex_unlock(&bus->device_lock); - if (ret) + if (ret) { + cancel_work_sync(&cldev->notif_work); + cldev->notif_cb = NULL; return ret; + } return 0; } @@ -1037,7 +1114,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus, } /** - * mei_cl_dev_setup - setup me client device + * mei_cl_bus_dev_setup - setup me client device * run fix up routines and set the device name * * @bus: mei device diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index d5c3f7d54634..a56d41321f32 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1306,7 +1306,7 @@ struct 
mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag) * mei_cl_fp_by_vtag - obtain the file pointer by vtag * * @cl: host client - * @vtag: vm tag + * @vtag: virtual tag * * Return: * * A file pointer - on success @@ -1317,7 +1317,9 @@ const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag) struct mei_cl_vtag *vtag_l; list_for_each_entry(vtag_l, &cl->vtag_map, list) - if (vtag_l->vtag == vtag) + /* The client on bus has one fixed fp */ + if ((cl->cldev && mei_cldev_enabled(cl->cldev)) || + vtag_l->vtag == vtag) return vtag_l->fp; return ERR_PTR(-ENOENT); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index a97eb5d47705..686e8b6a4c55 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1377,7 +1377,6 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev_info(dev->dev, "hbm: stop response: resetting.\n"); /* force the reset */ return -EPROTO; - break; case CLIENT_DISCONNECT_REQ_CMD: dev_dbg(dev->dev, "hbm: disconnect request: message received\n"); diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 2f4cc1a8aae8..8c395bfdf6f3 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -340,9 +340,9 @@ struct mei_hw_ops { /* MEI bus API*/ void mei_cl_bus_rescan_work(struct work_struct *work); void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); -ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, +ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag, unsigned int mode); -ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, +ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, unsigned int mode, unsigned long timeout); bool mei_cl_bus_rx_event(struct mei_cl *cl); bool mei_cl_bus_notify_event(struct mei_cl *cl); diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index 4d490b92d951..a68738f38252 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -73,7 
+73,7 @@ static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx) /** * get_function_0() - Find a related PCI device (function 0) - * @device: PCI device to match + * @dev: PCI device to match * * Returns a pointer to the related device, or null if not found */ diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 146ca6fb3260..eff481ce08ee 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -708,7 +708,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case PCITEST_BAR: bar = arg; - if (bar < 0 || bar > 5) + if (bar > BAR_5) goto ret; if (is_am654_pci_dev(pdev) && bar == BAR_0) goto ret; @@ -811,8 +811,10 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, pci_set_master(pdev); - if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) + if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) { + err = -EINVAL; goto err_disable_irq; + } for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { @@ -849,8 +851,10 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, goto err_ida_remove; } - if (!pci_endpoint_test_request_irq(test)) + if (!pci_endpoint_test_request_irq(test)) { + err = -EINVAL; goto err_kfree_test_name; + } misc_device = &test->miscdev; misc_device->minor = MISC_DYNAMIC_MINOR; diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 71db60edff65..225f2bb84e39 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -634,6 +634,7 @@ extern int xpc_setup_rsvd_page(void); extern void xpc_teardown_rsvd_page(void); extern int xpc_identify_activate_IRQ_sender(void); extern int xpc_partition_disengaged(struct xpc_partition *); +extern int xpc_partition_disengaged_from_timer(struct xpc_partition *part); extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); extern void xpc_mark_partition_inactive(struct xpc_partition *); extern void 
xpc_discovery(void); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index e5244fc1dab3..84610bbcc131 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -179,7 +179,7 @@ xpc_timeout_partition_disengage(struct timer_list *t) DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); - (void)xpc_partition_disengaged(part); + xpc_partition_disengaged_from_timer(part); DBUG_ON(part->disengage_timeout != 0); DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 57df06820bae..1999d02923de 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -262,8 +262,8 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, * from us. Though we requested the remote partition to deactivate with regard * to us, we really only need to wait for the other side to disengage from us. */ -int -xpc_partition_disengaged(struct xpc_partition *part) +static int __xpc_partition_disengaged(struct xpc_partition *part, + bool from_timer) { short partid = XPC_PARTID(part); int disengaged; @@ -289,9 +289,9 @@ xpc_partition_disengaged(struct xpc_partition *part) } part->disengage_timeout = 0; - /* cancel the timer function, provided it's not us */ - if (!in_interrupt()) - del_singleshot_timer_sync(&part->disengage_timer); + /* Cancel the timer function if not called from it */ + if (!from_timer) + del_timer_sync(&part->disengage_timer); DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && part->act_state != XPC_P_AS_INACTIVE); @@ -303,6 +303,16 @@ xpc_partition_disengaged(struct xpc_partition *part) return disengaged; } +int xpc_partition_disengaged(struct xpc_partition *part) +{ + return __xpc_partition_disengaged(part, false); +} + +int xpc_partition_disengaged_from_timer(struct xpc_partition *part) +{ + return __xpc_partition_disengaged(part, true); +} + /* * Mark specified partition as active. 
*/ diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index f4ddd1e67015..5a0a5fc3d3ab 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -380,7 +380,7 @@ void st_int_recv(void *disc_data, st_gdata->rx_state = ST_W4_HEADER; st_gdata->rx_count = st_gdata->list[type]->hdr_len; pr_debug("rx_count %ld\n", st_gdata->rx_count); - }; + } ptr++; count--; } diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index 56dd98ab5a81..d07af4edfcac 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -231,17 +231,6 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) switch (type) { case UACCE_QFRT_MMIO: - if (!uacce->ops->mmap) { - ret = -EINVAL; - goto out_with_lock; - } - - ret = uacce->ops->mmap(q, vma, qfr); - if (ret) - goto out_with_lock; - - break; - case UACCE_QFRT_DUS: if (!uacce->ops->mmap) { ret = -EINVAL; @@ -533,5 +522,5 @@ subsys_initcall(uacce_init); module_exit(uacce_exit); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Hisilicon Tech. Co., Ltd."); +MODULE_AUTHOR("HiSilicon Tech. 
Co., Ltd."); MODULE_DESCRIPTION("Accelerator interface for Userland applications"); diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index 16695366ec92..26ff49fdf0f7 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, return VMCI_ERROR_MORE_DATA; } - dbells = kmalloc(data_size, GFP_ATOMIC); + dbells = kzalloc(data_size, GFP_ATOMIC); if (!dbells) return VMCI_ERROR_NO_MEM; diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 92291292756a..23c8448a9c3b 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -944,8 +944,8 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data) { struct xsdfec_dev *xsdfec; - void __user *arg = NULL; - int rval = -EINVAL; + void __user *arg = (void __user *)data; + int rval; xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev); @@ -956,16 +956,6 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, return -EPERM; } - if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) - return -ENOTTY; - - /* check if ioctl argument is present and valid */ - if (_IOC_DIR(cmd) != _IOC_NONE) { - arg = (void __user *)data; - if (!arg) - return rval; - } - switch (cmd) { case XSDFEC_START_DEV: rval = xsdfec_start(xsdfec); @@ -1010,20 +1000,12 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, rval = xsdfec_is_active(xsdfec, (bool __user *)arg); break; default: - /* Should not get here */ + rval = -ENOTTY; break; } return rval; } -#ifdef CONFIG_COMPAT -static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long data) -{ - return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data)); -} -#endif - static __poll_t xsdfec_poll(struct file *file, poll_table *wait) { __poll_t mask = 0; @@ -1054,9 +1036,7 @@ static const struct file_operations xsdfec_fops 
= { .release = xsdfec_dev_release, .unlocked_ioctl = xsdfec_dev_ioctl, .poll = xsdfec_poll, -#ifdef CONFIG_COMPAT - .compat_ioctl = xsdfec_dev_compat_ioctl, -#endif + .compat_ioctl = compat_ptr_ioctl, }; static int xsdfec_parse_of(struct xsdfec_dev *xsdfec) |