author     Jason Gunthorpe <jgg@nvidia.com>  2022-11-29 16:29:41 -0400
committer  Jason Gunthorpe <jgg@nvidia.com>  2022-11-30 20:16:49 -0400
commit     52f528583bb395495f7dd35e6e4d548bccbf8a73 (patch)
tree       4f8a4784a2352553acd8895585730ef3924daa5c /drivers/iommu/iommufd/pages.c
parent     e26eed4f623da70913b535631a29764d108efe98 (diff)
iommufd: Add additional invariant assertions
These are on performance paths so we protect them using
CONFIG_IOMMUFD_TEST to avoid taking a hit during normal operation. They
are useful when running the test suite and syzkaller to find data
structure inconsistencies early.

Link: https://lore.kernel.org/r/18-v6-a196d26f289e+11787-iommufd_jgg@nvidia.com
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> # s390
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
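The core of the patch is the pairing of check_add_overflow() /
check_sub_overflow() with a WARN_ON() that is guarded by
IS_ENABLED(CONFIG_IOMMUFD_TEST). As a rough illustration, here is a
standalone userspace sketch of that pattern. DEBUG_ASSERTS, struct
counter and counter_add_npinned() are made-up names, not kernel code;
__builtin_add_overflow() is the GCC/Clang primitive that the kernel's
check_add_overflow() wraps.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#ifndef DEBUG_ASSERTS
#define DEBUG_ASSERTS 0		/* build with -DDEBUG_ASSERTS=1 to enable */
#endif

struct counter {
	size_t npinned;		/* pages currently pinned */
	size_t npages;		/* invariant: npinned <= npages */
};

static void counter_add_npinned(struct counter *c, size_t npages)
{
	/*
	 * __builtin_add_overflow() stores the (possibly wrapped) sum
	 * and returns true if the addition overflowed.
	 */
	bool overflow = __builtin_add_overflow(c->npinned, npages,
					       &c->npinned);

	/*
	 * DEBUG_ASSERTS is a compile-time constant, so this branch is
	 * dead code in a normal build and the hot path pays nothing.
	 */
	if (DEBUG_ASSERTS)
		assert(!overflow && c->npinned <= c->npages);
	(void)overflow;
}

int main(void)
{
	struct counter c = { .npinned = 0, .npages = 4 };

	counter_add_npinned(&c, 3);
	printf("npinned=%zu/%zu\n", c.npinned, c.npages);
	return 0;
}

In the kernel itself, check_add_overflow() is a thin __must_check
wrapper around the same builtin, and WARN_ON() takes the place of
assert() so a violated invariant is logged rather than aborting.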
Diffstat (limited to 'drivers/iommu/iommufd/pages.c')
-rw-r--r--   drivers/iommu/iommufd/pages.c | 42 ++++++++++++++++++++++++++++++++++++++++--
1 file changed, 40 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index c5d2d9a8c562..429fa3b0a239 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -162,12 +162,20 @@ void interval_tree_double_span_iter_next(
static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)
{
- pages->npinned += npages;
+ int rc;
+
+ rc = check_add_overflow(pages->npinned, npages, &pages->npinned);
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+ WARN_ON(rc || pages->npinned > pages->npages);
}
static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)
{
- pages->npinned -= npages;
+ int rc;
+
+ rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+ WARN_ON(rc || pages->npinned > pages->npages);
}
static void iopt_pages_err_unpin(struct iopt_pages *pages,
@@ -189,6 +197,9 @@ static void iopt_pages_err_unpin(struct iopt_pages *pages,
static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
unsigned long index)
{
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+ WARN_ON(index < iopt_area_index(area) ||
+ index > iopt_area_last_index(area));
index -= iopt_area_index(area);
if (index == 0)
return iopt_area_iova(area);
@@ -198,6 +209,9 @@ static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area,
unsigned long index)
{
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+ WARN_ON(index < iopt_area_index(area) ||
+ index > iopt_area_last_index(area));
if (index == iopt_area_last_index(area))
return iopt_area_last_iova(area);
return iopt_area_iova(area) - area->page_offset +
@@ -286,6 +300,8 @@ static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
{
if (!batch->total_pfns)
return;
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+ WARN_ON(batch->total_pfns != batch->npfns[0]);
skip_pfns = min(batch->total_pfns, skip_pfns);
batch->pfns[0] += skip_pfns;
batch->npfns[0] -= skip_pfns;
@@ -301,6 +317,8 @@ static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup,
batch->pfns = temp_kmalloc(&size, backup, backup_len);
if (!batch->pfns)
return -ENOMEM;
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(size < elmsz))
+ return -EINVAL;
batch->array_size = size / elmsz;
batch->npfns = (u32 *)(batch->pfns + batch->array_size);
batch_clear(batch);
@@ -429,6 +447,10 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
unsigned long start_iova = iova;
int rc;
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+ WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE ||
+ size % PAGE_SIZE);
+
while (size) {
rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
if (rc)
@@ -718,6 +740,10 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user,
uintptr_t uptr;
long rc;
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+ WARN_ON(last_index < start_index))
+ return -EINVAL;
+
if (!user->upages) {
/* All undone in pfn_reader_destroy() */
user->upages_len =
@@ -956,6 +982,10 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns)
struct iopt_area *area;
int rc;
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+ WARN_ON(span->last_used < start_index))
+ return -EINVAL;
+
if (span->is_used == 1) {
batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,
start_index, span->last_used);
@@ -1008,6 +1038,10 @@ static int pfn_reader_next(struct pfn_reader *pfns)
while (pfns->batch_end_index != pfns->last_index + 1) {
unsigned int npfns = pfns->batch.total_pfns;
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+ WARN_ON(interval_tree_double_span_iter_done(&pfns->span)))
+ return -EINVAL;
+
rc = pfn_reader_fill_span(pfns);
if (rc)
return rc;
@@ -1091,6 +1125,10 @@ static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages,
{
int rc;
+ if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+ WARN_ON(last_index < start_index))
+ return -EINVAL;
+
rc = pfn_reader_init(pfns, pages, start_index, last_index);
if (rc)
return rc;
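One detail worth noting about the gating used in every hunk above:
IS_ENABLED() expands to the constant 0 or 1 during preprocessing, so
each if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) branch is eliminated as dead
code in a normal build. The sketch below condenses the macro trick from
include/linux/kconfig.h to the built-in (=y) case; the real
IS_ENABLED() also recognizes =m symbols via a _MODULE suffix, and
CONFIG_FOO / CONFIG_BAR here are made-up symbols, not real Kconfig
options.

/*
 * Standalone sketch of the IS_ENABLED() trick, reduced to the
 * built-in (=y) case: the macro yields 1 if the symbol is #defined
 * to 1 and 0 if it is not defined at all, without a preprocessor
 * error in either case.
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_FOO 1		/* stands in for an enabled option */
/* CONFIG_BAR deliberately left undefined */

int main(void)
{
	/* Both conditions are compile-time constants (1 and 0). */
	if (IS_ENABLED(CONFIG_FOO))
		puts("CONFIG_FOO branch kept");
	if (IS_ENABLED(CONFIG_BAR))
		puts("CONFIG_BAR branch eliminated");
	return 0;
}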