author		Matthew Wilcox (Oracle) <willy@infradead.org>	2021-12-17 15:31:00 +0000
committer	Joerg Roedel <jroedel@suse.de>	2021-12-20 09:03:05 +0100
commit		87f60cc65d24939353b40aa1d9297fea080cdf8d (patch)
tree		0c538476e267e82e5b93d8f07f7b931a8d198fdf /include
parent		ce00eece6909c266da123fd147172d745a4f14a0 (diff)
download	linux-87f60cc65d24939353b40aa1d9297fea080cdf8d.tar.bz2
iommu/vt-d: Use put_pages_list
page->freelist is for the use of slab. We already have the ability to free a
list of pages in the core mm, but it requires the use of a list_head and for
the pages to be chained together through page->lru. Switch the Intel IOMMU
and IOVA code over to using put_pages_list().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
[rm: split from original patch, cosmetic tweaks, fix fq entries]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/2115b560d9a0ce7cd4b948bd51a2b7bde8fdfd59.1639753638.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
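For readers unfamiliar with the core-mm helper, the sketch below (not part of
this patch; the example_* helper names are hypothetical) illustrates the
pattern the commit message describes: pages destined for freeing are chained
through page->lru onto a caller-owned list_head, and the whole list is later
released with put_pages_list().

#include <linux/list.h>
#include <linux/mm.h>

/* Collect a no-longer-needed page-table page for later freeing. */
static void example_collect_page(struct list_head *freelist, struct page *pg)
{
	/* Chain through page->lru, leaving page->freelist free for slab. */
	list_add_tail(&pg->lru, freelist);
}

/* Hand the accumulated pages back to the core mm in one call. */
static void example_free_all(struct list_head *freelist)
{
	put_pages_list(freelist);	/* frees every page on the list */
}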
Diffstat (limited to 'include')
-rw-r--r--	include/linux/iommu.h	3
-rw-r--r--	include/linux/iova.h	4
2 files changed, 4 insertions, 3 deletions
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d2f3435e7d17..de0c57a567c8 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -186,7 +186,7 @@ struct iommu_iotlb_gather {
unsigned long start;
unsigned long end;
size_t pgsize;
- struct page *freelist;
+ struct list_head freelist;
bool queued;
};
@@ -399,6 +399,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
*gather = (struct iommu_iotlb_gather) {
.start = ULONG_MAX,
+ .freelist = LIST_HEAD_INIT(gather->freelist),
};
}
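With the gather structure now carrying a list_head that
iommu_iotlb_gather_init() starts out empty, a driver's IOTLB sync path can
drain it directly once the hardware flush has completed. A rough sketch, not
taken from this series; the function name example_iotlb_sync is hypothetical:

#include <linux/iommu.h>
#include <linux/mm.h>

static void example_iotlb_sync(struct iommu_domain *domain,
			       struct iommu_iotlb_gather *gather)
{
	/*
	 * ... issue the hardware IOTLB invalidation covering
	 * [gather->start, gather->end] here ...
	 */

	/* The walk can no longer reach the old tables: free them in one go. */
	put_pages_list(&gather->freelist);
}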
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 99be4fcea4f3..072a09c06e8a 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -46,7 +46,7 @@ struct iova_rcache {
struct iova_fq_entry {
unsigned long iova_pfn;
unsigned long pages;
- struct page *freelist;
+ struct list_head freelist;
u64 counter; /* Flush counter when this entrie was added */
};
@@ -135,7 +135,7 @@ void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
- struct page *freelist);
+ struct list_head *freelist);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
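On the IOVA side, a flush-queue entry now owns a page list rather than a
single page->freelist chain. The sketch below is an assumption-laden
illustration of the hand-off, not code from this patch: it supposes that
queue_iova() splices the caller's list into the entry, that entry->freelist
was initialised when the flush queue was set up, and it uses hypothetical
example_* helper names.

#include <linux/iova.h>
#include <linux/list.h>
#include <linux/mm.h>

/* The entry adopts the caller's pages; the caller's list is left empty. */
static void example_fq_entry_fill(struct iova_fq_entry *entry,
				  struct list_head *freelist)
{
	list_splice_init(freelist, &entry->freelist);
}

/* Once the deferred IOTLB flush has run, the pages can finally go. */
static void example_fq_entry_free(struct iova_fq_entry *entry)
{
	put_pages_list(&entry->freelist);
}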