author     David Woodhouse <David.Woodhouse@intel.com>    2009-09-19 07:36:28 -0700
committer  David Woodhouse <David.Woodhouse@intel.com>    2009-09-19 07:36:28 -0700
commit     59c36286b74ae6a8adebf6e133a83d7f2e3e6704 (patch)
tree       ad6d608b560dc540330bdcc89e8702ac85993174
parent     2ebe31513fcbe7a781f27002f065b50ae195022f (diff)
download   linux-59c36286b74ae6a8adebf6e133a83d7f2e3e6704.tar.bz2
intel-iommu: Fix integer overflow in dma_pte_{clear_range,free_pagetable}()
If end_pfn is equal to (unsigned long)-1, then the loop will never end.

Seen on a 32-bit kernel, but it could have happened on 64-bit too once we get hardware that supports 64-bit guest addresses.

Change both functions to a 'do {} while' loop with the test at the end, and check for the PFN having wrapped round to zero.

Reported-by: Benjamin LaHaise <ben.lahaise@neterion.com>
Tested-by: Benjamin LaHaise <ben.lahaise@neterion.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
-rw-r--r--  drivers/pci/intel-iommu.c  11
1 file changed, 7 insertions, 4 deletions
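The failure mode can be reproduced outside the kernel. Below is a minimal, standalone C sketch (the names walk_broken and walk_fixed are illustrative only, not the kernel functions) showing why 'while (pfn <= last_pfn)' can never become false when last_pfn is (unsigned long)-1, and how the 'do {} while (pfn && ...)' form used in this patch stops as soon as the PFN wraps to zero:

#include <stdio.h>
#include <limits.h>

/* Illustrative only: if last_pfn == ULONG_MAX, the condition
 * (pfn <= last_pfn) is always true, so the original 'while' loop
 * keeps running even after pfn wraps back to zero.
 */
static unsigned long walk_broken(unsigned long pfn, unsigned long last_pfn)
{
	unsigned long iterations = 0;

	while (pfn <= last_pfn) {	/* never false when last_pfn == ULONG_MAX */
		pfn++;			/* eventually wraps to 0 and keeps going  */
		if (++iterations > 10)	/* bail out so the demo terminates        */
			return iterations;
	}
	return iterations;
}

/* The fixed pattern: test at the end, and stop if pfn wrapped to zero. */
static unsigned long walk_fixed(unsigned long pfn, unsigned long last_pfn)
{
	unsigned long iterations = 0;

	do {
		pfn++;
		iterations++;
	} while (pfn && pfn <= last_pfn);	/* 'pfn &&' catches the wrap past -1 */

	return iterations;
}

int main(void)
{
	/* With last_pfn == (unsigned long)-1 the broken walker would spin
	 * forever without the bail-out; the fixed one stops once pfn wraps. */
	printf("broken (bailed out after): %lu\n",
	       walk_broken(ULONG_MAX - 5, ULONG_MAX));
	printf("fixed  (iterations):       %lu\n",
	       walk_fixed(ULONG_MAX - 5, ULONG_MAX));
	return 0;
}

The same reasoning applies to dma_pte_free_pagetable() below, where the loop variable is 'tmp' and the wrap check is written as 'tmp && tmp + level_size(level) - 1 <= last_pfn'.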
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c9272a1fb691..5493c79dde47 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -785,9 +785,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
- while (start_pfn <= last_pfn) {
+ do {
first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
if (!pte) {
start_pfn = align_to_level(start_pfn + 1, 2);
@@ -801,7 +802,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
- }
+
+ } while (start_pfn && start_pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
@@ -817,6 +819,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
/* We don't need lock here; nobody else touches the iova range */
level = 2;
@@ -827,7 +830,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
if (tmp + level_size(level) - 1 > last_pfn)
return;
- while (tmp + level_size(level) - 1 <= last_pfn) {
+ do {
first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
if (!pte) {
tmp = align_to_level(tmp + 1, level + 1);
@@ -846,7 +849,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
- }
+ } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
level++;
}
/* free pgd */