/*
* Copyright © 2006-2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* Authors: David Woodhouse <dwmw2@infradead.org>,
* Ashok Raj <ashok.raj@intel.com>,
* Shaohua Li <shaohua.li@intel.com>,
* Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
* Joerg Roedel <jroedel@suse.de>
*/
#define pr_fmt(fmt) "DMAR: " fmt
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "irq_remapping.h"
#include "intel-pasid.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
#define IOAPIC_RANGE_START (0xfee00000)
#define IOAPIC_RANGE_END (0xfeefffff)
#define IOVA_START_ADDR (0x1000)
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
/* IO virtual address start page frame number */
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
/* page table handling */
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
/*
* This bitmap is used to advertise the page sizes our hardware supports
* to the IOMMU core, which will then use this information to split
* physically contiguous memory regions it is mapping into page sizes
* that we support.
*
* Traditionally the IOMMU core just handed us the mappings directly,
* after making sure the size is an order of a 4KiB page and that the
* mapping has natural alignment.
*
* To retain this behavior, we currently advertise that we support
* all page sizes that are an order of 4KiB.
*
* If at some point we'd like to utilize the IOMMU core's new behavior,
* we could change this to advertise the real page sizes we support.
*/
#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
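/*
* Page-table geometry helpers. Each level of the VT-d page table decodes
* LEVEL_STRIDE (9) bits of the address, so an adjusted guest address width
* (agaw) of N corresponds to an (N + 2)-level table covering 30 + 9*N bits.
* For example, agaw 2 means a 4-level table and a 48-bit width, and
* width_to_agaw(48) gives 2 back.
*/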
static inline int agaw_to_level(int agaw)
{
return agaw + 2;
}
static inline int agaw_to_width(int agaw)
{
return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
static inline int width_to_agaw(int width)
{
return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
static inline unsigned int level_to_offset_bits(int level)
{
return (level - 1) * LEVEL_STRIDE;
}
static inline int pfn_level_offset(unsigned long pfn, int level)
{
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
static inline unsigned long level_mask(int level)
{
return -1UL << level_to_offset_bits(level);
}
static inline unsigned long level_size(int level)
{
return 1UL << level_to_offset_bits(level);
}
static inline unsigned long align_to_level(unsigned long pfn, int level)
{
return (pfn + level_size(level) - 1) & level_mask(level);
}
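/*
* Number of base VT-d (4KiB) pages covered by a single PTE at @lvl:
* 1 page at level 1, 512 at level 2 (a 2MiB superpage), 512*512 at
* level 3 (1GiB), capped at MAX_AGAW_PFN_WIDTH.
*/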
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
return page_to_dma_pfn(virt_to_page(p));
}
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
* Set to 1 to panic the kernel if VT-d can't be enabled successfully
* (used when the kernel is launched with TXT).
*/
static int force_on = 0;
int intel_iommu_tboot_noforce;
/*
* 0: Present
* 1-11: Reserved
* 12-63: Context Ptr (12 - (haw-1))
* 64-127: Reserved
*/
struct root_entry {
u64 lo;
u64 hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
/*
* Take a root_entry and return the Lower Context Table Pointer (LCTP)
* if marked present.
*/
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
if (!(re->lo & 1))
return 0;
return re->lo & VTD_PAGE_MASK;
}
/*
* Take a root_entry and return the Upper Context Table Pointer (UCTP)
* if marked present.
*/
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
if (!(re->hi & 1))
return 0;
return re->hi & VTD_PAGE_MASK;
}
/*
* low 64 bits:
* 0: present
* 1: fault processing disable
* 2-3: translation type
* 12-63: address space root
* high 64 bits:
* 0-2: address width
* 3-6: avail
* 8-23: domain id
*/
struct context_entry {
u64 lo;
u64 hi;
};
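/*
* The helpers below poke individual bits of a (possibly extended) context
* entry: bit 0 of the low u64 is the present bit, bit 11 is treated as the
* PASID-enable flag of an extended entry, and bit 3 of the high u64 marks
* an entry that was copied from a previous kernel during kdump handover.
*/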
static inline void context_clear_pasid_enable(struct context_entry *context)
{
context->lo &= ~(1ULL << 11);
}
static inline bool context_pasid_enabled(struct context_entry *context)
{
return !!(context->lo & (1ULL << 11));
}
static inline void context_set_copied(struct context_entry *context)
{
context->hi |= (1ull << 3);
}
static inline bool context_copied(struct context_entry *context)
{
return !!(context->hi & (1ULL << 3));
}
static inline bool __context_present(struct context_entry *context)
{
return (context->lo & 1);
}
static inline bool context_present(struct context_entry *context)
{
return context_pasid_enabled(context) ?
__context_present(context) :
__context_present(context) && !context_copied(context);
}
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
}
static inline void context_set_fault_enable(struct context_entry *context)
{
context->lo &= (((u64)-1) << 2) | 1;
}
static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
{
context->lo &= (((u64)-1) << 4) | 3;
context->lo |= (value & 3) << 2;
}
static inline void context_set_address_root(struct context_entry *context,
unsigned long value)
{
context->lo &= ~VTD_PAGE_MASK;
context->lo |= value & VTD_PAGE_MASK;
}
static inline void context_set_address_width(struct context_entry *context,
unsigned long value)
{
context->hi |= value & 7;
}
static inline void context_set_domain_id(struct context_entry *context,
unsigned long value)
{
context->hi |= (value & ((1 << 16) - 1)) << 8;
}
static inline int context_domain_id(struct context_entry *c)
{
return((c->hi >> 8) & 0xffff);
}
static inline void context_clear_entry(struct context_entry *context)
{
context->lo = 0;
context->hi = 0;
}
/*
* 0: readable
* 1: writable
* 2-6: reserved
* 7: super page
* 8-10: available
* 11: snoop behavior
* 12-63: Host physical address
*/
struct dma_pte {
u64 val;
};
static inline void dma_clear_pte(struct dma_pte *pte)
{
pte->val = 0;
}
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
return pte->val & VTD_PAGE_MASK;
#else
/* Must have a full atomic 64-bit read */
return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}
static inline bool dma_pte_present(struct dma_pte *pte)
{
return (pte->val & 3) != 0;
}
static inline bool dma_pte_superpage(struct dma_pte *pte)
{
return (pte->val & DMA_PTE_LARGE_PAGE);
}
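/*
* A page-table page holds 512 PTEs; this returns true when @pte is the
* first entry of its page, i.e. when a walk has just crossed into a new
* table page.
*/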
static inline int first_pte_in_page(struct dma_pte *pte)
{
return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
/*
* This domain is a statically identity mapping domain.
* 1. This domain creates a static 1:1 mapping to all usable memory.
* 2. It maps to each iommu if successful.
* 3. Each iommu maps to this domain if successful.
*/
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
/*
* Domain represents a virtual machine; more than one device
* across iommus may be owned by one domain, e.g. a kvm guest.
*/
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
#define for_each_domain_iommu(idx, domain) \
for (idx = 0; idx < g_num_of_iommus; idx++) \
if (domain->iommu_refcnt[idx])
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 base_address; /* reserved base address*/
u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
};
struct dmar_atsr_unit {
struct list_head list; /* list of ATSR units */
struct acpi_dmar_header *hdr; /* ACPI header */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
u8 include_all:1; /* include all ports */
};
static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
/* Broadwell and Skylake have broken ECS support — normal so-called "second
* level" translation of DMA requests-without-PASID doesn't actually happen
* unless you also set the NESTE bit in an extended context-entry. Which of
* course means that SVM doesn't work because it's trying to do nested
* translation of the physical addresses it finds in the process page tables,
* through the IOVA->phys mapping found in the "second level" page tables.
*
* The VT-d specification was retroactively changed to change the definition
* of the capability bits and pretend that Broadwell/Skylake never happened...
* but unfortunately the wrong bit was changed. It's ECS which is broken, but
* for some reason it was the PASID capability bit which was redefined (from
* bit 28 on BDW/SKL to bit 40 in future).
*
* So our test for ECS needs to eschew those implementations which set the old
* PASID capability bit 28, since those are the ones on which ECS is broken.
* Unless we are working around the 'pasid28' limitations, that is, by putting
* the device into passthrough mode for normal DMA and thus masking the bug.
*/
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
(intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
* or new capability bits are set. */
#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
(ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
/*
* Iterate over elements in device_domain_list and call the specified
* callback @fn against each element. This helper should only be used
* in a context where device_domain_lock is already held.
*/
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
void *data), void *data)
{
int ret = 0;
struct device_domain_info *info;
assert_spin_locked(&device_domain_lock);
list_for_each_entry(info, &device_domain_list, global) {
ret = fn(info, data);
if (ret)
return ret;
}
return 0;
}
const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}
static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}
static void init_translation_status(struct intel_iommu *iommu)
{
u32 gsts;
gsts = readl(iommu->reg + DMAR_GSTS_REG);
if (gsts & DMA_GSTS_TES)
iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}
/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
return container_of(dom, struct dmar_domain, domain);
}
static int __init intel_iommu_setup(char *str)
{
if (!str)
return -EINVAL;
while (*str) {
if (!strncmp(str, "on", 2)) {
dmar_disabled = 0;
pr_info("IOMMU enabled\n");
} else if (!strncmp(str, "off", 3)) {
dmar_disabled = 1;
pr_info("IOMMU disabled\n");
} else if (!strncmp(str, "igfx_off", 8)) {
dmar_map_gfx = 0;
pr_info("Disable GFX device mapping\n");
} else if (!strncmp(str, "forcedac", 8)) {
pr_info("Forcing DAC for PCI devices\n");
dmar_forcedac = 1;
} else if (!strncmp(str, "strict", 6)) {
pr_info("Disable batched IOTLB flush\n");
intel_iommu_strict = 1;
} else if (!strncmp(str, "sp_off", 6)) {
pr_info("Disable supported super page\n");
intel_iommu_superpage = 0;
} else if (!strncmp(str, "ecs_off", 7)) {
printk(KERN_INFO
"Intel-IOMMU: disable extended context table support\n");
intel_iommu_ecs = 0;
} else if (!strncmp(str, "pasid28", 7)) {
printk(KERN_INFO
"Intel-IOMMU: enable pre-production PASID support\n");
intel_iommu_pasid28 = 1;
iommu_identity_mapping |= IDENTMAP_GFX;
} else if (!strncmp(str, "tboot_noforce", 13)) {
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
}
str += strcspn(str, ",");
while (*str == ',')
str++;
}
return 0;
}
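/*
* Options are comma separated; for example, booting with
* "intel_iommu=on,strict,igfx_off" enables the IOMMU, disables batched
* IOTLB flushing and leaves the graphics device unmapped.
*/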
__setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
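/*
* Domain-id to domain lookup is a two-level table: iommu->domains[did >> 8]
* points to a 256-entry array indexed by (did & 0xff). The second-level
* arrays are allocated lazily (GFP_ATOMIC) in set_iommu_domain().
*/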
static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
struct dmar_domain **domains;
int idx = did >> 8;
domains = iommu->domains[idx];
if (!domains)
return NULL;
return domains[did & 0xff];
}
static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
struct dmar_domain *domain)
{
struct dmar_domain **domains;
int idx = did >> 8;
if (!iommu->domains[idx]) {
size_t size = 256 * sizeof(struct dmar_domain *);
iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
}
domains = iommu->domains[idx];
if (WARN_ON(!domains))
return;
else
domains[did & 0xff] = domain;
}
void *alloc_pgtable_page(int node)
{
struct page *page;
void *vaddr = NULL;
page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
if (page)
vaddr = page_address(page);
return vaddr;
}
void free_pgtable_page(void *vaddr)
{
free_page((unsigned long)vaddr);
}
static inline void *alloc_domain_mem(void)
{
return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}
static void free_domain_mem(void *vaddr)
{
kmem_cache_free(iommu_domain_cache, vaddr);
}
static inline void *alloc_devinfo_mem(void)
{
return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}
static inline void free_devinfo_mem(void *vaddr)
{
kmem_cache_free(iommu_devinfo_cache, vaddr);
}
static inline int domain_type_is_vm(struct dmar_domain *domain)
{
return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}
static inline int domain_type_is_si(struct dmar_domain *domain)
{
return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}
static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
DOMAIN_FLAG_STATIC_IDENTITY);
}
static inline int domain_pfn_supported(struct dmar_domain *domain,
unsigned long pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
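/*
* Pick the largest agaw supported by the hardware (the cap_sagaw bitmap)
* that does not exceed @max_gaw. For example, with the default 57-bit
* width the walk starts at agaw 3 (5-level) and falls back to agaw 2
* (48-bit, 4-level) if that is all the iommu advertises.
*/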
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw = -1;
sagaw = cap_sagaw(iommu->cap);
for (agaw = width_to_agaw(max_gaw);
agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
}
return agaw;
}
/*
* Calculate max SAGAW for each iommu.
*/
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}
/*
* Calculate agaw for each iommu.
* "SAGAW" may differ across iommus, so use a default agaw and fall back
* to a smaller supported agaw for iommus that don't support the default.
*/
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
/* This function only returns a single iommu in a domain */
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
int iommu_id;
/* si_domain and vm domain should not get here. */
BUG_ON(domain_type_is_vm_or_si(domain));
for_each_domain_iommu(iommu_id, domain)
break;
if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
return NULL;
return g_iommus[iommu_id];
}
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool found = false;
int i;
domain->iommu_coherency = 1;
for_each_domain_iommu(i, domain) {
found = true;
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
}
}
if (found)
return;
/* No hardware attached; use lowest common denominator */
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (!ecap_coherent(iommu->ecap)) {
domain->iommu_coherency = 0;
break;
}
}
rcu_read_unlock();
}
static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int ret = 1;
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (iommu != skip) {
if (!ecap_sc_support(iommu->ecap)) {
ret = 0;
break;
}
}
}
rcu_read_unlock();
return ret;
}
static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int mask = 0xf;
if (!intel_iommu_superpage) {
return 0;
}
/* set iommu_superpage to the smallest common denominator */
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (iommu != skip) {
mask &= cap_super_page_val(iommu->cap);
if (!mask)
break;
}
}
rcu_read_unlock();
return fls(mask);
}
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain->iommu_snooping = domain_update_iommu_snooping(NULL);
domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
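/*
* Return (optionally allocating) the context entry for @bus/@devfn. In
* legacy mode the per-bus root entry points to a single 256-entry context
* table. With extended context support (ECS) each entry is twice as wide,
* so the devfn is doubled and devices 0x80 and above are reached through
* the upper half of the root entry.
*/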
static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
u8 bus, u8 devfn, int alloc)
{
struct root_entry *root = &iommu->root_entry[bus];
struct context_entry *context;
u64 *entry;
entry = &root->lo;
if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
entry = &root->hi;
}
devfn *= 2;
}
if (*entry & 1)
context = phys_to_virt(*entry & VTD_PAGE_MASK);
else {
unsigned long phy_addr;
if (!alloc)
return NULL;
context = alloc_pgtable_page(iommu->node);
if (!context)
return NULL;
__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
phy_addr = virt_to_phys((void *)context);
*entry = phy_addr | 1;
__iommu_flush_cache(iommu, entry, sizeof(*entry));
}
return &context[devfn];
}
static int iommu_dummy(struct device *dev)
{
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct intel_iommu *iommu;
struct device *tmp;
struct pci_dev *ptmp, *pdev = NULL;
u16 segment = 0;
int i;
if (iommu_dummy(dev))
return NULL;
if (dev_is_pci(dev)) {
struct pci_dev *pf_pdev;
pdev = to_pci_dev(dev);
#ifdef CONFIG_X86
/* VMD child devices currently cannot be handled individually */
if (is_vmd(pdev->bus))
return NULL;
#endif
/* VFs aren't listed in scope tables; we need to look up
* the PF instead to find the IOMMU. */
pf_pdev = pci_physfn(pdev);
dev = &pf_pdev->dev;
segment = pci_domain_nr(pdev->bus);
} else if (has_acpi_companion(dev))
dev = &ACPI_COMPANION(dev)->dev;
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
if (pdev && segment != drhd->segment)
continue;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, tmp) {
if (tmp == dev) {
/* For a VF use its original BDF# not that of the PF
* which we used for the IOMMU lookup. Strictly speaking
* we could do this for all PCI devices; we only need to
* get the BDF# from the scope table for ACPI matches. */
if (pdev && pdev->is_virtfn)
goto got_pdev;
*bus = drhd->devices[i].bus;
*devfn = drhd->devices[i].devfn;
goto out;
}
if (!pdev || !dev_is_pci(tmp))
continue;
ptmp = to_pci_dev(tmp);
if (ptmp->subordinate &&
ptmp->subordinate->number <= pdev->bus->number &&
ptmp->subordinate->busn_res.end >= pdev->bus->number)
goto got_pdev;
}
if (pdev && drhd->include_all) {
got_pdev:
*bus = pdev->bus->number;
*devfn = pdev->devfn;
goto out;
}
}
iommu = NULL;
out:
rcu_read_unlock();
return iommu;
}
static void domain_flush_cache(struct dmar_domain *domain,
void *addr, int size)
{
if (!domain->iommu_coherency)
clflush_cache_range(addr, size);
}
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct context_entry *context;
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (context)
ret = context_present(context);
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
static void free_context_table(struct intel_iommu *iommu)
{
int i;
unsigned long flags;
struct context_entry *context;
spin_lock_irqsave(&iommu->lock, flags);
if (!iommu->root_entry) {
goto out;
}
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
free_pgtable_page(context);
if (!ecs_enabled(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
free_pgtable_page(context);
}
free_pgtable_page(iommu->root_entry);
iommu->root_entry = NULL;
out:
spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn, int *target_level)
{
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
int offset;
BUG_ON(!domain->pgd);
if (!domain_pfn_supported(domain, pfn))
/* Address beyond IOMMU's addressing capabilities. */
return NULL;
parent = domain->pgd;
while (1) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
break;
if (level == *target_level)
break;
if (!dma_pte_present(pte)) {
uint64_t pteval;
tmp_page = alloc_pgtable_page(domain->nid);
if (!tmp_page)
return NULL;
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
else
domain_flush_cache(domain, pte, sizeof(*pte));
}
if (level == 1)
break;
parent = phys_to_virt(dma_pte_addr(pte));
level--;
}
if (!*target_level)
*target_level = level;
return pte;
}
/* Return the address's pte at the specified level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
int level, int *large_page)
{
struct dma_pte *parent, *pte = NULL;
int total = agaw_to_level(domain->agaw);
int offset;
parent = domain->pgd;
while (level <= total) {
offset = pfn_level_offset(pfn, total);
pte = &parent[offset];
if (level == total)
return pte;
if (!dma_pte_present(pte)) {
*large_page = total;
break;
}
if (dma_pte_superpage(pte)) {
*large_page = total;
return pte;
}
parent = phys_to_virt(dma_pte_addr(pte));
total--;
}
return NULL;
}
/* Clear last-level ptes; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
BUG_ON(!domain_pfn_supported(domain, start_pfn));
BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
do {
large_page = 1;
first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
if (!pte) {
start_pfn = align_to_level(start_pfn + 1, large_page + 1);
continue;
}
do {
dma_clear_pte(pte);
start_pfn += lvl_to_nr_pages(large_page);
pte++;
} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
} while (start_pfn && start_pfn <= last_pfn);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
int retain_level, struct dma_pte *pte,
unsigned long pfn, unsigned long start_pfn,
unsigned long last_pfn)
{
pfn = max(start_pfn, pfn);
pte = &pte[pfn_level_offset(pfn, level)];
do {
unsigned long level_pfn;
struct dma_pte *level_pte;
if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next;
level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2) {
dma_pte_free_level(domain, level - 1, retain_level,
level_pte, level_pfn, start_pfn,
last_pfn);
}
/*
* Free the page table if we're below the level we want to
* retain and the range covers the entire table.
*/
if (level < retain_level && !(start_pfn > level_pfn ||
last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
free_pgtable_page(level_pte);
}
next:
pfn += level_size(level);
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
/*
* clear last level (leaf) ptes and free page table pages below the
* level we wish to keep intact.
*/
static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn,
int retain_level)
{
BUG_ON(!domain_pfn_supported(domain, start_pfn));
BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);
dma_pte_clear_range(domain, start_pfn, last_pfn);
/* We don't need lock here; nobody else touches the iova range */
dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
domain->pgd, 0, start_pfn, last_pfn);
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
free_pgtable_page(domain->pgd);
domain->pgd = NULL;
}
}
/* When a page at a given level is being unlinked from its parent, we don't
need to *modify* it at all. All we need to do is make a list of all the
pages which can be freed just as soon as we've flushed the IOTLB and we
know the hardware page-walk will no longer touch them.
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
int level, struct dma_pte *pte,
struct page *freelist)
{
struct page *pg;
pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
pg->freelist = freelist;
freelist = pg;
if (level == 1)
return freelist;
pte = page_address(pg);
do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
freelist = dma_pte_list_pagetables(domain, level - 1,
pte, freelist);
pte++;
} while (!first_pte_in_page(pte));
return freelist;
}
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
struct dma_pte *pte, unsigned long pfn,
unsigned long start_pfn,
unsigned long last_pfn,
struct page *freelist)
{
struct dma_pte *first_pte = NULL, *last_pte = NULL;
pfn = max(start_pfn, pfn);
pte = &pte[pfn_level_offset(pfn, level)];
do {
unsigned long level_pfn;
if (!dma_pte_present(pte))
goto next;
level_pfn = pfn & level_mask(level);
/* If range covers entire pagetable, free it */
if (start_pfn <= level_pfn &&
last_pfn >= level_pfn + level_size(level) - 1) {
/* These subordinate page tables are going away entirely. Don't
bother to clear them; we're just going to *free* them. */
if (level > 1 && !dma_pte_superpage(pte))
freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
dma_clear_pte(pte);
if (!first_pte)
first_pte = pte;
last_pte = pte;
} else if (level > 1) {
/* Recurse down into a level that isn't *entirely* obsolete */
freelist = dma_pte_clear_level(domain, level - 1,
phys_to_virt(dma_pte_addr(pte)),
level_pfn, start_pfn, last_pfn,
freelist);
}
next:
pfn += level_size(level);
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
if (first_pte)
domain_flush_cache(domain, first_pte,
(void *)++last_pte - (void *)first_pte);
return freelist;
}
/* We can't just free the pages because the IOMMU may still be walking
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
struct page *freelist = NULL;
BUG_ON(!domain_pfn_supported(domain, start_pfn));
BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
domain->pgd, 0, start_pfn, last_pfn, NULL);
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
struct page *pgd_page = virt_to_page(domain->pgd);
pgd_page->freelist = freelist;
freelist = pgd_page;
domain->pgd = NULL;
}
return freelist;
}
static void dma_free_pagelist(struct page *freelist)
{
struct page *pg;
while ((pg = freelist)) {
freelist = pg->freelist;
free_pgtable_page(page_address(pg));
}
}
static void iova_entry_free(unsigned long data)
{
struct page *freelist = (struct page *)data;
dma_free_pagelist(freelist);
}
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root) {
pr_err("Allocating root entry for %s failed\n",
iommu->name);
return -ENOMEM;
}
__iommu_flush_cache(iommu, root, ROOT_SIZE);
spin_lock_irqsave(&iommu->lock, flags);
iommu->root_entry = root;
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
u64 addr;
u32 sts;
unsigned long flag;
addr = virt_to_phys(iommu->root_entry);
if (ecs_enabled(iommu))
addr |= DMA_RTADDR_RTT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_RTPS), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
u32 val;
unsigned long flag;
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(val & DMA_GSTS_WBFS)), val);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* The return value determines if we need a write-buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
u16 did, u16 source_id, u8 function_mask,
u64 type)
{
u64 val = 0;
unsigned long flag;
switch (type) {
case DMA_CCMD_GLOBAL_INVL:
val = DMA_CCMD_GLOBAL_INVL;
break;
case DMA_CCMD_DOMAIN_INVL:
val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
break;
case DMA_CCMD_DEVICE_INVL:
val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
break;
default:
BUG();
}
val |= DMA_CCMD_ICC;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* The return value determines if we need a write-buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;
switch (type) {
case DMA_TLB_GLOBAL_FLUSH:
/* a global flush doesn't need to set IVA_REG */
val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
break;
case DMA_TLB_DSI_FLUSH:
val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
break;
case DMA_TLB_PSI_FLUSH:
val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
/* IH bit is passed in as part of address */
val_iva = size_order | addr;
break;
default:
BUG();
}
/* Note: set drain read/write */
#if 0
/*
* This is probably only needed to be extra safe. It looks like we
* can ignore it without any impact.
*/
if (cap_read_drain(iommu->cap))
val |= DMA_TLB_READ_DRAIN;
#endif
if (cap_write_drain(iommu->cap))
val |= DMA_TLB_WRITE_DRAIN;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
/* Note: Only uses first TLB reg currently */
if (val_iva)
dmar_writeq(iommu->reg + tlb_offset, val_iva);
dmar_writeq(iommu->reg + tlb_offset + 8, val);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, tlb_offset + 8,
dmar_readq, (!(val & DMA_TLB_IVT)), val);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* check IOTLB invalidation granularity */
if (DMA_TLB_IAIG(val) == 0)
pr_err("Flush IOTLB failed\n");
if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
pr_debug("TLB flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
}
static struct device_domain_info *
iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
struct device_domain_info *info;
assert_spin_locked(&device_domain_lock);
if (!iommu->qi)
return NULL;
list_for_each_entry(info, &domain->devices, link)
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
if (info->ats_supported && info->dev)
return info;
break;
}
return NULL;
}
static void domain_update_iotlb(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool has_iotlb_device = false;
assert_spin_locked(&device_domain_lock);
list_for_each_entry(info, &domain->devices, link) {
struct pci_dev *pdev;
if (!info->dev || !dev_is_pci(info->dev))
continue;
pdev = to_pci_dev(info->dev);
if (pdev->ats_enabled) {
has_iotlb_device = true;
break;
}
}
domain->has_iotlb_device = has_iotlb_device;
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
assert_spin_locked(&device_domain_lock);
if (!info || !dev_is_pci(info->dev))
return;
pdev = to_pci_dev(info->dev);
/* For IOMMUs that support device-IOTLB throttling (DIT), we assign
* PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
* queue depth at PF level. If DIT is not set, PFSID will be treated as
* reserved, which should be set to 0.
*/
if (!ecap_dit(info->iommu->ecap))
info->pfsid = 0;
else {
struct pci_dev *pf_pdev;
/* pdev will be returned if device is not a vf */
pf_pdev = pci_physfn(pdev);
info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
the device if you enable PASID support after ATS support is
undefined. So always enable PASID support on devices which
have it, even if we can't yet know if we're ever going to
use it. */
if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
info->pasid_enabled = 1;
if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
info->pri_enabled = 1;
#endif
if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
domain_update_iotlb(info->domain);
info->ats_qdep = pci_ats_queue_depth(pdev);
}
}
static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
assert_spin_locked(&device_domain_lock);
if (!dev_is_pci(info->dev))
return;
pdev = to_pci_dev(info->dev);
if (info->ats_enabled) {
pci_disable_ats(pdev);
info->ats_enabled = 0;
domain_update_iotlb(info->domain);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
if (info->pri_enabled) {
pci_disable_pri(pdev);
info->pri_enabled = 0;
}
if (info->pasid_enabled) {
pci_disable_pasid(pdev);
info->pasid_enabled = 0;
}
#endif
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
u16 sid, qdep;
unsigned long flags;
struct device_domain_info *info;
if (!domain->has_iotlb_device)
return;
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (!info->ats_enabled)
continue;
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
int ih, int map)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
u16 did = domain->iommu_did[iommu->seq_id];
BUG_ON(pages == 0);
if (ih)
ih = 1 << 6;
/*
* Fall back to a domain-selective flush if there is no PSI support or the
* size is too big.
* PSI requires the page size to be 2^x and the base address to be
* naturally aligned to the size.
*/
if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
else
iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
DMA_TLB_PSI_FLUSH);
/*
* In caching mode, changes of pages from non-present to present require
* flush. However, device IOTLB doesn't need to be flushed in this case.
*/
if (!cap_caching_mode(iommu->cap) || !map)
iommu_flush_dev_iotlb(domain, addr, mask);
}
/* Notification for newly created mappings */
static inline void __mapping_notify_one(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages)
{
/* It's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
else
iommu_flush_write_buffer(iommu);
}
static void iommu_flush_iova(struct iova_domain *iovad)
{
struct dmar_domain *domain;
int idx;
domain = container_of(iovad, struct dmar_domain, iovad);
for_each_domain_iommu(idx, domain) {
struct intel_iommu *iommu = g_iommus[idx];
u16 did = domain->iommu_did[iommu->seq_id];
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
0, MAX_AGAW_PFN_WIDTH);
}
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
u32 pmen;
unsigned long flags;
if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
return;
raw_spin_lock_irqsave(&iommu->register_lock, flags);
pmen = readl(iommu->reg + DMAR_PMEN_REG);
pmen &= ~DMA_PMEN_EPM;
writel(pmen, iommu->reg + DMAR_PMEN_REG);
/* wait for the protected region status bit to clear */
IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
readl, !(pmen & DMA_PMEN_PRS), pmen);
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static void iommu_enable_translation(struct intel_iommu *iommu)
{
u32 sts;
unsigned long flags;
raw_spin_lock_irqsave(&iommu->register_lock, flags);
iommu->gcmd |= DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_TES), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static void iommu_disable_translation(struct intel_iommu *iommu)
{
u32 sts;
unsigned long flag;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->gcmd &= ~DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(sts & DMA_GSTS_TES)), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int iommu_init_domains(struct intel_iommu *iommu)
{
u32 ndomains, nlongs;
size_t size;
ndomains = cap_ndoms(iommu->cap);
pr_debug("%s: Number of Domains supported <%d>\n",
iommu->name, ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
pr_err("%s: Allocating domain id array failed\n",
iommu->name);
return -ENOMEM;
}
size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
iommu->domains = kzalloc(size, GFP_KERNEL);
if (iommu->domains) {
size = 256 * sizeof(struct dmar_domain *);
iommu->domains[0] = kzalloc(size, GFP_KERNEL);
}
if (!iommu->domains || !iommu->domains[0]) {
pr_err("%s: Allocating domain array failed\n",
iommu->name);
kfree(iommu->domain_ids);
kfree(iommu->domains);
iommu->domain_ids = NULL;
iommu->domains = NULL;
return -ENOMEM;
}
/*
* If Caching mode is set, then invalid translations are tagged
* with domain-id 0, hence we need to pre-allocate it. We also
* use domain-id 0 as a marker for non-allocated domain-id, so
* make sure it is not used for a real domain.
*/
set_bit(0, iommu->domain_ids);
return 0;
}
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
struct device_domain_info *info, *tmp;
unsigned long flags;
if (!iommu->domains || !iommu->domain_ids)
return;
again:
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
struct dmar_domain *domain;
if (info->iommu != iommu)
continue;
if (!info->dev || !info->domain)
continue;
domain = info->domain;
__dmar_remove_one_dev_info(info);
if (!domain_type_is_vm_or_si(domain)) {
/*
* The domain_exit() function can't be called under
* device_domain_lock, as it takes this lock itself.
* So release the lock here and re-run the loop
* afterwards.
*/
spin_unlock_irqrestore(&device_domain_lock, flags);
domain_exit(domain);
goto again;
}
}
spin_unlock_irqrestore(&device_domain_lock, flags);
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
}
static void free_dmar_iommu(struct intel_iommu *iommu)
{
if ((iommu->domains) && (iommu->domain_ids)) {
int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
int i;
for (i = 0; i < elems; i++)
kfree(iommu->domains[i]);
kfree(iommu->domains);
kfree(iommu->domain_ids);
iommu->domains = NULL;
iommu->domain_ids = NULL;
}
g_iommus[iommu->seq_id] = NULL;
/* free context mapping */
free_context_table(iommu);
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_enabled(iommu)) {
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
intel_svm_exit(iommu);
}
#endif
}
static struct dmar_domain *alloc_domain(int flags)
{
struct dmar_domain *domain;
domain = alloc_domain_mem();
if (!domain)
return NULL;
memset(domain, 0, sizeof(*domain));
domain->nid = -1;
domain->flags = flags;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
return domain;
}
/* Must be called with iommu->lock held */
static int domain_attach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
unsigned long ndomains;
int num;
assert_spin_locked(&device_domain_lock);
assert_spin_locked(&iommu->lock);
domain->iommu_refcnt[iommu->seq_id] += 1;
domain->iommu_count += 1;
if (domain->iommu_refcnt[iommu->seq_id] == 1) {
ndomains = cap_ndoms(iommu->cap);
num = find_first_zero_bit(iommu->domain_ids, ndomains);
if (num >= ndomains) {
pr_err("%s: No free domain ids\n", iommu->name);
domain->iommu_refcnt[iommu->seq_id] -= 1;
domain->iommu_count -= 1;
return -ENOSPC;
}
set_bit(num, iommu->domain_ids);
set_iommu_domain(iommu, num, domain);
domain->iommu_did[iommu->seq_id] = num;
domain->nid = iommu->node;
domain_update_iommu_cap(domain);
}
return 0;
}
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
int num, count = INT_MAX;
assert_spin_locked(&device_domain_lock);
assert_spin_locked(&iommu->lock);
domain->iommu_refcnt[iommu->seq_id] -= 1;
count = --domain->iommu_count;
if (domain->iommu_refcnt[iommu->seq_id] == 0) {
num = domain->iommu_did[iommu->seq_id];
clear_bit(num, iommu->domain_ids);
set_iommu_domain(iommu, num, NULL);
domain_update_iommu_cap(domain);
domain->iommu_did[iommu->seq_id] = 0;
}
return count;
}
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;
static int dmar_init_reserved_ranges(void)
{
struct pci_dev *pdev = NULL;
struct iova *iova;
int i;
init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
IOVA_PFN(IOAPIC_RANGE_END));
if (!iova) {
pr_err("Reserve IOAPIC range failed\n");
return -ENODEV;
}
/* Reserve all PCI MMIO to avoid peer-to-peer access */
for_each_pci_dev(pdev) {
struct resource *r;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
r = &pdev->resource[i];
if (!r->flags || !(r->flags & IORESOURCE_MEM))
continue;
iova = reserve_iova(&reserved_iova_list,
IOVA_PFN(r->start),
IOVA_PFN(r->end));
if (!iova) {
pr_err("Reserve iova failed\n");
return -ENODEV;
}
}
}
return 0;
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}
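/*
* Round the guest address width up so that (gaw - 12) is a multiple of the
* 9-bit level stride, i.e. to a width the page-table levels can actually
* express: e.g. a gaw of 36 becomes 39, 40 becomes 48, 48 stays 48.
* The result is capped at 64.
*/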
static inline int guestwidth_to_adjustwidth(int gaw)
{
int agaw;
int r = (gaw - 12) % 9;
if (r == 0)
agaw = gaw;
else
agaw = gaw + 9 - r;
if (agaw > 64)
agaw = 64;
return agaw;
}
static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
int guest_width)
{
int adjust_width, agaw;
unsigned long sagaw;
int err;
init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
err = init_iova_flush_queue(&domain->iovad,
iommu_flush_iova, iova_entry_free);
if (err)
return err;
domain_reserve_special_ranges(domain);
/* calculate AGAW */
if (guest_width > cap_mgaw(iommu->cap))
guest_width = cap_mgaw(iommu->cap);
domain->gaw = guest_width;
adjust_width = guestwidth_to_adjustwidth(guest_width);
agaw = width_to_agaw(adjust_width);
sagaw = cap_sagaw(iommu->cap);
if (!test_bit(agaw, &sagaw)) {
/* hardware doesn't support it, choose a bigger one */
pr_debug("Hardware doesn't support agaw %d\n", agaw);
agaw = find_next_bit(&sagaw, 5, agaw);
if (agaw >= 5)
return -ENODEV;
}
domain->agaw = agaw;
if (ecap_coherent(iommu->ecap))
domain->iommu_coherency = 1;
else
domain->iommu_coherency = 0;
if (ecap_sc_support(iommu->ecap))
domain->iommu_snooping = 1;
else
domain->iommu_snooping = 0;
if (intel_iommu_superpage)
domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
else
domain->iommu_superpage = 0;
domain->nid = iommu->node;
/* always allocate the top pgd */
domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
if (!domain->pgd)
return -ENOMEM;
__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
return 0;
}
static void domain_exit(struct dmar_domain *domain)
{
struct page *freelist = NULL;
/* Domain 0 is reserved, so don't process it */
if (!domain)
return;
/* Remove associated devices and clear attached or cached domains */
rcu_read_lock();
domain_remove_dev_info(domain);
rcu_read_unlock();
/* destroy iovas */
put_iova_domain(&domain->iovad);
freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
dma_free_pagelist(freelist);
free_domain_mem(domain);
}
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
u16 did = domain->iommu_did[iommu->seq_id];
int translation = CONTEXT_TT_MULTI_LEVEL;
struct device_domain_info *info = NULL;
struct context_entry *context;
unsigned long flags;
struct dma_pte *pgd;
int ret, agaw;
WARN_ON(did == 0);
if (hw_pass_through && domain_type_is_si(domain))
translation = CONTEXT_TT_PASS_THROUGH;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
BUG_ON(!domain->pgd);
spin_lock_irqsave(&device_domain_lock, flags);
spin_lock(&iommu->lock);
ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
if (!context)
goto out_unlock;
ret = 0;
if (context_present(context))
goto out_unlock;
/*
* For kdump cases, old valid entries may be cached due to the
* in-flight DMA and copied pgtable, but there is no unmapping
* behaviour for them, thus we need an explicit cache flush for
* the newly-mapped device. For kdump, at this point, the device
* is supposed to finish reset at its driver probe stage, so no
* in-flight DMA will exist, and we don't need to worry anymore
* hereafter.
*/
if (context_copied(context)) {
u16 did_old = context_domain_id(context);
if (did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
}
pgd = domain->pgd;
context_clear_entry(context);
context_set_domain_id(context, did);
/*
* Skip top levels of page tables for iommus which have a smaller agaw
* than the default. Unnecessary for PT mode.
*/
if (translation != CONTEXT_TT_PASS_THROUGH) {
for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
ret = -ENOMEM;
pgd = phys_to_virt(dma_pte_addr(pgd));
if (!dma_pte_present(pgd))
goto out_unlock;
}
info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
if (info && info->ats_supported)
translation = CONTEXT_TT_DEV_IOTLB;
else
translation = CONTEXT_TT_MULTI_LEVEL;
context_set_address_root(context, virt_to_phys(pgd));
context_set_address_width(context, agaw);
} else {
/*
* In pass through mode, AW must be programmed to
* indicate the largest AGAW value supported by
* hardware. And ASR is ignored by hardware.
*/
context_set_address_width(context, iommu->msagaw);
}
context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
domain_flush_cache(domain, context, sizeof(*context));
/*
* It's a non-present to present mapping. If hardware doesn't cache
* non-present entries we only need to flush the write-buffer. If it
* _does_ cache non-present entries, then it does so in the special
* domain #0, which we have to flush:
*/
if (cap_caching_mode(iommu->cap)) {
iommu->flush.flush_context(iommu, 0,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
iommu_enable_dev_iotlb(info);
ret = 0;
out_unlock:
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&device_domain_lock, flags);
return ret;
}
struct domain_context_mapping_data {
struct dmar_domain *domain;
struct intel_iommu *iommu;
};
static int domain_context_mapping_cb(struct pci_dev *pdev,
u16 alias, void *opaque)
{
struct domain_context_mapping_data *data = opaque;
return domain_context_mapping_one(data->domain, data->iommu,
PCI_BUS_NUM(alias), alias & 0xff);
}
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
struct domain_context_mapping_data data;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
if (!dev_is_pci(dev))
return domain_context_mapping_one(domain, iommu, bus, devfn);
data.domain = domain;
data.iommu = iommu;
return pci_for_each_dma_alias(to_pci_dev(dev),
&domain_context_mapping_cb, &data);
}
static int domain_context_mapped_cb(struct pci_dev *pdev,
u16 alias, void *opaque)
{
struct intel_iommu *iommu = opaque;
return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
}
static int domain_context_mapped(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
if (!dev_is_pci(dev))
return device_context_mapped(iommu, bus, devfn);
return !pci_for_each_dma_alias(to_pci_dev(dev),
domain_context_mapped_cb, iommu);
}
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
size_t size)
{
host_addr &= ~PAGE_MASK;
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
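/*
* Example, assuming 4KiB MM pages: a buffer starting at page offset 0x800
* with size 0x1900 spans PAGE_ALIGN(0x2100) = 0x3000 bytes, i.e. three
* VT-d pages.
*/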
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
unsigned long iov_pfn,
unsigned long phy_pfn,
unsigned long pages)
{
int support, level = 1;
unsigned long pfnmerge;
support = domain->iommu_superpage;
/* To use a large page, the virtual *and* physical addresses
must be aligned to 2MiB/1GiB/etc. Lower bits set in either
of them will mean we have to use smaller pages. So just
merge them and check both at once. */
pfnmerge = iov_pfn | phy_pfn;
while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
pages >>= VTD_STRIDE_SHIFT;
if (!pages)
break;
pfnmerge >>= VTD_STRIDE_SHIFT;
level++;
support--;
}
return level;
}
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
{
struct dma_pte *first_pte = NULL, *pte = NULL;
phys_addr_t uninitialized_var(pteval);
unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
if (!sg) {
sg_res = nr_pages;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
while (nr_pages > 0) {
uint64_t tmp;
if (!sg_res) {
unsigned int pgoff = sg->offset & ~PAGE_MASK;
sg_res = aligned_nrpages(sg->offset, sg->length);
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
sg->dma_length = sg->length;
pteval = (sg_phys(sg) - pgoff) | prot;
phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
if (!pte) {
largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
if (!pte)
return -ENOMEM;
/* It is a large page */
if (largepage_lvl > 1) {
unsigned long nr_superpages, end_pfn;
pteval |= DMA_PTE_LARGE_PAGE;
lvl_pages = lvl_to_nr_pages(largepage_lvl);
nr_superpages = sg_res / lvl_pages;
end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
/*
* Ensure that old small page tables are
* removed to make room for superpage(s).
* We're adding new large pages, so make sure
* we don't remove their parent tables.
*/
dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
largepage_lvl + 1);
} else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
}
}
/* We don't need lock here, nobody else
* touches the iova range
*/
tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
if (tmp) {
static int dumps = 5;
pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
iov_pfn, tmp, (unsigned long long)pteval);
if (dumps) {
dumps--;
debug_dma_dump_mappings(NULL);
}
WARN_ON(1);
}
lvl_pages = lvl_to_nr_pages(largepage_lvl);
BUG_ON(nr_pages < lvl_pages);
BUG_ON(sg_res < lvl_pages);
nr_pages -= lvl_pages;
iov_pfn += lvl_pages;
phys_pfn += lvl_pages;
pteval += lvl_pages * VTD_PAGE_SIZE;
sg_res -= lvl_pages;
/* If the next PTE would be the first in a new page, then we
need to flush the cache on the entries we've just written.
And then we'll need to recalculate 'pte', so clear it and
let it get set again in the if (!pte) block above.
If we're done (!nr_pages) we need to flush the cache too.
Also if we've been setting superpages, we may need to
recalculate 'pte' and switch back to smaller pages for the
end of the mapping, if the trailing size is not enough to
use another superpage (i.e. sg_res < lvl_pages). */
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
(largepage_lvl > 1 && sg_res < lvl_pages)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
}
if (!sg_res && nr_pages)
sg = sg_next(sg);
}
return 0;
}
static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
{
int ret;
struct intel_iommu *iommu;
/* Do the real mapping first */
ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
if (ret)
return ret;
/* Notify about the new mapping */
if (domain_type_is_vm(domain)) {
/* VM-typed domains can span more than one IOMMU */
int iommu_id;
for_each_domain_iommu(iommu_id, domain) {
iommu = g_iommus[iommu_id];
__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
}
} else {
/* General domains only have one IOMMU */
iommu = domain_get_iommu(domain);
__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
}
return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long nr_pages,
int prot)
{
return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}
static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages,
int prot)
{
return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
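/*
 * Clear the context entry for (bus, devfn) on @iommu and invalidate the
 * context cache and IOTLB for the domain ID that entry used to carry.
 */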
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
unsigned long flags;
struct context_entry *context;
u16 did_old;
if (!iommu)
return;
spin_lock_irqsave(&iommu->lock, flags);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
spin_unlock_irqrestore(&iommu->lock, flags);
return;
}
did_old = context_domain_id(context);
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock_irqrestore(&iommu->lock, flags);
iommu->flush.flush_context(iommu,
did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu,
did_old,
0,
0,
DMA_TLB_DSI_FLUSH);
}
static inline void unlink_domain_info(struct device_domain_info *info)
{
assert_spin_locked(&device_domain_lock);
list_del(&info->link);
list_del(&info->global);
if (info->dev)
info->dev->archdata.iommu = NULL;
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
struct device_domain_info *info, *tmp;
unsigned long flags;
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &domain->devices, link)
__dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
* find_domain
* Note: we use struct device->archdata.iommu to store the info
*/
static struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
/* No lock here; we assume no concurrent domain_exit() in the normal case */
info = dev->archdata.iommu;
if (likely(info))
return info->domain;
return NULL;
}
static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
struct device_domain_info *info;
list_for_each_entry(info, &device_domain_list, global)
if (info->iommu->segment == segment && info->bus == bus &&
info->devfn == devfn)
return info;
return NULL;
}
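/*
 * Bind @dev (or the raw bus/devfn when @dev is NULL) to @domain under
 * @iommu. If the device or one of its DMA aliases already has a domain,
 * that existing domain is returned and the caller must free the one it
 * passed in; otherwise the new device_domain_info is linked in, a PASID
 * table is allocated when supported and, for real devices, the context
 * entry is mapped. Returns the domain actually in use, or NULL on failure.
 */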
static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
int bus, int devfn,
struct device *dev,
struct dmar_domain *domain)
{
struct dmar_domain *found = NULL;
struct device_domain_info *info;
unsigned long flags;
int ret;
info = alloc_devinfo_mem();
if (!info)
return NULL;
info->bus = bus;
info->devfn = devfn;
info->ats_supported = info->pasid_supported = info->pri_supported = 0;
info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
info->ats_qdep = 0;
info->dev = dev;
info->domain = domain;
info->iommu = iommu;
info->pasid_table = NULL;
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
if (!pci_ats_disabled() &&
ecap_dev_iotlb_support(iommu->ecap) &&
pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
dmar_find_matched_atsr_unit(pdev))
info->ats_supported = 1;
if (ecs_enabled(iommu)) {
if (pasid_enabled(iommu)) {
int features = pci_pasid_features(pdev);
if (features >= 0)
info->pasid_supported = features | 1;
}
if (info->ats_supported && ecap_prs(iommu->ecap) &&
pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
info->pri_supported = 1;
}
}
spin_lock_irqsave(&device_domain_lock, flags);
if (dev)
found = find_domain(dev);
if (!found) {
struct device_domain_info *info2;
info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
if (info2) {
found = info2->domain;
info2->dev = dev;
}
}
if (found) {
spin_unlock_irqrestore(&device_domain_lock, flags);
free_devinfo_mem(info);
/* Caller must free the original domain */
return found;
}
spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
spin_unlock(&iommu->lock);
if (ret) {
spin_unlock_irqrestore(&device_domain_lock, flags);
free_devinfo_mem(info);
return NULL;
}
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
if (dev)
dev->archdata.iommu = info;
if (dev && dev_is_pci(dev) && info->pasid_supported) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
pr_warn("No pasid table for %s, pasid disabled\n",
dev_name(dev));
info->pasid_supported = 0;
}
}
spin_unlock_irqrestore(&device_domain_lock, flags);
if (dev && domain_context_mapping(domain, dev)) {
pr_err("Domain context map for %s failed\n", dev_name(dev));
dmar_remove_one_dev_info(domain, dev);
return NULL;
}
return domain;
}
static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
*(u16 *)opaque = alias;
return 0;
}
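/*
 * Return the domain already used by the device's PCI DMA alias, if there
 * is one; otherwise allocate and initialize a fresh domain with the
 * requested guest address width.
 */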
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
struct device_domain_info *info = NULL;
struct dmar_domain *domain = NULL;
struct intel_iommu *iommu;
u16 dma_alias;
unsigned long flags;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return NULL;
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
spin_lock_irqsave(&device_domain_lock, flags);
info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
PCI_BUS_NUM(dma_alias),
dma_alias & 0xff);
if (info) {
iommu = info->iommu;
domain = info->domain;
}
spin_unlock_irqrestore(&device_domain_lock, flags);
/* DMA alias already has a domain, use it */
if (info)
goto out;
}
/* Allocate and initialize new domain for the device */
domain = alloc_domain(0);
if (!domain)
return NULL;
if (domain_init(domain, iommu, gaw)) {
domain_exit(domain);
return NULL;
}
out:
return domain;
}
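/*
 * Attach @domain to @dev, registering the PCI DMA alias first when it
 * differs from the device's own request ID. If the device already ended
 * up in another domain, that domain is returned instead and the caller
 * must dispose of @domain.
 */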
static struct dmar_domain *set_domain_for_dev(struct device *dev,
struct dmar_domain *domain)
{
struct intel_iommu *iommu;
struct dmar_domain *tmp;
u16 req_id, dma_alias;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return NULL;
req_id = ((u16)bus << 8) | devfn;
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
/* register PCI DMA alias device */
if (req_id != dma_alias) {
tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
dma_alias & 0xff, NULL, domain);
if (!tmp || tmp != domain)
return tmp;
}
}
tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
if (!tmp || tmp != domain)
return tmp;
return domain;
}
static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
{
struct dmar_domain *domain, *tmp;
domain = find_domain(dev);
if (domain)
goto out;
domain = find_or_alloc_domain(dev, gaw);
if (!domain)
goto out;
tmp = set_domain_for_dev(dev, domain);
if (!tmp || domain != tmp) {
domain_exit(domain);
domain = tmp;
}
out:
return domain;
}
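/*
 * Reserve the IOVA range covering [start, end] and install a 1:1 mapping
 * for it; any stale PTEs in the range are cleared first, since an RMRR
 * may overlap ordinary physical memory.
 */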
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
unsigned long long end)
{
unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
dma_to_mm_pfn(last_vpfn))) {
pr_err("Reserving iova failed\n");
return -ENOMEM;
}
pr_debug("Mapping reserved region %llx-%llx\n", start, end);
/*
* RMRR range might have overlap with physical memory range,
* clear it first
*/
dma_pte_clear_range(domain, first_vpfn, last_vpfn);
return __domain_mapping(domain, first_vpfn, NULL,
first_vpfn, last_vpfn - first_vpfn + 1,
DMA_PTE_READ|DMA_PTE_WRITE);
}
static int domain_prepare_identity_map(struct device *dev,
struct dmar_domain *domain,
unsigned long long start,
unsigned long long end)
{
/* For _hardware_ passthrough, don't bother. But for software
passthrough, we do it anyway -- it may indicate a memory
range which is reserved in E820 and so didn't get set
up to start with in si_domain */
if (domain == si_domain && hw_pass_through) {
pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
dev_name(dev), start, end);
return 0;
}
pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
dev_name(dev), start, end);
if (end < start) {
WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
return -EIO;
}
if (end >> agaw_to_width(domain->agaw)) {
WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
agaw_to_width(domain->agaw),
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
return -EIO;
}
return iommu_domain_identity_map(domain, start, end);
}
static int iommu_prepare_identity_map(struct device *dev,
unsigned long long start,
unsigned long long end)
{
struct dmar_domain *domain;
int ret;
domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
ret = domain_prepare_identity_map(dev, domain, start, end);
if (ret)
domain_exit(domain);
return ret;
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
struct device *dev)
{
if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return 0;
return iommu_prepare_identity_map(dev, rmrr->base_address,
rmrr->end_address);
}
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
struct pci_dev *pdev;
int ret;
pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (!pdev)
return;
pr_info("Prepare 0-16MiB unity mapping for LPC\n");
ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
if (ret)
pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
static int md_domain_init(struct dmar_domain *domain, int guest_width);
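/*
 * Allocate the static identity (si) domain. For software passthrough
 * (!hw) every usable memory range of every online node is mapped 1:1 up
 * front; hardware passthrough needs no page tables at all.
 */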
static int __init si_domain_init(int hw)
{
int nid, ret = 0;
si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
return -EFAULT;
if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
return -EFAULT;
}
pr_debug("Identity mapping domain allocated\n");
if (hw)
return 0;
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
int i;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
ret = iommu_domain_identity_map(si_domain,
PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
if (ret)
return ret;
}
}
return 0;
}
static int identity_mapping(struct device *dev)
{
struct device_domain_info *info;
if (likely(!iommu_identity_mapping))
return 0;
info = dev->archdata.iommu;
if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
return (info->domain == si_domain);
return 0;
}
static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct dmar_domain *ndomain;
struct intel_iommu *iommu;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
if (ndomain != domain)
return -EBUSY;
return 0;
}
static bool device_has_rmrr(struct device *dev)
{
struct dmar_rmrr_unit *rmrr;
struct device *tmp;
int i;
rcu_read_lock();
for_each_rmrr_units(rmrr) {
/*
* Return TRUE if this RMRR contains the device that
* is passed in.
*/
for_each_active_dev_scope(rmrr->devices,
rmrr->devices_cnt, i, tmp)
if (tmp == dev) {
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
return false;
}
/*
* There are a couple cases where we need to restrict the functionality of
* devices associated with RMRRs. The first is when evaluating a device for
* identity mapping because problems exist when devices are moved in and out
* of domains and their respective RMRR information is lost. This means that
* a device with associated RMRRs will never be in a "passthrough" domain.
* The second is use of the device through the IOMMU API. This interface
* expects to have full control of the IOVA space for the device. We cannot
* satisfy both the requirement that RMRR access is maintained and have an
* unencumbered IOVA space. We also have no ability to quiesce the device's
* use of the RMRR space or even inform the IOMMU API user of the restriction.
* We therefore prevent devices associated with an RMRR from participating in
* the IOMMU API, which eliminates them from device assignment.
*
* In both cases we assume that PCI USB devices with RMRRs have them largely
* for historical reasons and that the RMRR space is not actively used post
* boot. This exclusion may change if vendors begin to abuse it.
*
* The same exception is made for graphics devices, with the requirement that
* any use of the RMRR regions will be torn down before assigning the device
* to a guest.
*/
static bool device_is_rmrr_locked(struct device *dev)
{
if (!device_has_rmrr(dev))
return false;
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
return false;
}
return true;
}
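/*
 * Decide whether @dev should live in the static 1:1 domain, based on the
 * identity-mapping options (azalia/gfx/all), RMRR locking, the device's
 * position in the PCI topology and, after boot (!startup), whether its
 * DMA mask can reach all of memory.
 */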
static int iommu_should_identity_map(struct device *dev, int startup)
{
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
if (device_is_rmrr_locked(dev))
return 0;
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return 1;
if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
return 1;
if (!(iommu_identity_mapping & IDENTMAP_ALL))
return 0;
/*
* We want to start off with all devices in the 1:1 domain, and
* take them out later if we find they can't access all of memory.
*
* However, we can't do this for PCI devices behind bridges,
* because all PCI devices behind the same bridge will end up
* with the same source-id on their transactions.
*
* Practically speaking, we can't change things around for these
* devices at run-time, because we can't be sure there'll be no
* DMA transactions in flight for any of their siblings.
*
* So PCI devices (unless they're on the root bus) as well as
* their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
* the 1:1 domain, just in _case_ one of their siblings turns out
* not to be able to map all of memory.
*/
if (!pci_is_pcie(pdev)) {
if (!pci_is_root_bus(pdev->bus))
return 0;
if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
return 0;
} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
return 0;
} else {
if (device_has_rmrr(dev))
return 0;
}
/*
* At boot time, we don't yet know if devices will be 64-bit capable.
* Assume that they will — if they turn out not to be, then we can
* take them out of the 1:1 domain later.
*/
if (!startup) {
/*
* If the device's dma_mask is less than the system's memory
* size then this is not a candidate for identity mapping.
*/
u64 dma_mask = *dev->dma_mask;
if (dev->coherent_dma_mask &&
dev->coherent_dma_mask < dma_mask)
dma_mask = dev->coherent_dma_mask;
return dma_mask >= dma_get_required_mask(dev);
}
return 1;
}
static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
{
int ret;
if (!iommu_should_identity_map(dev, 1))
return 0;
ret = domain_add_dev_info(si_domain, dev);
if (!ret)
pr_info("%s identity mapping for device %s\n",
hw ? "Hardware" : "Software", dev_name(dev));
else if (ret == -ENODEV)
/* device not associated with an iommu */
ret = 0;
return ret;
}
static int __init iommu_prepare_static_identity_mapping(int hw)
{
struct pci_dev *pdev = NULL;
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
struct device *dev;
int i;
int ret = 0;
for_each_pci_dev(pdev) {
ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
if (ret)
return ret;
}
for_each_active_iommu(iommu, drhd)
for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
struct acpi_device_physical_node *pn;
struct acpi_device *adev;
if (dev->bus != &acpi_bus_type)
continue;
adev = to_acpi_device(dev);
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn, &adev->physical_node_list, node) {
ret = dev_prepare_static_identity_mapping(pn->dev, hw);
if (ret)
break;
}
mutex_unlock(&adev->physical_node_lock);
if (ret)
return ret;
}
return 0;
}
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
/*
* Start from a sane IOMMU hardware state.
* If queued invalidation was already initialized by us
* (for example, while enabling interrupt remapping) then
* things are already rolling from a sane state.
*/
if (!iommu->qi) {
/*
* Clear any previous faults.
*/
dmar_fault(-1, iommu);
/*
* Disable queued invalidation if supported and already enabled
* before OS handover.
*/
dmar_disable_qi(iommu);
}
if (dmar_enable_qi(iommu)) {
/*
* Queued invalidation could not be enabled; use register-based invalidation
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
pr_info("%s: Using Register based invalidation\n",
iommu->name);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
pr_info("%s: Using Queued invalidation\n", iommu->name);
}
}
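/*
 * Copy one bus's context table from the old kernel (kdump handover) so
 * that DMA set up before the crash keeps working while this kernel takes
 * over. @ext selects the extended root/context table layout.
 */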
static int copy_context_table(struct intel_iommu *iommu,
struct root_entry *old_re,
struct context_entry **tbl,
int bus, bool ext)
{
int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
struct context_entry *new_ce = NULL, ce;
struct context_entry *old_ce = NULL;
struct root_entry re;
phys_addr_t old_ce_phys;
tbl_idx = ext ? bus * 2 : bus;
memcpy(&re, old_re, sizeof(re));
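/*
 * In extended mode each context entry is twice the legacy size, so one
 * page holds only 128 of them: devfns 0x00-0x7f are reached through the
 * lower context table pointer and 0x80-0xff through the upper one, and
 * every bus therefore occupies two slots in the new table array.
 */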
for (devfn = 0; devfn < 256; devfn++) {
/* First calculate the correct index */
idx = (ext ? devfn * 2 : devfn) % 256;
if (idx == 0) {
/* First save what we may have and clean up */
if (new_ce) {
tbl[tbl_idx] = new_ce;
__iommu_flush_cache(iommu, new_ce,
VTD_PAGE_SIZE);
pos = 1;
}
if (old_ce)
memunmap(old_ce);
ret = 0;
if (devfn < 0x80)
old_ce_phys = root_entry_lctp(&re);
else
old_ce_phys = root_entry_uctp(&re);
if (!old_ce_phys) {
if (ext && devfn == 0) {
/* No LCTP, try UCTP */
devfn = 0x7f;
continue;
} else {
goto out;
}
}
ret = -ENOMEM;
old_ce = memremap(old_ce_phys, PAGE_SIZE,
MEMREMAP_WB);
if (!old_ce)
goto out;
new_ce = alloc_pgtable_page(iommu->node);
if (!new_ce)
goto out_unmap;
ret = 0;
}
/* Now copy the context entry */
memcpy(&ce, old_ce + idx, sizeof(ce));
if (!__context_present(&ce))