-rw-r--r--  Documentation/ABI/testing/evm  23
-rw-r--r--  Documentation/kernel-parameters.txt  6
-rw-r--r--  MAINTAINERS  7
-rw-r--r--  arch/m68k/kernel/vmlinux.lds_no.S  1
-rw-r--r--  arch/m68k/mac/macints.c  2
-rw-r--r--  arch/m68k/mac/misc.c  40
-rw-r--r--  arch/x86/include/asm/xen/page.h  6
-rw-r--r--  arch/x86/kernel/kprobes.c  4
-rw-r--r--  arch/x86/pci/xen.c  32
-rw-r--r--  arch/x86/xen/Kconfig  11
-rw-r--r--  arch/x86/xen/enlighten.c  1
-rw-r--r--  arch/x86/xen/mmu.c  52
-rw-r--r--  arch/x86/xen/p2m.c  128
-rw-r--r--  arch/x86/xen/setup.c  284
-rw-r--r--  drivers/block/xen-blkback/blkback.c  2
-rw-r--r--  drivers/char/tpm/tpm.c  3
-rw-r--r--  drivers/pci/xen-pcifront.c  5
-rw-r--r--  drivers/target/target_core_fabric_lib.c  12
-rw-r--r--  drivers/xen/Kconfig  10
-rw-r--r--  drivers/xen/Makefile  4
-rw-r--r--  drivers/xen/balloon.c  62
-rw-r--r--  drivers/xen/events.c  47
-rw-r--r--  drivers/xen/gntdev.c  39
-rw-r--r--  drivers/xen/grant-table.c  6
-rw-r--r--  drivers/xen/pci.c  105
-rw-r--r--  drivers/xen/swiotlb-xen.c  70
-rw-r--r--  drivers/xen/xen-pciback/conf_space.c  1
-rw-r--r--  drivers/xen/xen-pciback/conf_space_header.c  5
-rw-r--r--  drivers/xen/xen-pciback/conf_space_quirks.c  3
-rw-r--r--  drivers/xen/xen-pciback/passthrough.c  34
-rw-r--r--  drivers/xen/xen-pciback/pci_stub.c  35
-rw-r--r--  drivers/xen/xen-pciback/pciback.h  32
-rw-r--r--  drivers/xen/xen-pciback/pciback_ops.c  1
-rw-r--r--  drivers/xen/xen-pciback/vpci.c  35
-rw-r--r--  drivers/xen/xen-pciback/xenbus.c  27
-rw-r--r--  drivers/xen/xen-selfballoon.c  67
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c  4
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c  101
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_backend.c  2
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c  121
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c  17
-rw-r--r--  drivers/zorro/zorro-driver.c  8
-rw-r--r--  fs/attr.c  5
-rw-r--r--  fs/btrfs/xattr.c  50
-rw-r--r--  fs/cifs/xattr.c  40
-rw-r--r--  fs/ext2/xattr_security.c  34
-rw-r--r--  fs/ext3/xattr_security.c  36
-rw-r--r--  fs/ext4/xattr_security.c  36
-rw-r--r--  fs/gfs2/inode.c  38
-rw-r--r--  fs/jffs2/security.c  35
-rw-r--r--  fs/jfs/xattr.c  57
-rw-r--r--  fs/ocfs2/xattr.c  38
-rw-r--r--  fs/reiserfs/xattr_security.c  4
-rw-r--r--  fs/xattr.c  63
-rw-r--r--  fs/xfs/xfs_iops.c  39
-rw-r--r--  include/asm-generic/vmlinux.lds.h  1
-rw-r--r--  include/linux/evm.h  100
-rw-r--r--  include/linux/ima.h  13
-rw-r--r--  include/linux/integrity.h  39
-rw-r--r--  include/linux/kernel.h  2
-rw-r--r--  include/linux/security.h  32
-rw-r--r--  include/linux/xattr.h  19
-rw-r--r--  include/xen/balloon.h  5
-rw-r--r--  include/xen/grant_table.h  1
-rw-r--r--  include/xen/interface/io/xs_wire.h  6
-rw-r--r--  include/xen/interface/physdev.h  34
-rw-r--r--  include/xen/page.h  12
-rw-r--r--  kernel/cred.c  18
-rw-r--r--  lib/hexdump.c  15
-rw-r--r--  mm/shmem.c  4
-rw-r--r--  security/Kconfig  6
-rw-r--r--  security/Makefile  4
-rw-r--r--  security/apparmor/apparmorfs.c  2
-rw-r--r--  security/apparmor/ipc.c  1
-rw-r--r--  security/apparmor/lib.c  1
-rw-r--r--  security/apparmor/policy_unpack.c  12
-rw-r--r--  security/apparmor/procattr.c  1
-rw-r--r--  security/commoncap.c  16
-rw-r--r--  security/integrity/Kconfig  7
-rw-r--r--  security/integrity/Makefile  12
-rw-r--r--  security/integrity/evm/Kconfig  13
-rw-r--r--  security/integrity/evm/Makefile  7
-rw-r--r--  security/integrity/evm/evm.h  38
-rw-r--r--  security/integrity/evm/evm_crypto.c  216
-rw-r--r--  security/integrity/evm/evm_main.c  384
-rw-r--r--  security/integrity/evm/evm_posix_acl.c  26
-rw-r--r--  security/integrity/evm/evm_secfs.c  108
-rw-r--r--  security/integrity/iint.c  172
-rw-r--r--  security/integrity/ima/Kconfig  1
-rw-r--r--  security/integrity/ima/Makefile  2
-rw-r--r--  security/integrity/ima/ima.h  30
-rw-r--r--  security/integrity/ima/ima_api.c  7
-rw-r--r--  security/integrity/ima/ima_fs.c  2
-rw-r--r--  security/integrity/ima/ima_iint.c  169
-rw-r--r--  security/integrity/ima/ima_main.c  13
-rw-r--r--  security/integrity/integrity.h  50
-rw-r--r--  security/keys/Makefile  2
-rw-r--r--  security/keys/encrypted-keys/Makefile  6
-rw-r--r--  security/keys/encrypted-keys/ecryptfs_format.c (renamed from security/keys/ecryptfs_format.c)  0
-rw-r--r--  security/keys/encrypted-keys/ecryptfs_format.h (renamed from security/keys/ecryptfs_format.h)  0
-rw-r--r--  security/keys/encrypted-keys/encrypted.c (renamed from security/keys/encrypted.c)  49
-rw-r--r--  security/keys/encrypted-keys/encrypted.h (renamed from security/keys/encrypted.h)  11
-rw-r--r--  security/keys/encrypted-keys/masterkey_trusted.c  45
-rw-r--r--  security/keys/gc.c  386
-rw-r--r--  security/keys/internal.h  4
-rw-r--r--  security/keys/key.c  121
-rw-r--r--  security/keys/keyring.c  3
-rw-r--r--  security/keys/process_keys.c  16
-rw-r--r--  security/keys/trusted.c  19
-rw-r--r--  security/security.c  76
-rw-r--r--  security/selinux/exports.c  1
-rw-r--r--  security/selinux/hooks.c  13
-rw-r--r--  security/selinux/include/avc_ss.h  6
-rw-r--r--  security/selinux/include/security.h  8
-rw-r--r--  security/selinux/netlink.c  2
-rw-r--r--  security/selinux/nlmsgtab.c  1
-rw-r--r--  security/selinux/selinuxfs.c  5
-rw-r--r--  security/selinux/ss/conditional.c  2
-rw-r--r--  security/selinux/ss/conditional.h  1
-rw-r--r--  security/selinux/ss/policydb.c  2
-rw-r--r--  security/selinux/ss/services.c  3
-rw-r--r--  security/smack/smack.h  24
-rw-r--r--  security/smack/smack_access.c  134
-rw-r--r--  security/smack/smack_lsm.c  266
-rw-r--r--  security/smack/smackfs.c  277
-rw-r--r--  security/tomoyo/Kconfig  2
-rw-r--r--  security/tomoyo/Makefile  4
-rw-r--r--  security/tomoyo/audit.c  7
-rw-r--r--  security/tomoyo/common.c  228
-rw-r--r--  security/tomoyo/common.h  189
-rw-r--r--  security/tomoyo/condition.c  71
-rw-r--r--  security/tomoyo/domain.c  209
-rw-r--r--  security/tomoyo/environ.c  122
-rw-r--r--  security/tomoyo/file.c  42
-rw-r--r--  security/tomoyo/gc.c  540
-rw-r--r--  security/tomoyo/group.c  61
-rw-r--r--  security/tomoyo/memory.c  39
-rw-r--r--  security/tomoyo/network.c  771
-rw-r--r--  security/tomoyo/realpath.c  32
-rw-r--r--  security/tomoyo/securityfs_if.c  123
-rw-r--r--  security/tomoyo/tomoyo.c  62
-rw-r--r--  security/tomoyo/util.c  80
142 files changed, 5642 insertions, 1954 deletions
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
new file mode 100644
index 000000000000..8374d4557e5d
--- /dev/null
+++ b/Documentation/ABI/testing/evm
@@ -0,0 +1,23 @@
+What: security/evm
+Date: March 2011
+Contact: Mimi Zohar <zohar@us.ibm.com>
+Description:
+ EVM protects a file's security extended attributes(xattrs)
+ against integrity attacks. The initial method maintains an
+ HMAC-sha1 value across the extended attributes, storing the
+ value as the extended attribute 'security.evm'.
+
+ EVM depends on the Kernel Key Retention System to provide it
+ with a trusted/encrypted key for the HMAC-sha1 operation.
+ The key is loaded onto the root's keyring using keyctl. Until
+ EVM receives notification that the key has been successfully
+ loaded onto the keyring (echo 1 > <securityfs>/evm), EVM
+ can not create or validate the 'security.evm' xattr, but
+ returns INTEGRITY_UNKNOWN. Loading the key and signaling EVM
+ should be done as early as possible. Normally this is done
+ in the initramfs, which has already been measured as part
+ of the trusted boot. For more information on creating and
+ loading existing trusted/encrypted keys, refer to:
+ Documentation/keys-trusted-encrypted.txt. (A sample dracut
+ patch, which loads the trusted/encrypted key and enables
+ EVM, is available from http://linux-ima.sourceforge.net/#EVM.)
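
[Editor's note: the ABI text above boils down to two userspace steps at early boot: load the trusted/encrypted key named "evm" onto root's keyring, then write '1' to the EVM securityfs file. The standalone C sketch below (not part of this patch) illustrates that sequence with libkeyutils; the key-blob path, the "load <blob>" payload format and the /sys/kernel/security mount point are assumptions drawn from Documentation/keys-trusted-encrypted.txt and common setups, not from this commit.]

/*
 * Illustrative only -- not part of this patch.  Re-instantiates a
 * previously saved encrypted-key blob as key "evm" on the user keyring,
 * then signals EVM through securityfs.  The blob path is hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <keyutils.h>

int main(void)
{
	char payload[4096] = "load ";
	FILE *f = fopen("/etc/keys/evm-key.blob", "r");	/* hypothetical path */
	key_serial_t key;
	int fd;

	if (!f || !fgets(payload + 5, sizeof(payload) - 5, f))
		return 1;
	fclose(f);
	payload[strcspn(payload, "\n")] = '\0';	/* drop trailing newline */

	/* "load <hex blob>" re-creates the saved encrypted key. */
	key = add_key("encrypted", "evm", payload, strlen(payload),
		      KEY_SPEC_USER_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}

	/* Equivalent of: echo 1 > <securityfs>/evm */
	fd = open("/sys/kernel/security/evm", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable EVM");
		return 1;
	}
	close(fd);
	return 0;
}
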
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d6e6724446c8..d317f6cf0d35 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -49,6 +49,7 @@ parameter is applicable:
EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
EFI EFI Partitioning (GPT) is enabled
EIDE EIDE/ATAPI support is enabled.
+ EVM Extended Verification Module
FB The frame buffer device is enabled.
FTRACE Function tracing enabled.
GCOV GCOV profiling is enabled.
@@ -760,6 +761,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
This option is obsoleted by the "netdev=" option, which
has equivalent usage. See its documentation for details.
+ evm= [EVM]
+ Format: { "fix" }
+ Permit 'security.evm' to be updated regardless of
+ current integrity status.
+
failslab=
fail_page_alloc=
fail_make_request=[KNL]
diff --git a/MAINTAINERS b/MAINTAINERS
index 6185d0513584..51a8160dc549 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2552,6 +2552,11 @@ S: Maintained
F: Documentation/filesystems/ext4.txt
F: fs/ext4/
+Extended Verification Module (EVM)
+M: Mimi Zohar <zohar@us.ibm.com>
+S: Supported
+F: security/integrity/evm/
+
F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <khali@linux-fr.org>
L: lm-sensors@lm-sensors.org
@@ -6447,7 +6452,7 @@ L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English)
L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
W: http://tomoyo.sourceforge.jp/
-T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.4.x/tomoyo-lsm/patches/
+T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.5.x/tomoyo-lsm/patches/
S: Maintained
F: security/tomoyo/
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
index 7dc4087a9545..4e2389340837 100644
--- a/arch/m68k/kernel/vmlinux.lds_no.S
+++ b/arch/m68k/kernel/vmlinux.lds_no.S
@@ -77,7 +77,6 @@ SECTIONS {
*(.rodata) *(.rodata.*)
*(__vermagic) /* Kernel version magic */
- *(__markers_strings)
*(.rodata1)
*(.rodata.str1.1)
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 900d899f3323..f92190c159b4 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -370,7 +370,7 @@ int mac_irq_pending(unsigned int irq)
break;
case 4:
if (psc_present)
- psc_irq_pending(irq);
+ return psc_irq_pending(irq);
break;
}
return 0;
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index e023fc6b37e5..eb915551de69 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -304,35 +304,41 @@ static void via_write_pram(int offset, __u8 data)
static long via_read_time(void)
{
union {
- __u8 cdata[4];
- long idata;
+ __u8 cdata[4];
+ long idata;
} result, last_result;
- int ct;
+ int count = 1;
+
+ via_pram_command(0x81, &last_result.cdata[3]);
+ via_pram_command(0x85, &last_result.cdata[2]);
+ via_pram_command(0x89, &last_result.cdata[1]);
+ via_pram_command(0x8D, &last_result.cdata[0]);
/*
* The NetBSD guys say to loop until you get the same reading
* twice in a row.
*/
- ct = 0;
- do {
- if (++ct > 10) {
- printk("via_read_time: couldn't get valid time, "
- "last read = 0x%08lx and 0x%08lx\n",
- last_result.idata, result.idata);
- break;
- }
-
- last_result.idata = result.idata;
- result.idata = 0;
-
+ while (1) {
via_pram_command(0x81, &result.cdata[3]);
via_pram_command(0x85, &result.cdata[2]);
via_pram_command(0x89, &result.cdata[1]);
via_pram_command(0x8D, &result.cdata[0]);
- } while (result.idata != last_result.idata);
- return result.idata - RTC_OFFSET;
+ if (result.idata == last_result.idata)
+ return result.idata - RTC_OFFSET;
+
+ if (++count > 10)
+ break;
+
+ last_result.idata = result.idata;
+ }
+
+ pr_err("via_read_time: failed to read a stable value; "
+ "got 0x%08lx then 0x%08lx\n",
+ last_result.idata, result.idata);
+
+ return 0;
}
/*
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 7ff4669580cf..c34f96c2f7a0 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -12,6 +12,7 @@
#include <asm/pgtable.h>
#include <xen/interface/xen.h>
+#include <xen/grant_table.h>
#include <xen/features.h>
/* Xen machine address */
@@ -48,14 +49,11 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
extern int m2p_add_override(unsigned long mfn, struct page *page,
- bool clear_pte);
+ struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page, bool clear_pte);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
-#ifdef CONFIG_XEN_DEBUG_FS
-extern int p2m_dump_show(struct seq_file *m, void *v);
-#endif
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index f1a6244d7d93..794bc95134cd 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -75,8 +75,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
* Undefined/reserved opcodes, conditional jump, Opcode Extension
* Groups, and some special opcodes can not boost.
+ * This is non-const to keep gcc from statically optimizing it out, as
+ * variable_test_bit makes gcc think only *(unsigned long*) is used.
*/
-static const u32 twobyte_is_boostable[256 / 32] = {
+static u32 twobyte_is_boostable[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ---------------------------------------------- */
W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 1017c7bee388..492ade8c978e 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -175,8 +175,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
"pcifront-msi-x" :
"pcifront-msi",
DOMID_SELF);
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto free;
+ }
i++;
}
kfree(v);
@@ -221,8 +223,10 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (msg.data != XEN_PIRQ_MSI_DATA ||
xen_irq_from_pirq(pirq) < 0) {
pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0)
+ if (pirq < 0) {
+ irq = -ENODEV;
goto error;
+ }
xen_msi_compose_msg(dev, pirq, &msg);
__write_msi_msg(msidesc, &msg);
dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
@@ -244,10 +248,12 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
error:
dev_err(&dev->dev,
"Xen PCI frontend has not registered MSI/MSI-X support!\n");
- return -ENODEV;
+ return irq;
}
#ifdef CONFIG_XEN_DOM0
+static bool __read_mostly pci_seg_supported = true;
+
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int ret = 0;
@@ -265,10 +271,11 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
memset(&map_irq, 0, sizeof(map_irq));
map_irq.domid = domid;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
+ map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
map_irq.index = -1;
map_irq.pirq = -1;
- map_irq.bus = dev->bus->number;
+ map_irq.bus = dev->bus->number |
+ (pci_domain_nr(dev->bus) << 16);
map_irq.devfn = dev->devfn;
if (type == PCI_CAP_ID_MSIX) {
@@ -285,7 +292,20 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
}
- ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ ret = -EINVAL;
+ if (pci_seg_supported)
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
+ &map_irq);
+ if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
+ map_irq.type = MAP_PIRQ_TYPE_MSI;
+ map_irq.index = -1;
+ map_irq.pirq = -1;
+ map_irq.bus = dev->bus->number;
+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
+ &map_irq);
+ if (ret != -EINVAL)
+ pci_seg_supported = false;
+ }
if (ret) {
dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n",
ret, domid);
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 5cc821cb2e09..26c731a106af 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -25,8 +25,7 @@ config XEN_PRIVILEGED_GUEST
config XEN_PVHVM
def_bool y
- depends on XEN
- depends on X86_LOCAL_APIC
+ depends on XEN && PCI && X86_LOCAL_APIC
config XEN_MAX_DOMAIN_MEMORY
int
@@ -49,11 +48,3 @@ config XEN_DEBUG_FS
help
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
-
-config XEN_DEBUG
- bool "Enable Xen debug checks"
- depends on XEN
- default n
- help
- Enable various WARN_ON checks in the Xen MMU code.
- Enabling this option WILL incur a significant performance overhead.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2d69617950f7..da8afd576a6b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -251,6 +251,7 @@ static void __init xen_init_cpuid_mask(void)
~((1 << X86_FEATURE_APIC) | /* disable local APIC */
(1 << X86_FEATURE_ACPI)); /* disable ACPI */
ax = 1;
+ cx = 0;
xen_cpuid(&ax, &bx, &cx, &dx);
xsave_mask =
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3dd53f997b11..87f6673b1207 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -495,41 +495,6 @@ static pte_t xen_make_pte(pteval_t pte)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
-#ifdef CONFIG_XEN_DEBUG
-pte_t xen_make_pte_debug(pteval_t pte)
-{
- phys_addr_t addr = (pte & PTE_PFN_MASK);
- phys_addr_t other_addr;
- bool io_page = false;
- pte_t _pte;
-
- if (pte & _PAGE_IOMAP)
- io_page = true;
-
- _pte = xen_make_pte(pte);
-
- if (!addr)
- return _pte;
-
- if (io_page &&
- (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
- other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
- WARN_ONCE(addr != other_addr,
- "0x%lx is using VM_IO, but it is 0x%lx!\n",
- (unsigned long)addr, (unsigned long)other_addr);
- } else {
- pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
- other_addr = (_pte.pte & PTE_PFN_MASK);
- WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
- "0x%lx is missing VM_IO (and wasn't fixed)!\n",
- (unsigned long)addr);
- }
-
- return _pte;
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
-#endif
-
static pgd_t xen_make_pgd(pgdval_t pgd)
{
pgd = pte_pfn_to_mfn(pgd);
@@ -1992,9 +1957,6 @@ void __init xen_ident_map_ISA(void)
static void __init xen_post_allocator_init(void)
{
-#ifdef CONFIG_XEN_DEBUG
- pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
-#endif
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
@@ -2404,17 +2366,3 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
-
-#ifdef CONFIG_XEN_DEBUG_FS
-static int p2m_dump_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, p2m_dump_show, NULL);
-}
-
-static const struct file_operations p2m_dump_fops = {
- .open = p2m_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 58efeb9d5440..1b267e75158d 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -161,7 +161,9 @@
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
+#include <xen/grant_table.h>
+#include "multicalls.h"
#include "xen-ops.h"
static void __init m2p_override_init(void);
@@ -676,7 +678,8 @@ static unsigned long mfn_hash(unsigned long mfn)
}
/* Add an MFN override for a particular page */
-int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
+int m2p_add_override(unsigned long mfn, struct page *page,
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long pfn;
@@ -692,16 +695,28 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
"m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
-
- page->private = mfn;
+ WARN_ON(PagePrivate(page));
+ SetPagePrivate(page);
+ set_page_private(page, mfn);
page->index = pfn_to_mfn(pfn);
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
return -ENOMEM;
- if (clear_pte && !PageHighMem(page))
- /* Just zap old mapping for now */
- pte_clear(&init_mm, address, ptep);
+ if (kmap_op != NULL) {
+ if (!PageHighMem(page)) {
+ struct multicall_space mcs =
+ xen_mc_entry(sizeof(*kmap_op));
+
+ MULTI_grant_table_op(mcs.mc,
+ GNTTABOP_map_grant_ref, kmap_op, 1);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+ }
+ /* let's use dev_bus_addr to record the old mfn instead */
+ kmap_op->dev_bus_addr = page->index;
+ page->index = (unsigned long) kmap_op;
+ }
spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
spin_unlock_irqrestore(&m2p_override_lock, flags);
@@ -735,13 +750,56 @@ int m2p_remove_override(struct page *page, bool clear_pte)
spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
- set_phys_to_machine(pfn, page->index);
+ WARN_ON(!PagePrivate(page));
+ ClearPagePrivate(page);
- if (clear_pte && !PageHighMem(page))
- set_pte_at(&init_mm, address, ptep,
- pfn_pte(pfn, PAGE_KERNEL));
- /* No tlb flush necessary because the caller already
- * left the pte unmapped. */
+ if (clear_pte) {
+ struct gnttab_map_grant_ref *map_op =
+ (struct gnttab_map_grant_ref *) page->index;
+ set_phys_to_machine(pfn, map_op->dev_bus_addr);
+ if (!PageHighMem(page)) {
+ struct multicall_space mcs;
+ struct gnttab_unmap_grant_ref *unmap_op;
+
+ /*
+ * It might be that we queued all the m2p grant table
+ * hypercalls in a multicall, then m2p_remove_override
+ * get called before the multicall has actually been
+ * issued. In this case handle is going to -1 because
+ * it hasn't been modified yet.
+ */
+ if (map_op->handle == -1)
+ xen_mc_flush();
+ /*
+ * Now if map_op->handle is negative it means that the
+ * hypercall actually returned an error.
+ */
+ if (map_op->handle == GNTST_general_error) {
+ printk(KERN_WARNING "m2p_remove_override: "
+ "pfn %lx mfn %lx, failed to modify kernel mappings",
+ pfn, mfn);
+ return -1;
+ }
+
+ mcs = xen_mc_entry(
+ sizeof(struct gnttab_unmap_grant_ref));
+ unmap_op = mcs.args;
+ unmap_op->host_addr = map_op->host_addr;
+ unmap_op->handle = map_op->handle;
+ unmap_op->dev_bus_addr = 0;
+
+ MULTI_grant_table_op(mcs.mc,
+ GNTTABOP_unmap_grant_ref, unmap_op, 1);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ set_pte_at(&init_mm, address, ptep,
+ pfn_pte(pfn, PAGE_KERNEL));
+ __flush_tlb_single(address);
+ map_op->host_addr = 0;
+ }
+ } else
+ set_phys_to_machine(pfn, page->index);
return 0;
}
@@ -758,7 +816,7 @@ struct page *m2p_find_override(unsigned long mfn)
spin_lock_irqsave(&m2p_override_lock, flags);
list_for_each_entry(p, bucket, lru) {
- if (p->private == mfn) {
+ if (page_private(p) == mfn) {
ret = p;
break;
}
@@ -782,17 +840,21 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
#ifdef CONFIG_XEN_DEBUG_FS
-
-int p2m_dump_show(struct seq_file *m, void *v)
+#include <linux/debugfs.h>
+#include "debugfs.h"
+static int p2m_dump_show(struct seq_file *m, void *v)
{
static const char * const level_name[] = { "top", "middle",
- "entry", "abnormal" };
- static const char * const type_name[] = { "identity", "missing",
- "pfn", "abnormal"};
+ "entry", "abnormal", "error"};
#define TYPE_IDENTITY 0
#define TYPE_MISSING 1
#define TYPE_PFN 2
#define TYPE_UNKNOWN 3
+ static const char * const type_name[] = {
+ [TYPE_IDENTITY] = "identity",
+ [TYPE_MISSING] = "missing",
+ [TYPE_PFN] = "pfn",
+ [TYPE_UNKNOWN] = "abnormal"};
unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
unsigned int uninitialized_var(prev_level);
unsigned int uninitialized_var(prev_type);
@@ -856,4 +918,32 @@ int p2m_dump_show(struct seq_file *m, void *v)
#undef TYPE_PFN
#undef TYPE_UNKNOWN
}
-#endif
+
+static int p2m_dump_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, p2m_dump_show, NULL);
+}
+
+static const struct file_operations p2m_dump_fops = {
+ .open = p2m_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *d_mmu_debug;
+
+static int __init xen_p2m_debugfs(void)
+{
+ struct dentry *d_xen = xen_init_debugfs();
+
+ if (d_xen == NULL)
+ return -ENOMEM;
+
+ d_mmu_debug = debugfs_create_dir("mmu", d_xen);
+
+ debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
+ return 0;
+}
+fs_initcall(xen_p2m_debugfs);
+#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 46d6d21dbdbe..38d0af4fefec 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,7 +37,10 @@ extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
-phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+
+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
/*
* The maximum amount of extra memory compared to the base size. The
@@ -51,48 +54,47 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
*/
#define EXTRA_MEM_RATIO (10)
-static void __init xen_add_extra_mem(unsigned long pages)
+static void __init xen_add_extra_mem(u64 start, u64 size)
{
unsigned long pfn;
+ int i;
- u64 size = (u64)pages * PAGE_SIZE;
- u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
-
- if (!pages)
- return;
-
- e820_add_region(extra_start, size, E820_RAM);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-
- memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ /* Add new region. */
+ if (xen_extra_mem[i].size == 0) {
+ xen_extra_mem[i].start = start;
+ xen_extra_mem[i].size = size;
+ break;
+ }
+ /* Append to existing region. */
+ if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
+ xen_extra_mem[i].size += size;
+ break;
+ }
+ }
+ if (i == XEN_EXTRA_MEM_MAX_REGIONS)
+ printk(KERN_WARNING "Warning: not enough extra memory regions\n");
- xen_extra_mem_size += size;
+ memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
- xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+ xen_max_p2m_pfn = PFN_DOWN(start + size);
- for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
+ for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
-static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
- phys_addr_t end_addr)
+static unsigned long __init xen_release_chunk(unsigned long start,
+ unsigned long end)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
.domid = DOMID_SELF
};
- unsigned long start, end;
unsigned long len = 0;
unsigned long pfn;
int ret;
- start = PFN_UP(start_addr);
- end = PFN_DOWN(end_addr);
-
- if (end <= start)
- return 0;
-
for(pfn = start; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -117,72 +119,52 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
return len;
}
-static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
- const struct e820map *e820)
+static unsigned long __init xen_set_identity_and_release(
+ const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
- phys_addr_t max_addr = PFN_PHYS(max_pfn);
- phys_addr_t last_end = ISA_END_ADDRESS;
+ phys_addr_t start = 0;
unsigned long released = 0;
- int i;
-
- /* Free any unused memory above the low 1Mbyte. */
- for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
- phys_addr_t end = e820->map[i].addr;
- end = min(max_addr, end);
-
- if (last_end < end)
- released += xen_release_chunk(last_end, end);
- last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
- }
-
- if (last_end < max_addr)
- released += xen_release_chunk(last_end, max_addr);
-
- printk(KERN_INFO "released %lu pages of unused memory\n", released);
- return released;
-}
-
-static unsigned long __init xen_set_identity(const struct e820entry *list,
- ssize_t map_size)
-{
- phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
- phys_addr_t start_pci = last;
- const struct e820entry *entry;
unsigned long identity = 0;
+ const struct e820entry *entry;
int i;
+ /*
+ * Combine non-RAM regions and gaps until a RAM region (or the
+ * end of the map) is reached, then set the 1:1 map and
+ * release the pages (if available) in those non-RAM regions.
+ *
+ * The combined non-RAM regions are rounded to a whole number
+ * of pages so any partial pages are accessible via the 1:1
+ * mapping. This is needed for some BIOSes that put (for
+ * example) the DMI tables in a reserved region that begins on
+ * a non-page boundary.
+ */
for (i = 0, entry = list; i < map_size; i++, entry++) {
- phys_addr_t start = entry->addr;
- phys_addr_t end = start + entry->size;
+ phys_addr_t end = entry->addr + entry->size;
- if (start < last)
- start = last;
+ if (entry->type == E820_RAM || i == map_size - 1) {
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long end_pfn = PFN_UP(end);
- if (end <= start)
- continue;
+ if (entry->type == E820_RAM)
+ end_pfn = PFN_UP(entry->addr);
- /* Skip over the 1MB region. */
- if (last > end)
- continue;
+ if (start_pfn < end_pfn) {
+ if (start_pfn < nr_pages)
+ released += xen_release_chunk(
+ start_pfn, min(end_pfn, nr_pages));
- if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
- if (start > start_pci)
identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(start));
-
- /* Without saving 'last' we would gooble RAM too
- * at the end of the loop. */
- last = end;
- start_pci = end;
- continue;
+ start_pfn, end_pfn);
+ }
+ start = end;
}
- start_pci = min(start, start_pci);
- last = end;
}
- if (last > start_pci)
- identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(last));
- return identity;
+
+ printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+ printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+
+ return released;
}
static unsigned long __init xen_get_max_pages(void)
@@ -197,21 +179,32 @@ static unsigned long __init xen_get_max_pages(void)
return min(max_pages, MAX_DOMAIN_PAGES);
}
+static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+{
+ u64 end = start + size;
+
+ /* Align RAM regions to page boundaries. */
+ if (type == E820_RAM) {
+ start = PAGE_ALIGN(start);
+ end &= ~((u64)PAGE_SIZE - 1);
+ }
+
+ e820_add_region(start, end - start, type);
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
char * __init xen_memory_setup(void)
{
static struct e820entry map[E820MAX] __initdata;
- static struct e820entry map_raw[E820MAX] __initdata;
unsigned long max_pfn = xen_start_info->nr_pages;
unsigned long long mem_end;
int rc;
struct xen_memory_map memmap;
+ unsigned long max_pages;
unsigned long extra_pages = 0;
- unsigned long extra_limit;
- unsigned long identity_pages = 0;
int i;
int op;
@@ -237,58 +230,65 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
- memcpy(map_raw, map, sizeof(map));
- e820.nr_map = 0;
- xen_extra_mem_start = mem_end;
- for (i = 0; i < memmap.nr_entries; i++) {
- unsigned long long end;
-
- /* Guard against non-page aligned E820 entries. */
- if (map[i].type == E820_RAM)
- map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
-
- end = map[i].addr + map[i].size;
- if (map[i].type == E820_RAM && end > mem_end) {
- /* RAM off the end - may be partially included */
- u64 delta = min(map[i].size, end - mem_end);
-
- map[i].size -= delta;
- end -= delta;
-
- extra_pages += PFN_DOWN(delta);
- /*
- * Set RAM below 4GB that is not for us to be unusable.
- * This prevents "System RAM" address space from being
- * used as potential resource for I/O address (happens
- * when 'allocate_resource' is called).
- */
- if (delta &&
- (xen_initial_domain() && end < 0x100000000ULL))
- e820_add_region(end, delta, E820_UNUSABLE);
+ /* Make sure the Xen-supplied memory map is well-ordered. */
+ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
+ max_pages = xen_get_max_pages();
+ if (max_pages > max_pfn)
+ extra_pages += max_pages - max_pfn;
+
+ /*
+ * Set P2M for all non-RAM pages and E820 gaps to be identity
+ * type PFNs. Any RAM pages that would be made inaccesible by
+ * this are first released.
+ */
+ xen_released_pages = xen_set_identity_and_release(
+ map, memmap.nr_entries, max_pfn);
+ extra_pages += xen_released_pages;
+
+ /*
+ * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+ * factor the base size. On non-highmem systems, the base
+ * size is the full initial memory allocation; on highmem it
+ * is limited to the max size of lowmem, so that it doesn't
+ * get completely filled.
+ *
+ * In principle there could be a problem in lowmem systems if
+ * the initial memory is also very large with respect to
+ * lowmem, but we won't try to deal with that here.
+ */
+ extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ extra_pages);
+
+ i = 0;
+ while (i < memmap.nr_entries) {
+ u64 addr = map[i].addr;
+ u64 size = map[i].size;
+ u32 type = map[i].type;
+
+ if (type == E820_RAM) {
+ if (addr < mem_end) {
+ size = min(size, mem_end - addr);
+ } else if (extra_pages) {
+ size = min(size, (u64)extra_pages * PAGE_SIZE);
+ extra_pages -= size / PAGE_SIZE;
+ xen_add_extra_mem(addr, size);
+ } else
+ type = E820_UNUSABLE;
}
- if (map[i].size > 0 && end > xen_extra_mem_start)
- xen_extra_mem_start = end;
+ xen_align_and_add_e820_region(addr, size, type);
- /* Add region if any remains */
- if (map[i].size > 0)
- e820_add_region(map[i].addr, map[i].size, map[i].type);
+ map[i].addr += size;
+ map[i].size -= size;
+ if (map[i].size == 0)
+ i++;
}
- /* Align the balloon area so that max_low_pfn does not get set
- * to be at the _end_ of the PCI gap at the far end (fee01000).
- * Note that xen_extra_mem_start gets set in the loop above to be
- * past the last E820 region. */
- if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
- xen_extra_mem_start = (1ULL<<32);
/*
* In domU, the ISA region is normal, usable memory, but we
* reserve ISA memory anyway because too many things poke
* about in there.
- *
- * In Dom0, the host E820 information can leave gaps in the
- * ISA range, which would cause us to release those pages. To
- * avoid this, we unconditionally reserve them here.
*/
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
@@ -305,44 +305,6 @@ char * __init xen_memory_setup(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- extra_limit = xen_get_max_pages();
- if (max_pfn + extra_pages > extra_limit) {
- if (extra_limit > max_pfn)
- extra_pages = extra_limit - max_pfn;
- else
- extra_pages = 0;
- }
-
- extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
-
- /*
- * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
- * factor the base size. On non-highmem systems, the base
- * size is the full initial memory allocation; on highmem it
- * is limited to the max size of lowmem, so that it doesn't
- * get completely filled.
- *
- * In principle there could be a problem in lowmem systems if
- * the initial memory is also very large with respect to
- * lowmem, but we won't try to deal with that here.
- */
- extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
- max_pfn + extra_pages);
-
- if (extra_limit >= max_pfn)
- extra_pages = extra_limit - max_pfn;
- else
- extra_pages = 0;
-
- xen_add_extra_mem(extra_pages);
-
- /*
- * Set P2M for all non-RAM pages and E820 gaps to be identity
- * type PFNs. We supply it with the non-sanitized version
- * of the E820.
- */
- identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
return "Xen";
}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2330a9ad5e95..1540792b1e54 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -396,7 +396,7 @@ static int xen_blkbk_map(struct blkif_request *req,
continue;
ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
- blkbk->pending_page(pending_req, i), false);
+ blkbk->pending_page(pending_req, i), NULL);
if (ret) {
pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
(unsigned long)map[i].dev_bus_addr, ret);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 9ca5c021d0b6..361a1dff8f77 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -966,6 +966,9 @@ ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
{
struct tpm_chip *chip = dev_get_drvdata(dev);
+ if (chip->vendor.duration[TPM_LONG] == 0)
+ return 0;
+
return sprintf(buf, "%d %d %d [%s]\n",
jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 6fa215a38615..90832a955991 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -400,9 +400,8 @@ static int pcifront_claim_resource(struct pci_dev *dev, void *data)
dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
pci_name(dev), i);
if (pci_claim_resource(dev, i)) {
- dev_err(&pdev->xdev->dev, "Could not claim "
- "resource %s/%d! Device offline. Try "
- "giving less than 4GB to domain.\n",
+ dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! "
+ "Device offline. Try using e820_host=1 in the guest config.\n",
pci_name(dev), i);
}
}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index c4ea3a9a555b..39f021b855ef 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -63,6 +63,7 @@ u32 sas_get_pr_transport_id(
unsigned char *buf)
{
unsigned char *ptr;
+ int ret;
/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
@@ -74,7 +75,9 @@ u32 sas_get_pr_transport_id(
*/
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
- hex2bin(&buf[4], ptr, 8);
+ ret = hex2bin(&buf[4], ptr, 8);
+ if (ret < 0)
+ pr_debug("sas transport_id: invalid hex string\n");
/*
* The SAS Transport ID is a hardcoded 24-byte length
@@ -156,8 +159,9 @@ u32 fc_get_pr_transport_id(
unsigned char *buf)
{
unsigned char *ptr;
- int i;
+ int i, ret;
u32 off = 8;
+
/*
* PROTOCOL IDENTIFIER is 0h for FCP-2
*
@@ -174,7 +178,9 @@ u32 fc_get_pr_transport_id(
i++;
continue;
}
- hex2bin(&buf[off++], &ptr[i], 1);
+ ret = hex2bin(&buf[off++], &ptr[i], 1);
+ if (ret < 0)
+ pr_debug("fc transport_id: invalid hex string\n");
i += 2;
}
/*
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5f7ff8e2fc14..8795480c2350 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -137,16 +137,6 @@ config XEN_GRANT_DEV_ALLOC
to other domains. This can be used to implement frontend drivers
or as part of an inter-domain shared memory channel.
-config XEN_PLATFORM_PCI
- tristate "xen platform pci device driver"
- depends on XEN_PVHVM && PCI
- default m
- help
- Driver for the Xen PCI Platform device: it is responsible for
- initializing xenbus and grant_table when running in a Xen HVM
- domain. As a consequence this driver is required to run any Xen PV
- frontend on Xen HVM.
-
config SWIOTLB_XEN
def_bool y
depends on PCI
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 72bbb27d7a68..974fffdf22b2 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
-obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
+obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
@@ -23,5 +23,3 @@ obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
-
-xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5dfd8f8ff07f..5876e1ae6c2d 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -501,20 +501,24 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
* alloc_xenballooned_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
+ * @highmem: highmem or lowmem pages
* @return 0 on success, error otherwise
*/
-int alloc_xenballooned_pages(int nr_pages, struct page** pages)
+int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
int pgno = 0;
struct page* page;
mutex_lock(&balloon_mutex);
while (pgno < nr_pages) {
- page = balloon_retrieve(true);
- if (page) {
+ page = balloon_retrieve(highmem);
+ if (page && PageHighMem(page) == highmem) {
pages[pgno++] = page;
} else {
enum bp_state st;
- st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER);
+ if (page)
+ balloon_append(page);
+ st = decrease_reservation(nr_pages - pgno,
+ highmem ? GFP_HIGHUSER : GFP_USER);
if (st != BP_DONE)
goto out_undo;
}
@@ -555,17 +559,40 @@ void free_xenballooned_pages(int nr_pages, struct page** pages)
}
EXPORT_SYMBOL(free_xenballooned_pages);
-static int __init balloon_init(void)
+static void __init balloon_add_region(unsigned long start_pfn,
+ unsigned long pages)
{
unsigned long pfn, extra_pfn_end;
struct page *page;
+ /*
+ * If the amount of usable memory has been limited (e.g., with
+ * the 'mem' command line parameter), don't add pages beyond
+ * this limit.
+ */
+ extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+ for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
+ page = pfn_to_page(pfn);
+ /* totalram_pages and totalhigh_pages do not
+ include the boot-time balloon extension, so
+ don't subtract from it. */
+ __balloon_append(page);
+ }
+}
+
+static int __init balloon_init(void)
+{
+ int i;
+
if (!xen_domain())
return -ENODEV;
pr_info("xen/balloon: Initialising balloon driver.\n");
- balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn;
+ balloon_stats.current_pages = xen_pv_domain()
+ ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
+ : max_pfn;
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
@@ -584,24 +611,13 @@ static int __init balloon_init(void)
#endif
/*
- * Initialise the balloon with excess memory space. We need
- * to make sure we don't add memory which doesn't exist or
- * logically exist. The E820 map can be trimmed to be smaller
- * than the amount of physical memory due to the mem= command
- * line parameter. And if this is a 32-bit non-HIGHMEM kernel
- * on a system with memory which requires highmem to access,
- * don't try to use it.
+ * Initialize the balloon with pages from the extra memory
+ * regions (see arch/x86/xen/setup.c).
*/
- extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
- (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size));
- for (pfn = PFN_UP(xen_extra_mem_start);
- pfn < extra_pfn_end;
- pfn++) {
- page = pfn_to_page(pfn);
- /* totalram_pages and totalhigh_pages do not include the boot-time
- balloon extension, so don't subtract from it. */
- __balloon_append(page);
- }
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
+ if (xen_extra_mem[i].size)
+ balloon_add_region(PFN_UP(xen_extra_mem[i].start),
+ PFN_DOWN(xen_extra_mem[i].size));
return 0;
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7523719bf8a4..7a55b292bf39 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -432,7 +432,8 @@ static int __must_check xen_allocate_irq_dynamic(void)
irq = irq_alloc_desc_from(first, -1);
- xen_irq_init(irq);
+ if (irq >= 0)
+ xen_irq_init(irq);
return irq;
}
@@ -713,7 +714,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
mutex_lock(&irq_mapping_update_lock);
irq = xen_allocate_irq_dynamic();
- if (irq == -1)
+ if (irq < 0)
goto out;
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
@@ -729,7 +730,7 @@ out:
error_irq:
mutex_unlock(&irq_mapping_update_lock);
xen_free_irq(irq);
- return -1;
+ return ret;
}
#endif
@@ -779,7 +780,7 @@ int xen_irq_from_pirq(unsigned pirq)
mutex_lock(&irq_mapping_update_lock);
list_for_each_entry(info, &xen_irq_list_head, list) {
- if (info == NULL || info->type != IRQT_PIRQ)
+ if (info->type != IRQT_PIRQ)
continue;
irq = info->irq;
if (info->u.pirq.pirq == pirq)
@@ -872,11 +873,32 @@ static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
+static int find_virq(unsigned int virq, unsigned int cpu)
+{
+ struct evtchn_status status;
+ int port, rc = -ENOENT;
+
+ memset(&status, 0, sizeof(status));
+ for (port = 0; port <= NR_EVENT_CHANNELS; port++) {
+ status.dom = DOMID_SELF;
+ status.port = port;
+ rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
+ if (rc < 0)
+ continue;
+ if (status.status != EVTCHNSTAT_virq)
+ continue;
+ if (status.u.virq == virq && status.vcpu == cpu) {
+ rc = port;
+ break;
+ }
+ }
+ return rc;
+}
int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
- int evtchn, irq;
+ int evtchn, irq, ret;
mutex_lock(&irq_mapping_update_lock);
@@ -892,10 +914,16 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
bind_virq.virq = virq;
bind_virq.vcpu = cpu;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
- &bind_virq) != 0)
- BUG();
- evtchn = bind_virq.port;
+ ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq);
+ if (ret == 0)
+ evtchn = bind_virq.port;
+ else {
+ if (ret == -EEXIST)
+ ret = find_virq(virq, cpu);
+ BUG_ON(ret < 0);
+ evtchn = ret;
+ }
xen_irq_info_virq_init(cpu, irq, evtchn, virq);
@@ -1670,6 +1698,7 @@ void __init xen_init_IRQ(void)
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
GFP_KERNEL);
+ BUG_ON(!evtchn_to_irq);
for (i = 0; i < NR_EVENT_CHANNELS; i++)
evtchn_to_irq[i] = -1;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f914b26cf0c2..880798aae2f2 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -83,6 +83,7 @@ struct grant_map {
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
+ struct gnttab_map_grant_ref *kmap_ops;
struct page **pages;
};
@@ -116,19 +117,22 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
+ add->kmap_ops = kzalloc(sizeof(add->kmap_ops[0]) * count, GFP_KERNEL);
add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
+ NULL == add->kmap_ops ||
NULL == add->pages)
goto err;
- if (alloc_xenballooned_pages(count, add->pages))
+ if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
goto err;
for (i = 0; i < count; i++) {
add->map_ops[i].handle = -1;
add->unmap_ops[i].handle = -1;
+ add->kmap_ops[i].handle = -1;
}
add->index = 0;
@@ -142,6 +146,7 @@ err:
kfree(add->grants);
kfree(add->map_ops);
kfree(add->unmap_ops);
+ kfree(add->kmap_ops);
kfree(add);
return NULL;
}
@@ -243,10 +248,35 @@ static int map_grant_pages(struct grant_map *map)
gnttab_set_unmap_op(&map->unmap_ops[i], addr,
map->flags, -1 /* handle */);
}
+ } else {
+ /*
+ * Setup the map_ops corresponding to the pte entries pointing
+ * to the kernel linear addresses of the struct pages.
+ * These ptes are completely different from the user ptes dealt
+ * with find_grant_ptes.
+ */
+ for (i = 0; i < map->count; i++) {
+ unsigned level;
+ unsigned long address = (unsigned long)
+ pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ pte_t *ptep;
+ u64 pte_maddr = 0;
+ BUG_ON(PageHighMem(map->pages[i]));
+
+ ptep = lookup_address(address, &level);
+ pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
+ gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
+ map->flags |
+ GNTMAP_host_map |
+ GNTMAP_contains_pte,
+ map->grants[i].ref,
+ map->grants[i].domid);
+ }
}
pr_debug("map %d+%d\n", map->index, map->count);
- err = gnttab_map_refs(map->map_ops, map->pages, map->count);
+ err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+ map->pages, map->count);
if (err)
return err;
@@ -462,13 +492,11 @@ static int gntdev_release(struct inode *inode, struct file *flip)
pr_debug("priv %p\n", priv);
- spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next);
gntdev_put_map(map);
}
- spin_unlock(&priv->lock);
if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -532,10 +560,11 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
if (map) {
list_del(&map->next);
- gntdev_put_map(map);
err = 0;
}
spin_unlock(&priv->lock);
+ if (map)
+ gntdev_put_map(map);
return err;
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 4f44b347b24a..8c71ab801756 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -448,7 +448,8 @@ unsigned int gnttab_max_grant_frames(void)
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct page **pages, unsigned int count)
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
int i, ret;
pte_t *pte;
@@ -488,8 +489,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
*/
return -EOPNOTSUPP;
}
- ret = m2p_add_override(mfn, pages[i],
- map_ops[i].flags & GNTMAP_contains_pte);
+ ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
if (ret)
return ret;
}
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index cef4bafc07dc..66057075d6e2 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -18,6 +18,7 @@
*/
#include <linux/pci.h>
+#include <linux/acpi.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>
@@ -26,26 +27,85 @@
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
+static bool __read_mostly pci_seg_supported = true;
+
static int xen_add_device(struct device *dev)
{
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *physfn = pci_dev->physfn;
+#endif
+
+ if (pci_seg_supported) {
+ struct physdev_pci_device_add add = {
+ .seg = pci_domain_nr(pci_dev->bus),
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
+#ifdef CONFIG_ACPI
+ acpi_handle handle;
+#endif
+
+#ifdef CONFIG_PCI_IOV
+ if (pci_dev->is_virtfn) {
+ add.flags = XEN_PCI_DEV_VIRTFN;
+ add.physfn.bus = physfn->bus->number;
+ add.physfn.devfn = physfn->devfn;
+ } else
+#endif
+ if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
+ add.flags = XEN_PCI_DEV_EXTFN;
+
+#ifdef CONFIG_ACPI
+ handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
+ if (!handle)
+ handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
+#ifdef CONFIG_PCI_IOV
+ if (!handle && pci_dev->is_virtfn)
+ handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge);
+#endif
+ if (handle) {
+ acpi_status status;
+
+ do {
+ unsigned long long pxm;
+
+ status = acpi_evaluate_integer(handle, "_PXM",
+ NULL, &pxm);
+ if (ACPI_SUCCESS(status)) {
+ add.optarr[0] = pxm;
+ add.flags |= XEN_PCI_DEV_PXM;
+ break;
+ }
+ status = acpi_get_parent(handle, &handle);
+ } while (ACPI_SUCCESS(status));
+ }
+#endif /* CONFIG_ACPI */
+
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);
+ if (r != -ENOSYS)
+ return r;
+ pci_seg_supported = false;
+ }
+ if (pci_domain_nr(pci_dev->bus))
+ r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
- if (pci_dev->is_virtfn) {
+ else if (pci_dev->is_virtfn) {
struct physdev_manage_pci_ext manage_pci_ext = {
.bus = pci_dev->bus->number,
.devfn = pci_dev->devfn,
.is_virtfn = 1,
- .physfn.bus = pci_dev->physfn->bus->number,
- .physfn.devfn = pci_dev->physfn->devfn,
+ .physfn.bus = physfn->bus->number,
+ .physfn.devfn = physfn->devfn,
};
r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
&manage_pci_ext);
- } else
+ }
#endif
- if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
+ else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
struct physdev_manage_pci_ext manage_pci_ext = {
.bus = pci_dev->bus->number,
.devfn = pci_dev->devfn,
@@ -71,13 +131,27 @@ static int xen_remove_device(struct device *dev)
{
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct physdev_manage_pci manage_pci;
- manage_pci.bus = pci_dev->bus->number;
- manage_pci.devfn = pci_dev->devfn;
+ if (pci_seg_supported) {
+ struct physdev_pci_device device = {
+ .seg = pci_domain_nr(pci_dev->bus),
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
- r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
- &manage_pci);
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
+ &device);
+ } else if (pci_domain_nr(pci_dev->bus))
+ r = -ENOSYS;
+ else {
+ struct physdev_manage_pci manage_pci = {
+ .bus = pci_dev->bus->number,
+ .devfn = pci_dev->devfn
+ };
+
+ r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
+ &manage_pci);
+ }
return r;
}
@@ -96,13 +170,16 @@ static int xen_pci_notifier(struct notifier_block *nb,
r = xen_remove_device(dev);
break;
default:
- break;
+ return NOTIFY_DONE;
}
-
- return r;
+ if (r)
+ dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
+ action == BUS_NOTIFY_ADD_DEVICE ? "add" :
+ (action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));
+ return NOTIFY_OK;
}
-struct notifier_block device_nb = {
+static struct notifier_block device_nb = {
.notifier_call = xen_pci_notifier,
};
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 6e8c15a23201..c984768d98ca 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -38,6 +38,7 @@
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
+#include <xen/hvc-console.h>
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -146,8 +147,10 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
void __init xen_swiotlb_init(int verbose)
{
unsigned long bytes;
- int rc;
+ int rc = -ENOMEM;
unsigned long nr_tbl;
+ char *m = NULL;
+ unsigned int repeat = 3;
nr_tbl = swioltb_nr_tbl();
if (nr_tbl)
@@ -156,16 +159,17 @@ void __init xen_swiotlb_init(int verbose)
xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
}
-
+retry:
bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
/*
* Get IO TLB memory from any location.
*/
xen_io_tlb_start = alloc_bootmem(bytes);
- if (!xen_io_tlb_start)
- panic("Cannot allocate SWIOTLB buffer");
-
+ if (!xen_io_tlb_start) {
+ m = "Cannot allocate Xen-SWIOTLB buffer!\n";
+ goto error;
+ }
xen_io_tlb_end = xen_io_tlb_start + bytes;
/*
* And replace that memory with pages under 4GB.
@@ -173,17 +177,28 @@ void __init xen_swiotlb_init(int verbose)
rc = xen_swiotlb_fixup(xen_io_tlb_start,
bytes,
xen_io_tlb_nslabs);
- if (rc)
+ if (rc) {
+ free_bootmem(__pa(xen_io_tlb_start), bytes);
+ m = "Failed to get contiguous memory for DMA from Xen!\n"\
+ "You either: don't have the permissions, do not have"\
+ " enough free memory under 4GB, or the hypervisor memory"\
+ "is too fragmented!";
goto error;
-
+ }
start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
return;
error:
- panic("DMA(%d): Failed to exchange pages allocated for DMA with Xen! "\
- "We either don't have the permission or you do not have enough"\
- "free memory under 4GB!\n", rc);
+ if (repeat--) {
+ xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
+ (xen_io_tlb_nslabs >> 1));
+ printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
+ (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+ goto retry;
+ }
+ xen_raw_printk("%s (rc:%d)", m, rc);
+ panic("%s (rc:%d)", m, rc);
}
void *
@@ -194,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
unsigned long vstart;
+ phys_addr_t phys;
+ dma_addr_t dev_addr;
/*
* Ignore region specifiers - the kernel's ideas of
@@ -209,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
vstart = __get_free_pages(flags, order);
ret = (void *)vstart;
+ if (!ret)
+ return ret;
+
if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+ dma_mask = hwdev->coherent_dma_mask;
- if (ret) {
+ phys = virt_to_phys(ret);
+ dev_addr = xen_phys_to_bus(phys);
+ if (((dev_addr + size - 1 <= dma_mask)) &&
+ !range_straddles_page_boundary(phys, size))
+ *dma_handle = dev_addr;
+ else {
if (xen_create_contiguous_region(vstart, order,
fls64(dma_mask)) != 0) {
free_pages(vstart, order);
return NULL;
}
- memset(ret, 0, size);
*dma_handle = virt_to_machine(ret).maddr;
}
+ memset(ret, 0, size);
return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
@@ -230,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
int order = get_order(size);
+ phys_addr_t phys;
+ u64 dma_mask = DMA_BIT_MASK(32);
if (dma_release_from_coherent(hwdev, order, vaddr))
return;
- xen_destroy_contiguous_region((unsigned long)vaddr, order);
+ if (hwdev && hwdev->coherent_dma_mask)
+ dma_mask = hwdev->coherent_dma_mask;
+
+ phys = virt_to_phys(vaddr);
+
+ if (((dev_addr + size - 1 > dma_mask)) ||
+ range_straddles_page_boundary(phys, size))
+ xen_destroy_contiguous_region((unsigned long)vaddr, order);
+
free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
@@ -278,9 +313,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (!dma_capable(dev, dev_addr, size))
- panic("map_single: bounce buffer is not DMA'ble");
-
+ if (!dma_capable(dev, dev_addr, size)) {
+ swiotlb_tbl_unmap_single(dev, map, size, dir);
+ dev_addr = 0;
+ }
return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
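Note on the hunk above: xen_swiotlb_init() no longer panics on the first allocation or exchange failure; it frees the bootmem, halves xen_io_tlb_nslabs (with a 1024-slab, i.e. 2MB, floor) and retries up to three times before giving up. A minimal standalone C sketch of that shrink-and-retry strategy, with try_alloc() as a made-up stand-in for alloc_bootmem() plus xen_swiotlb_fixup():

#include <stdio.h>
#include <stdlib.h>

#define IO_TLB_SHIFT 11          /* 2KB slabs, as in swiotlb */
#define MIN_SLABS    1024UL      /* floor: 1024 slabs == 2MB */

/* Stand-in for alloc_bootmem()+xen_swiotlb_fixup(); fails for large sizes. */
static void *try_alloc(unsigned long nslabs)
{
	unsigned long bytes = nslabs << IO_TLB_SHIFT;

	if (bytes > (16UL << 20))	/* pretend only 16MB is available */
		return NULL;
	return malloc(bytes);
}

int main(void)
{
	unsigned long nslabs = 64 * 1024 * 1024 >> IO_TLB_SHIFT; /* 64MB default */
	unsigned int repeat = 3;
	void *buf;

	while (!(buf = try_alloc(nslabs))) {
		if (!repeat--) {
			fprintf(stderr, "giving up\n");
			return 1;
		}
		/* Halve the request but never go below the 2MB floor. */
		nslabs = nslabs >> 1 > MIN_SLABS ? nslabs >> 1 : MIN_SLABS;
		printf("lowering to %luMB\n", (nslabs << IO_TLB_SHIFT) >> 20);
	}
	printf("allocated %luMB\n", (nslabs << IO_TLB_SHIFT) >> 20);
	free(buf);
	return 0;
}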
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index a8031445d94e..444345afbd5c 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -15,7 +15,6 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-#define DRV_NAME "xen-pciback"
static int permissive;
module_param(permissive, bool, 0644);
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index da3cbdfcb5dc..3daf862d739d 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -15,7 +15,6 @@ struct pci_bar_info {
int which;
};
-#define DRV_NAME "xen-pciback"
#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
@@ -25,7 +24,7 @@ static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
int ret;
ret = xen_pcibk_read_config_word(dev, offset, value, data);
- if (!atomic_read(&dev->enable_cnt))
+ if (!pci_is_enabled(dev))
return ret;
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
@@ -187,7 +186,7 @@ static inline void read_dev_bar(struct pci_dev *dev,
bar_info->val = res[pos].start |
(res[pos].flags & PCI_REGION_FLAG_MASK);
- bar_info->len_val = res[pos].end - res[pos].start + 1;
+ bar_info->len_val = resource_size(&res[pos]);
}
static void *bar_init(struct pci_dev *dev, int offset)
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index 921a889e65eb..7476791cab40 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -12,7 +12,6 @@
#include "conf_space_quirks.h"
LIST_HEAD(xen_pcibk_quirks);
-#define DRV_NAME "xen-pciback"
static inline const struct pci_device_id *
match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
@@ -36,7 +35,7 @@ static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
goto out;
tmp_quirk = NULL;
printk(KERN_DEBUG DRV_NAME
- ":quirk didn't match any device xen_pciback knows about\n");
+ ": quirk didn't match any device known\n");
out:
return tmp_quirk;
}
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
index 1d32a9a42c01..828dddc360df 100644
--- a/drivers/xen/xen-pciback/passthrough.c
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -7,13 +7,13 @@
#include <linux/list.h>
#include <linux/pci.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include "pciback.h"
struct passthrough_dev_data {
/* Access to dev_list must be protected by lock */
struct list_head dev_list;
- spinlock_t lock;
+ struct mutex lock;
};
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
@@ -24,9 +24,8 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
struct pci_dev *dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
@@ -37,7 +36,7 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
return dev;
}
@@ -48,7 +47,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
{
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry;
- unsigned long flags;
unsigned int domain, bus, devfn;
int err;
@@ -57,9 +55,9 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
return -ENOMEM;
dev_entry->dev = dev;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_add_tail(&dev_entry->list, &dev_data->dev_list);
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
/* Publish this device. */
domain = (unsigned int)pci_domain_nr(dev->bus);
@@ -76,9 +74,8 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
struct pci_dev_entry *dev_entry, *t;
struct pci_dev *found_dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&dev_data->lock, flags);
+ mutex_lock(&dev_data->lock);
list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
if (dev_entry->dev == dev) {
@@ -88,7 +85,7 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&dev_data->lock, flags);
+ mutex_unlock(&dev_data->lock);
if (found_dev)
pcistub_put_pci_dev(found_dev);
@@ -102,7 +99,7 @@ static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
if (!dev_data)
return -ENOMEM;
- spin_lock_init(&dev_data->lock);
+ mutex_init(&dev_data->lock);
INIT_LIST_HEAD(&dev_data->dev_list);
@@ -116,14 +113,14 @@ static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
{
int err = 0;
struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
- struct pci_dev_entry *dev_entry, *e, *tmp;
+ struct pci_dev_entry *dev_entry, *e;
struct pci_dev *dev;
int found;
unsigned int domain, bus;
- spin_lock(&dev_data->lock);
+ mutex_lock(&dev_data->lock);
- list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
/* Only publish this device as a root if none of its
* parent bridges are exported
*/
@@ -142,16 +139,13 @@ static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
bus = (unsigned int)dev_entry->dev->bus->number;
if (!found) {
- spin_unlock(&dev_data->lock);
err = publish_root_cb(pdev, domain, bus);
if (err)
break;
- spin_lock(&dev_data->lock);
}
}
- if (!err)
- spin_unlock(&dev_data->lock);
+ mutex_unlock(&dev_data->lock);
return err;
}
@@ -182,7 +176,7 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
return 1;
}
-struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
+const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
.name = "passthrough",
.init = __xen_pcibk_init_devices,
.free = __xen_pcibk_release_devices,
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index aec214ac0a14..8f06e1ed028c 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -21,8 +21,6 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-#define DRV_NAME "xen-pciback"
-
static char *pci_devs_to_hide;
wait_queue_head_t xen_pcibk_aer_wait_queue;
/*Add sem for sync AER handling and xen_pcibk remove/reconfigue ops,
@@ -222,6 +220,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
}
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ if (WARN_ON(!found_psdev))
+ return;
/*hold this lock for avoiding breaking link between
* pcistub and xen_pcibk when AER is in processing
@@ -514,12 +514,9 @@ static void kill_domain_by_device(struct pcistub_device *psdev)
int err;
char nodename[PCI_NODENAME_MAX];
- if (!psdev)
- dev_err(&psdev->dev->dev,
- "device is NULL when do AER recovery/kill_domain\n");
+ BUG_ON(!psdev);
snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
psdev->pdev->xdev->otherend_id);
- nodename[strlen(nodename)] = '\0';
again:
err = xenbus_transaction_start(&xbt);
@@ -605,7 +602,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
if (test_bit(_XEN_PCIF_active,
(unsigned long *)&psdev->pdev->sh_info->flags)) {
dev_dbg(&psdev->dev->dev,
- "schedule pci_conf service in xen_pcibk\n");
+ "schedule pci_conf service in " DRV_NAME "\n");
xen_pcibk_test_and_schedule_op(psdev->pdev);
}
@@ -995,8 +992,7 @@ out:
err = count;
return err;
}
-
-DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
+static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
size_t count)
@@ -1015,8 +1011,7 @@ out:
err = count;
return err;
}
-
-DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
+static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
{
@@ -1039,8 +1034,7 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
return count;
}
-
-DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
+static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
{
@@ -1069,8 +1063,7 @@ static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
return count;
}
-
-DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
+static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
const char *buf,
@@ -1106,7 +1099,8 @@ out:
err = count;
return err;
}
-DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, pcistub_irq_handler_switch);
+static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
+ pcistub_irq_handler_switch);
static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
size_t count)
@@ -1170,8 +1164,8 @@ out:
return count;
}
-
-DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
+static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
+ pcistub_quirk_add);
static ssize_t permissive_add(struct device_driver *drv, const char *buf,
size_t count)
@@ -1236,8 +1230,8 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
return count;
}
-
-DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
+static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
+ permissive_add);
static void pcistub_exit(void)
{
@@ -1374,3 +1368,4 @@ module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:pci");
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index a0e131a81503..e9b4011c5f9a 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -15,6 +15,8 @@
#include <linux/atomic.h>
#include <xen/interface/io/pciif.h>
+#define DRV_NAME "xen-pciback"
+
struct pci_dev_entry {
struct list_head list;
struct pci_dev *dev;
@@ -27,7 +29,7 @@ struct pci_dev_entry {
struct xen_pcibk_device {
void *pci_dev_data;
- spinlock_t dev_lock;
+ struct mutex dev_lock;
struct xenbus_device *xdev;
struct xenbus_watch be_watch;
u8 be_watching;
@@ -89,7 +91,7 @@ typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
* passthrough - BDFs are exactly like in the host.
*/
struct xen_pcibk_backend {
- char *name;
+ const char *name;
int (*init)(struct xen_pcibk_device *pdev);
void (*free)(struct xen_pcibk_device *pdev);
int (*find)(struct pci_dev *pcidev, struct xen_pcibk_device *pdev,
@@ -104,9 +106,9 @@ struct xen_pcibk_backend {
unsigned int devfn);
};
-extern struct xen_pcibk_backend xen_pcibk_vpci_backend;
-extern struct xen_pcibk_backend xen_pcibk_passthrough_backend;
-extern struct xen_pcibk_backend *xen_pcibk_backend;
+extern const struct xen_pcibk_backend xen_pcibk_vpci_backend;
+extern const struct xen_pcibk_backend xen_pcibk_passthrough_backend;
+extern const struct xen_pcibk_backend *xen_pcibk_backend;
static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev,
@@ -116,13 +118,14 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
if (xen_pcibk_backend && xen_pcibk_backend->add)
return xen_pcibk_backend->add(pdev, dev, devid, publish_cb);
return -1;
-};
+}
+
static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev)
{
if (xen_pcibk_backend && xen_pcibk_backend->free)
return xen_pcibk_backend->release(pdev, dev);
-};
+}
static inline struct pci_dev *
xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
@@ -131,7 +134,8 @@ xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
if (xen_pcibk_backend && xen_pcibk_backend->get)
return xen_pcibk_backend->get(pdev, domain, bus, devfn);
return NULL;
-};
+}
+
/**
* Add for domain0 PCIE-AER handling. Get guest domain/bus/devfn in xen_pcibk
* before sending aer request to pcifront, so that guest could identify
@@ -148,25 +152,29 @@ static inline int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
return xen_pcibk_backend->find(pcidev, pdev, domain, bus,
devfn);
return -1;
-};
+}
+
static inline int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
if (xen_pcibk_backend && xen_pcibk_backend->init)
return xen_pcibk_backend->init(pdev);
return -1;
-};
+}
+
static inline int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
publish_pci_root_cb cb)
{
if (xen_pcibk_backend && xen_pcibk_backend->publish)
return xen_pcibk_backend->publish(pdev, cb);
return -1;
-};
+}
+
static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
if (xen_pcibk_backend && xen_pcibk_backend->free)
return xen_pcibk_backend->free(pdev);
-};
+}
+
/* Handles events from front-end */
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 8c95c3415b75..63616d7453e6 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -10,7 +10,6 @@
#include <linux/sched.h>
#include "pciback.h"
-#define DRV_NAME "xen-pciback"
int verbose_request;
module_param(verbose_request, int, 0644);
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 4a42cfb0959d..46d140baebd8 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -8,16 +8,15 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include "pciback.h"
#define PCI_SLOT_MAX 32
-#define DRV_NAME "xen-pciback"
struct vpci_dev_data {
/* Access to dev_list must be protected by lock */
struct list_head dev_list[PCI_SLOT_MAX];
- spinlock_t lock;
+ struct mutex lock;
};
static inline struct list_head *list_first(struct list_head *head)
@@ -33,13 +32,12 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
if (domain != 0 || bus != 0)
return NULL;
if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
list_for_each_entry(entry,
&vpci_dev->dev_list[PCI_SLOT(devfn)],
@@ -50,7 +48,7 @@ static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
}
}
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
}
return dev;
}
@@ -71,7 +69,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
int err = 0, slot, func = -1;
struct pci_dev_entry *t, *dev_entry;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
err = -EFAULT;
@@ -90,7 +87,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
dev_entry->dev = dev;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
/* Keep multi-function devices together on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
@@ -129,7 +126,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
"No more space on root virtual PCI bus");
unlock:
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
/* Publish this device. */
if (!err)
@@ -145,14 +142,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
int slot;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
struct pci_dev *found_dev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
- struct pci_dev_entry *e, *tmp;
- list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
- list) {
+ struct pci_dev_entry *e;
+
+ list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
if (e->dev == dev) {
list_del(&e->list);
found_dev = e->dev;
@@ -163,7 +159,7 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
}
out:
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
if (found_dev)
pcistub_put_pci_dev(found_dev);
@@ -178,7 +174,7 @@ static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
if (!vpci_dev)
return -ENOMEM;
- spin_lock_init(&vpci_dev->lock);
+ mutex_init(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++)
INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
@@ -222,10 +218,9 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
struct pci_dev_entry *entry;
struct pci_dev *dev = NULL;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
- unsigned long flags;
int found = 0, slot;
- spin_lock_irqsave(&vpci_dev->lock, flags);
+ mutex_lock(&vpci_dev->lock);
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
list_for_each_entry(entry,
&vpci_dev->dev_list[slot],
@@ -243,11 +238,11 @@ static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
}
}
}
- spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ mutex_unlock(&vpci_dev->lock);
return found;
}
-struct xen_pcibk_backend xen_pcibk_vpci_backend = {
+const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
.name = "vpci",
.init = __xen_pcibk_init_devices,
.free = __xen_pcibk_release_devices,
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 978d2c6f5dca..474d52ec3374 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -13,7 +13,6 @@
#include <asm/xen/pci.h>
#include "pciback.h"
-#define DRV_NAME "xen-pciback"
#define INVALID_EVTCHN_IRQ (-1)
struct workqueue_struct *xen_pcibk_wq;
@@ -44,7 +43,7 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
pdev->xdev = xdev;
dev_set_drvdata(&xdev->dev, pdev);
- spin_lock_init(&pdev->dev_lock);
+ mutex_init(&pdev->dev_lock);
pdev->sh_info = NULL;
pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
@@ -62,14 +61,12 @@ out:
static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
{
- spin_lock(&pdev->dev_lock);
-
+ mutex_lock(&pdev->dev_lock);
/* Ensure the guest can't trigger our handler before removing devices */
if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
unbind_from_irqhandler(pdev->evtchn_irq, pdev);
pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
}
- spin_unlock(&pdev->dev_lock);
/* If the driver domain started an op, make sure we complete it
* before releasing the shared memory */
@@ -77,13 +74,11 @@ static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
/* Note, the workqueue does not use spinlocks at all.*/
flush_workqueue(xen_pcibk_wq);
- spin_lock(&pdev->dev_lock);
if (pdev->sh_info != NULL) {
xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
pdev->sh_info = NULL;
}
- spin_unlock(&pdev->dev_lock);
-
+ mutex_unlock(&pdev->dev_lock);
}
static void free_pdev(struct xen_pcibk_device *pdev)
@@ -120,9 +115,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
goto out;
}
- spin_lock(&pdev->dev_lock);
pdev->sh_info = vaddr;
- spin_unlock(&pdev->dev_lock);
err = bind_interdomain_evtchn_to_irqhandler(
pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
@@ -132,10 +125,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
"Error binding event channel to IRQ");
goto out;
}
-
- spin_lock(&pdev->dev_lock);
pdev->evtchn_irq = err;
- spin_unlock(&pdev->dev_lock);
err = 0;
dev_dbg(&pdev->xdev->dev, "Attached!\n");
@@ -150,6 +140,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
char *magic = NULL;
+ mutex_lock(&pdev->dev_lock);
/* Make sure we only do this setup once */
if (xenbus_read_driver_state(pdev->xdev->nodename) !=
XenbusStateInitialised)
@@ -176,7 +167,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
xenbus_dev_fatal(pdev->xdev, -EFAULT,
"version mismatch (%s/%s) with pcifront - "
- "halting xen_pcibk",
+ "halting " DRV_NAME,
magic, XEN_PCI_MAGIC);
goto out;
}
@@ -194,6 +185,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
out:
+ mutex_unlock(&pdev->dev_lock);
kfree(magic);
@@ -369,6 +361,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
+ mutex_lock(&pdev->dev_lock);
/* Make sure we only reconfigure once */
if (xenbus_read_driver_state(pdev->xdev->nodename) !=
XenbusStateReconfiguring)
@@ -506,6 +499,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
}
out:
+ mutex_unlock(&pdev->dev_lock);
return 0;
}
@@ -562,6 +556,7 @@ static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
char dev_str[64];
char state_str[64];
+ mutex_lock(&pdev->dev_lock);
/* It's possible we could get the call to setup twice, so make sure
* we're not already connected.
*/
@@ -642,10 +637,10 @@ static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
"Error switching to initialised state!");
out:
+ mutex_unlock(&pdev->dev_lock);
if (!err)
/* see if pcifront is already configured (if not, we'll wait) */
xen_pcibk_attach(pdev);
-
return err;
}
@@ -724,7 +719,7 @@ static struct xenbus_driver xenbus_xen_pcibk_driver = {
.otherend_changed = xen_pcibk_frontend_changed,
};
-struct xen_pcibk_backend *xen_pcibk_backend;
+const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
int __init xen_pcibk_xenbus_register(void)
{
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 6ea852e25162..d93c70857e03 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -68,6 +68,8 @@
*/
#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
@@ -93,6 +95,15 @@ static unsigned int selfballoon_uphysteresis __read_mostly = 1;
/* In HZ, controls frequency of worker invocation. */
static unsigned int selfballoon_interval __read_mostly = 5;
+/*
+ * Minimum usable RAM in MB for selfballooning target for balloon.
+ * If non-zero, it is added to totalreserve_pages and self-ballooning
+ * will not balloon below the sum. If zero, a piecewise linear function
+ * is calculated as a minimum and added to totalreserve_pages. Note that
+ * setting this value indiscriminately may cause OOMs and crashes.
+ */
+static unsigned int selfballoon_min_usable_mb;
+
static void selfballoon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
@@ -189,20 +200,23 @@ static int __init xen_selfballooning_setup(char *s)
__setup("selfballooning", xen_selfballooning_setup);
#endif /* CONFIG_FRONTSWAP */
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+
/*
* Use current balloon size, the goal (vm_committed_as), and hysteresis
* parameters to set a new target balloon size
*/
static void selfballoon_process(struct work_struct *work)
{
- unsigned long cur_pages, goal_pages, tgt_pages;
+ unsigned long cur_pages, goal_pages, tgt_pages, floor_pages;
+ unsigned long useful_pages;
bool reset_timer = false;
if (xen_selfballooning_enabled) {
- cur_pages = balloon_stats.current_pages;
+ cur_pages = totalram_pages;
tgt_pages = cur_pages; /* default is no change */
goal_pages = percpu_counter_read_positive(&vm_committed_as) +
- balloon_stats.current_pages - totalram_pages;
+ totalreserve_pages;
#ifdef CONFIG_FRONTSWAP
/* allow space for frontswap pages to be repatriated */
if (frontswap_selfshrinking && frontswap_enabled)
@@ -217,7 +231,26 @@ static void selfballoon_process(struct work_struct *work)
((goal_pages - cur_pages) /
selfballoon_uphysteresis);
/* else if cur_pages == goal_pages, no change */
- balloon_set_new_target(tgt_pages);
+ useful_pages = max_pfn - totalreserve_pages;
+ if (selfballoon_min_usable_mb != 0)
+ floor_pages = totalreserve_pages +
+ MB2PAGES(selfballoon_min_usable_mb);
+ /* piecewise linear function ending in ~3% slope */
+ else if (useful_pages < MB2PAGES(16))
+ floor_pages = max_pfn; /* not worth ballooning */
+ else if (useful_pages < MB2PAGES(64))
+ floor_pages = totalreserve_pages + MB2PAGES(16) +
+ ((useful_pages - MB2PAGES(16)) >> 1);
+ else if (useful_pages < MB2PAGES(512))
+ floor_pages = totalreserve_pages + MB2PAGES(40) +
+ ((useful_pages - MB2PAGES(40)) >> 3);
+ else /* useful_pages >= MB2PAGES(512) */
+ floor_pages = totalreserve_pages + MB2PAGES(99) +
+ ((useful_pages - MB2PAGES(99)) >> 5);
+ if (tgt_pages < floor_pages)
+ tgt_pages = floor_pages;
+ balloon_set_new_target(tgt_pages +
+ balloon_stats.current_pages - totalram_pages);
reset_timer = true;
}
#ifdef CONFIG_FRONTSWAP
@@ -340,6 +373,31 @@ static ssize_t store_selfballoon_uphys(struct sys_device *dev,
static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
show_selfballoon_uphys, store_selfballoon_uphys);
+SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n",
+ selfballoon_min_usable_mb);
+
+static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_min_usable_mb = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
+ show_selfballoon_min_usable_mb,
+ store_selfballoon_min_usable_mb);
+
+
#ifdef CONFIG_FRONTSWAP
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
@@ -421,6 +479,7 @@ static struct attribute *selfballoon_attrs[] = {
&attr_selfballoon_interval.attr,
&attr_selfballoon_downhysteresis.attr,
&attr_selfballoon_uphysteresis.attr,
+ &attr_selfballoon_min_usable_mb.attr,
#ifdef CONFIG_FRONTSWAP
&attr_frontswap_selfshrinking.attr,
&attr_frontswap_hysteresis.attr,
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 090c61ee8fd0..2eff7a6aaa20 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -212,7 +212,9 @@ int xb_init_comms(void)
printk(KERN_WARNING "XENBUS response ring is not quiescent "
"(%08x:%08x): fixing up\n",
intf->rsp_cons, intf->rsp_prod);
- intf->rsp_cons = intf->rsp_prod;
+ /* breaks kdump */
+ if (!reset_devices)
+ intf->rsp_cons = intf->rsp_prod;
}
if (xenbus_irq) {
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index bd2f90c9ac8b..cef9b0bf63d5 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -684,64 +684,74 @@ static int __init xenbus_probe_initcall(void)
device_initcall(xenbus_probe_initcall);
-static int __init xenbus_init(void)
+/* Set up event channel for xenstored which is run as a local process
+ * (this is normally used only in dom0)
+ */
+static int __init xenstored_local_init(void)
{
int err = 0;
unsigned long page = 0;
+ struct evtchn_alloc_unbound alloc_unbound;
- DPRINTK("");
+ /* Allocate Xenstore page */
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ goto out_err;
- err = -ENODEV;
- if (!xen_domain())
- return err;
+ xen_store_mfn = xen_start_info->store_mfn =
+ pfn_to_mfn(virt_to_phys((void *)page) >>
+ PAGE_SHIFT);
- /*
- * Domain0 doesn't have a store_evtchn or store_mfn yet.
- */
- if (xen_initial_domain()) {
- struct evtchn_alloc_unbound alloc_unbound;
+ /* Next allocate a local port which xenstored can bind to */
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = DOMID_SELF;
- /* Allocate Xenstore page */
- page = get_zeroed_page(GFP_KERNEL);
- if (!page)
- goto out_error;
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (err == -ENOSYS)
+ goto out_err;
- xen_store_mfn = xen_start_info->store_mfn =
- pfn_to_mfn(virt_to_phys((void *)page) >>
- PAGE_SHIFT);
+ BUG_ON(err);
+ xen_store_evtchn = xen_start_info->store_evtchn =
+ alloc_unbound.port;
- /* Next allocate a local port which xenstored can bind to */
- alloc_unbound.dom = DOMID_SELF;
- alloc_unbound.remote_dom = 0;
+ return 0;
- err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
- &alloc_unbound);
- if (err == -ENOSYS)
- goto out_error;
+ out_err:
+ if (page != 0)
+ free_page(page);
+ return err;
+}
- BUG_ON(err);
- xen_store_evtchn = xen_start_info->store_evtchn =
- alloc_unbound.port;
+static int __init xenbus_init(void)
+{
+ int err = 0;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (xen_hvm_domain()) {
+ uint64_t v = 0;
+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ if (err)
+ goto out_error;
+ xen_store_evtchn = (int)v;
+ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ if (err)
+ goto out_error;
+ xen_store_mfn = (unsigned long)v;
+ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
} else {
- if (xen_hvm_domain()) {
- uint64_t v = 0;
- err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
- if (err)
- goto out_error;
- xen_store_evtchn = (int)v;
- err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ xen_store_evtchn = xen_start_info->store_evtchn;
+ xen_store_mfn = xen_start_info->store_mfn;
+ if (xen_store_evtchn)
+ xenstored_ready = 1;
+ else {
+ err = xenstored_local_init();
if (err)
goto out_error;
- xen_store_mfn = (unsigned long)v;
- xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
- } else {
- xen_store_evtchn = xen_start_info->store_evtchn;
- xen_store_mfn = xen_start_info->store_mfn;
- xen_store_interface = mfn_to_virt(xen_store_mfn);
- xenstored_ready = 1;
}
+ xen_store_interface = mfn_to_virt(xen_store_mfn);
}
/* Initialize the interface to xenstore. */
@@ -760,12 +770,7 @@ static int __init xenbus_init(void)
proc_mkdir("xen", NULL);
#endif
- return 0;
-
- out_error:
- if (page != 0)
- free_page(page);
-
+ out_error:
return err;
}
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 60adf919d78d..32417b5064fd 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -104,8 +104,6 @@ static int xenbus_uevent_backend(struct device *dev,
xdev = to_xenbus_device(dev);
bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
- if (xdev == NULL)
- return -ENODEV;
if (add_uevent_var(env, "MODALIAS=xen-backend:%s", xdev->devicetype))
return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index ed2ba474a560..540587e18a94 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -248,10 +248,131 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,
}
EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
+static int backend_state;
+
+static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
+ const char **v, unsigned int l)
+{
+ xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+ printk(KERN_DEBUG "XENBUS: backend %s %s\n",
+ v[XS_WATCH_PATH], xenbus_strstate(backend_state));
+ wake_up(&backend_state_wq);
+}
+
+static void xenbus_reset_wait_for_backend(char *be, int expected)
+{
+ long timeout;
+ timeout = wait_event_interruptible_timeout(backend_state_wq,
+ backend_state == expected, 5 * HZ);
+ if (timeout <= 0)
+ printk(KERN_INFO "XENBUS: backend %s timed out.\n", be);
+}
+
+/*
+ * Reset frontend if it is in Connected or Closed state.
+ * Wait for backend to catch up.
+ * State Connected happens during kdump, Closed after kexec.
+ */
+static void xenbus_reset_frontend(char *fe, char *be, int be_state)
+{
+ struct xenbus_watch be_watch;
+
+ printk(KERN_DEBUG "XENBUS: backend %s %s\n",
+ be, xenbus_strstate(be_state));
+
+ memset(&be_watch, 0, sizeof(be_watch));
+ be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be);
+ if (!be_watch.node)
+ return;
+
+ be_watch.callback = xenbus_reset_backend_state_changed;
+ backend_state = XenbusStateUnknown;
+
+ printk(KERN_INFO "XENBUS: triggering reconnect on %s\n", be);
+ register_xenbus_watch(&be_watch);
+
+ /* fall through to forward backend to state XenbusStateInitialising */
+ switch (be_state) {
+ case XenbusStateConnected:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
+ xenbus_reset_wait_for_backend(be, XenbusStateClosing);
+
+ case XenbusStateClosing:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
+ xenbus_reset_wait_for_backend(be, XenbusStateClosed);
+
+ case XenbusStateClosed:
+ xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);
+ xenbus_reset_wait_for_backend(be, XenbusStateInitWait);
+ }
+
+ unregister_xenbus_watch(&be_watch);
+ printk(KERN_INFO "XENBUS: reconnect done on %s\n", be);
+ kfree(be_watch.node);
+}
+
+static void xenbus_check_frontend(char *class, char *dev)
+{
+ int be_state, fe_state, err;
+ char *backend, *frontend;
+
+ frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev);
+ if (!frontend)
+ return;
+
+ err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state);
+ if (err != 1)
+ goto out;
+
+ switch (fe_state) {
+ case XenbusStateConnected:
+ case XenbusStateClosed:
+ printk(KERN_DEBUG "XENBUS: frontend %s %s\n",
+ frontend, xenbus_strstate(fe_state));
+ backend = xenbus_read(XBT_NIL, frontend, "backend", NULL);
+ if (!backend || IS_ERR(backend))
+ goto out;
+ err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state);
+ if (err == 1)
+ xenbus_reset_frontend(frontend, backend, be_state);
+ kfree(backend);
+ break;
+ default:
+ break;
+ }
+out:
+ kfree(frontend);
+}
+
+static void xenbus_reset_state(void)
+{
+ char **devclass, **dev;
+ int devclass_n, dev_n;
+ int i, j;
+
+ devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n);
+ if (IS_ERR(devclass))
+ return;
+
+ for (i = 0; i < devclass_n; i++) {
+ dev = xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n);
+ if (IS_ERR(dev))
+ continue;
+ for (j = 0; j < dev_n; j++)
+ xenbus_check_frontend(devclass[i], dev[j]);
+ kfree(dev);
+ }
+ kfree(devclass);
+}
+
static int frontend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
void *data)
{
+ /* reset devices in Connected or Closed state */
+ if (xen_hvm_domain())
+ xenbus_reset_state();
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_frontend);
register_xenbus_watch(&fe_watch);
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 5534690075af..b3b8f2f3ad10 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -45,6 +45,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <xen/xenbus.h>
+#include <xen/xen.h>
#include "xenbus_comms.h"
struct xs_stored_msg {
@@ -620,6 +621,15 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
+static void xs_reset_watches(void)
+{
+ int err;
+
+ err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
+ if (err && err != -EEXIST)
+ printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
+}
+
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -638,8 +648,7 @@ int register_xenbus_watch(struct xenbus_watch *watch)
err = xs_watch(watch->node, token);
- /* Ignore errors due to multiple registration. */
- if ((err != 0) && (err != -EEXIST)) {
+ if (err) {
spin_lock(&watches_lock);
list_del(&watch->list);
spin_unlock(&watches_lock);
@@ -897,5 +906,9 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
+ /* shutdown watches for kexec boot */
+ if (xen_hvm_domain())
+ xs_reset_watches();
+
return 0;
}
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 7ee2b6e71786..229624f867d3 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -37,6 +37,7 @@ zorro_match_device(const struct zorro_device_id *ids,
}
return NULL;
}
+EXPORT_SYMBOL(zorro_match_device);
static int zorro_device_probe(struct device *dev)
@@ -91,6 +92,7 @@ int zorro_register_driver(struct zorro_driver *drv)
/* register with core */
return driver_register(&drv->driver);
}
+EXPORT_SYMBOL(zorro_register_driver);
/**
@@ -107,6 +109,7 @@ void zorro_unregister_driver(struct zorro_driver *drv)
{
driver_unregister(&drv->driver);
}
+EXPORT_SYMBOL(zorro_unregister_driver);
/**
@@ -168,6 +171,7 @@ struct bus_type zorro_bus_type = {
.probe = zorro_device_probe,
.remove = zorro_device_remove,
};
+EXPORT_SYMBOL(zorro_bus_type);
static int __init zorro_driver_init(void)
@@ -177,7 +181,3 @@ static int __init zorro_driver_init(void)
postcore_initcall(zorro_driver_init);
-EXPORT_SYMBOL(zorro_match_device);
-EXPORT_SYMBOL(zorro_register_driver);
-EXPORT_SYMBOL(zorro_unregister_driver);
-EXPORT_SYMBOL(zorro_bus_type);
diff --git a/fs/attr.c b/fs/attr.c
index 538e27959d3f..7ee7ba488313 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -13,6 +13,7 @@
#include <linux/fsnotify.h>
#include <linux/fcntl.h>
#include <linux/security.h>
+#include <linux/evm.h>
/**
* inode_change_ok - check if attribute changes to an inode are allowed
@@ -237,8 +238,10 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
else
error = simple_setattr(dentry, attr);
- if (!error)
+ if (!error) {
fsnotify_change(dentry, ia_valid);
+ evm_inode_post_setattr(dentry, ia_valid);
+ }
return error;
}
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 69565e5fc6a0..426aa464f1af 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -383,36 +383,36 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
XATTR_REPLACE);
}
-int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
- struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *suffix;
+ const struct xattr *xattr;
+ struct btrfs_trans_handle *trans = fs_info;
char *name;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &suffix, &value,
- &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
- }
-
- name = kmalloc(XATTR_SECURITY_PREFIX_LEN + strlen(suffix) + 1,
- GFP_NOFS);
- if (!name) {
- err = -ENOMEM;
- } else {
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1, GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ break;
+ }
strcpy(name, XATTR_SECURITY_PREFIX);
- strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
- err = __btrfs_setxattr(trans, inode, name, value, len, 0);
+ strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+ err = __btrfs_setxattr(trans, inode, name,
+ xattr->value, xattr->value_len, 0);
kfree(name);
+ if (err < 0)
+ break;
}
-
- kfree(suffix);
- kfree(value);
return err;
}
+
+int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &btrfs_initxattrs, trans);
+}
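The btrfs hunk above, like the ext2/ext3/ext4, gfs2, jffs2, jfs, ocfs2 and xfs hunks that follow, switches security_inode_init_security() from returning one name/value pair to calling a filesystem-supplied initxattrs callback with a NULL-terminated array of xattrs, so that EVM can add security.evm next to the LSM attribute. A minimal userspace model of that callback contract (store_security_xattr() and the sample values are made up; the struct layout and loop mirror the patch):

#include <stdio.h>
#include <stddef.h>

/* Shape of the entries handed to the initxattrs callback in this series. */
struct xattr {
	char *name;
	void *value;
	size_t value_len;
};

/* Made-up stand-in for __btrfs_setxattr()/ext4_xattr_set_handle()/... */
static int store_security_xattr(const char *name, const void *value,
				size_t len)
{
	(void)value;	/* a real setter would write this to disk */
	printf("set security.%s (%zu bytes)\n", name, len);
	return 0;
}

/* The same loop every converted filesystem now implements. */
static int initxattrs(const struct xattr *xattr_array)
{
	const struct xattr *xattr;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		err = store_security_xattr(xattr->name, xattr->value,
					   xattr->value_len);
		if (err < 0)
			break;
	}
	return err;
}

int main(void)
{
	char lsm_label[] = "system_u:object_r:etc_t:s0";	/* sample value */
	char evm_value[] = "hmac-placeholder";			/* sample value */
	struct xattr xattrs[] = {
		{ "selinux", lsm_label, sizeof(lsm_label) },
		{ "evm", evm_value, sizeof(evm_value) - 1 },
		{ NULL, NULL, 0 },	/* terminator supplied by the LSM layer */
	};

	return initxattrs(xattrs);
}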
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 2a22fb2989e4..c32308882148 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
@@ -31,16 +32,8 @@
#define MAX_EA_VALUE_SIZE 65535
#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
-#define CIFS_XATTR_USER_PREFIX "user."
-#define CIFS_XATTR_SYSTEM_PREFIX "system."
-#define CIFS_XATTR_OS2_PREFIX "os2."
-#define CIFS_XATTR_SECURITY_PREFIX "security."
-#define CIFS_XATTR_TRUSTED_PREFIX "trusted."
-#define XATTR_TRUSTED_PREFIX_LEN 8
-#define XATTR_SECURITY_PREFIX_LEN 9
-/* BB need to add server (Samba e.g) support for security and trusted prefix */
-
+/* BB need to add server (Samba e.g) support for security and trusted prefix */
int cifs_removexattr(struct dentry *direntry, const char *ea_name)
{
@@ -76,8 +69,8 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
}
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5)
- && (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ && (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))) {
cFYI(1,
"illegal xattr request %s (only user namespace supported)",
ea_name);
@@ -88,7 +81,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto remove_ea_exit;
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL,
(__u16)0, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -149,21 +142,23 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
cFYI(1, "attempt to set cifs inode metadata");
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+ } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
- ea_name += 4; /* skip past os2. prefix */
+ ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -269,7 +264,8 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
/* return alt name if available as pseudo attr */
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
- } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
+ } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
+ == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
@@ -277,15 +273,15 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cFYI(1, "attempt to query cifs inode metadata");
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
- ea_name += 5; /* skip past user. prefix */
+ ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
+ } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto get_ea_exit;
- ea_name += 4; /* skip past os2. prefix */
+ ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -339,10 +335,10 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cFYI(1, "Query CIFS ACL not supported yet");
#endif /* CONFIG_CIFS_ACL */
} else if (strncmp(ea_name,
- CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
+ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
cFYI(1, "Trusted xattr namespace not supported yet");
} else if (strncmp(ea_name,
- CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
+ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
cFYI(1, "Security xattr namespace not supported yet");
} else
cFYI(1,
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 5d979b4347b0..c922adc8ef41 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -46,28 +46,30 @@ ext2_xattr_security_set(struct dentry *dentry, const char *name,
value, size, flags);
}
-int
-ext2_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext2_initxattrs, NULL);
+}
+
const struct xattr_handler ext2_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext2_xattr_security_list,
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index b8d9f83aa5c5..3c218b8a51d4 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -48,28 +48,32 @@ ext3_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
-int
-ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext3_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ handle_t *handle = fs_info;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext3_xattr_set_handle(handle, inode,
+ EXT3_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext3_initxattrs, handle);
+}
+
const struct xattr_handler ext3_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext3_xattr_security_list,
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 007c3bfbf094..34e4350dd4d9 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -48,28 +48,32 @@ ext4_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
-int
-ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int ext4_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ handle_t *handle = fs_info;
+ int err = 0;
- err = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ext4_xattr_set_handle(handle, inode,
+ EXT4_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- err = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_SECURITY,
- name, value, len, 0);
- kfree(name);
- kfree(value);
return err;
}
+int
+ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &ext4_initxattrs, handle);
+}
+
const struct xattr_handler ext4_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = ext4_xattr_security_list,
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 900cf986aadc..6525b804d5ec 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -624,31 +624,29 @@ fail:
return error;
}
-static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
- const struct qstr *qstr)
+int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int err;
- size_t len;
- void *value;
- char *name;
-
- err = security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr,
- &name, &value, &len);
-
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
+ const struct xattr *xattr;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = __gfs2_xattr_set(inode, xattr->name, xattr->value,
+ xattr->value_len, 0,
+ GFS2_EATYPE_SECURITY);
+ if (err < 0)
+ break;
}
-
- err = __gfs2_xattr_set(&ip->i_inode, name, value, len, 0,
- GFS2_EATYPE_SECURITY);
- kfree(value);
- kfree(name);
-
return err;
}
+static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr,
+ &gfs2_initxattrs, NULL);
+}
+
/**
* gfs2_create_inode - Create a new inode
* @dir: The parent directory
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index cfeb7164b085..0f20208df602 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -22,26 +22,29 @@
#include <linux/security.h>
#include "nodelist.h"
-/* ---- Initial Security Label Attachment -------------- */
-int jffs2_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+/* ---- Initial Security Label(s) Attachment callback --- */
+int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int rc;
- size_t len;
- void *value;
- char *name;
+ const struct xattr *xattr;
+ int err = 0;
- rc = security_inode_init_security(inode, dir, qstr, &name, &value, &len);
- if (rc) {
- if (rc == -EOPNOTSUPP)
- return 0;
- return rc;
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, 0);
+ if (err < 0)
+ break;
}
- rc = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, value, len, 0);
+ return err;
+}
- kfree(name);
- kfree(value);
- return rc;
+/* ---- Initial Security Label(s) Attachment ----------- */
+int jffs2_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &jffs2_initxattrs, NULL);
}
/* ---- XATTR Handler for "security.*" ----------------- */
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index e87fedef23db..26683e15b3ac 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -1089,38 +1089,37 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
}
#ifdef CONFIG_JFS_SECURITY
-int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
- const struct qstr *qstr)
+int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
{
- int rc;
- size_t len;
- void *value;
- char *suffix;
+ const struct xattr *xattr;
+ tid_t *tid = fs_info;
char *name;
-
- rc = security_inode_init_security(inode, dir, qstr, &suffix, &value,
- &len);
- if (rc) {
- if (rc == -EOPNOTSUPP)
- return 0;
- return rc;
- }
- name = kmalloc(XATTR_SECURITY_PREFIX_LEN + 1 + strlen(suffix),
- GFP_NOFS);
- if (!name) {
- rc = -ENOMEM;
- goto kmalloc_failed;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1, GFP_NOFS);
+ if (!name) {
+ err = -ENOMEM;
+ break;
+ }
+ strcpy(name, XATTR_SECURITY_PREFIX);
+ strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
+
+ err = __jfs_setxattr(*tid, inode, name,
+ xattr->value, xattr->value_len, 0);
+ kfree(name);
+ if (err < 0)
+ break;
}
- strcpy(name, XATTR_SECURITY_PREFIX);
- strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
-
- rc = __jfs_setxattr(tid, inode, name, value, len, 0);
-
- kfree(name);
-kmalloc_failed:
- kfree(suffix);
- kfree(value);
+ return err;
+}
- return rc;
+int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
+ const struct qstr *qstr)
+{
+ return security_inode_init_security(inode, dir, qstr,
+ &jfs_initxattrs, &tid);
}
#endif
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 81ecf9c0bf0a..194fb22ef79d 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7185,20 +7185,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
{
int ret = 0;
struct buffer_head *dir_bh = NULL;
- struct ocfs2_security_xattr_info si = {
- .enable = 1,
- };
- ret = ocfs2_init_security_get(inode, dir, qstr, &si);
+ ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
if (!ret) {
- ret = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
- si.name, si.value, si.value_len,
- XATTR_CREATE);
- if (ret) {
- mlog_errno(ret);
- goto leave;
- }
- } else if (ret != -EOPNOTSUPP) {
mlog_errno(ret);
goto leave;
}
@@ -7255,6 +7244,22 @@ static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
name, value, size, flags);
}
+int ocfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr *xattr;
+ int err = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ err = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
+ xattr->name, xattr->value,
+ xattr->value_len, XATTR_CREATE);
+ if (err)
+ break;
+ }
+ return err;
+}
+
int ocfs2_init_security_get(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
@@ -7263,8 +7268,13 @@ int ocfs2_init_security_get(struct inode *inode,
/* check whether ocfs2 support feature xattr */
if (!ocfs2_supports_xattr(OCFS2_SB(dir->i_sb)))
return -EOPNOTSUPP;
- return security_inode_init_security(inode, dir, qstr, &si->name,
- &si->value, &si->value_len);
+ if (si)
+ return security_old_inode_init_security(inode, dir, qstr,
+ &si->name, &si->value,
+ &si->value_len);
+
+ return security_inode_init_security(inode, dir, qstr,
+ &ocfs2_initxattrs, NULL);
}
int ocfs2_init_security_set(handle_t *handle,
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index ef66c18a9332..534668fa41be 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -66,8 +66,8 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
if (IS_PRIVATE(dir))
return 0;
- error = security_inode_init_security(inode, dir, qstr, &sec->name,
- &sec->value, &sec->length);
+ error = security_old_inode_init_security(inode, dir, qstr, &sec->name,
+ &sec->value, &sec->length);
if (error) {
if (error == -EOPNOTSUPP)
error = 0;
diff --git a/fs/xattr.c b/fs/xattr.c
index f060663ab70c..67583de8218c 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -14,6 +14,7 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
+#include <linux/evm.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
@@ -166,6 +167,64 @@ out_noalloc:
}
EXPORT_SYMBOL_GPL(xattr_getsecurity);
+/*
+ * vfs_getxattr_alloc - allocate memory, if necessary, before calling getxattr
+ *
+ * Allocate memory, if not already allocated, or re-allocate correct size,
+ * before retrieving the extended attribute.
+ *
+ * Returns the result of alloc, if failed, or the getxattr operation.
+ */
+ssize_t
+vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
+ size_t xattr_size, gfp_t flags)
+{
+ struct inode *inode = dentry->d_inode;
+ char *value = *xattr_value;
+ int error;
+
+ error = xattr_permission(inode, name, MAY_READ);
+ if (error)
+ return error;
+
+ if (!inode->i_op->getxattr)
+ return -EOPNOTSUPP;
+
+ error = inode->i_op->getxattr(dentry, name, NULL, 0);
+ if (error < 0)
+ return error;
+
+ if (!value || (error > xattr_size)) {
+ value = krealloc(*xattr_value, error + 1, flags);
+ if (!value)
+ return -ENOMEM;
+ memset(value, 0, error + 1);
+ }
+
+ error = inode->i_op->getxattr(dentry, name, value, error);
+ *xattr_value = value;
+ return error;
+}
+
+/* Compare an extended attribute value with the given value */
+int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ const char *value, size_t size, gfp_t flags)
+{
+ char *xattr_value = NULL;
+ int rc;
+
+ rc = vfs_getxattr_alloc(dentry, xattr_name, &xattr_value, 0, flags);
+ if (rc < 0)
+ return rc;
+
+ if ((rc != size) || (memcmp(xattr_value, value, rc) != 0))
+ rc = -EINVAL;
+ else
+ rc = 0;
+ kfree(xattr_value);
+ return rc;
+}
+
ssize_t
vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
{
@@ -243,8 +302,10 @@ vfs_removexattr(struct dentry *dentry, const char *name)
error = inode->i_op->removexattr(dentry, name);
mutex_unlock(&inode->i_mutex);
- if (!error)
+ if (!error) {
fsnotify_xattr(dentry);
+ evm_inode_post_removexattr(dentry, name);
+ }
return error;
}
EXPORT_SYMBOL_GPL(vfs_removexattr);
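The two helpers added to fs/xattr.c are designed to be called in a loop while reusing a single buffer, which is exactly how evm_calc_hmac() further down consumes them. A hedged usage sketch (the xattr list and the pr_info() reporting are illustrative only):

static void dump_security_xattrs(struct dentry *dentry)
{
	static const char *names[] = { XATTR_NAME_SELINUX, XATTR_NAME_CAPS, NULL };
	char *value = NULL;		/* grown/reused by vfs_getxattr_alloc() */
	size_t alloc_size = 0;
	ssize_t len;
	int i;

	for (i = 0; names[i] != NULL; i++) {
		len = vfs_getxattr_alloc(dentry, names[i], &value,
					 alloc_size, GFP_NOFS);
		if (len == -ENOMEM)
			break;			/* allocation failure is fatal */
		if (len < 0)
			continue;		/* xattr absent or unsupported */
		alloc_size = len;
		pr_info("%s: %zd bytes\n", names[i], len);
	}
	kfree(value);				/* the caller owns the buffer */
}

vfs_xattr_cmp() wraps the same helper to compare an on-disk xattr against an expected value, returning 0 on a match, -EINVAL on a mismatch, or the underlying error.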
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 673704fab748..28856accb4fa 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -102,37 +102,38 @@ xfs_mark_inode_dirty(
}
+
+int xfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+ void *fs_info)
+{
+ const struct xattr *xattr;
+ struct xfs_inode *ip = XFS_I(inode);
+ int error = 0;
+
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ error = xfs_attr_set(ip, xattr->name, xattr->value,
+ xattr->value_len, ATTR_SECURE);
+ if (error < 0)
+ break;
+ }
+ return error;
+}
+
/*
* Hook in SELinux. This is not quite correct yet, what we really need
* here (as we do for default ACLs) is a mechanism by which creation of
* these attrs can be journalled at inode creation time (along with the
* inode, of course, such that log replay can't cause these to be lost).
*/
+
STATIC int
xfs_init_security(
struct inode *inode,
struct inode *dir,
const struct qstr *qstr)
{
- struct xfs_inode *ip = XFS_I(inode);
- size_t length;
- void *value;
- unsigned char *name;
- int error;
-
- error = security_inode_init_security(inode, dir, qstr, (char **)&name,
- &value, &length);
- if (error) {
- if (error == -EOPNOTSUPP)
- return 0;
- return -error;
- }
-
- error = xfs_attr_set(ip, name, value, length, ATTR_SECURE);
-
- kfree(name);
- kfree(value);
- return error;
+ return security_inode_init_security(inode, dir, qstr,
+ &xfs_initxattrs, NULL);
}
static void
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db22d136ad08..b5e2e4c6b017 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -222,7 +222,6 @@
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
*(__tracepoints_ptrs) /* Tracepoints: pointer array */\
VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
- *(__markers_strings) /* Markers: strings */ \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
diff --git a/include/linux/evm.h b/include/linux/evm.h
new file mode 100644
index 000000000000..9fc13a760928
--- /dev/null
+++ b/include/linux/evm.h
@@ -0,0 +1,100 @@
+/*
+ * evm.h
+ *
+ * Copyright (c) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ */
+
+#ifndef _LINUX_EVM_H
+#define _LINUX_EVM_H
+
+#include <linux/integrity.h>
+#include <linux/xattr.h>
+
+struct integrity_iint_cache;
+
+#ifdef CONFIG_EVM
+extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint);
+extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
+extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
+extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size);
+extern void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len);
+extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+extern void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name);
+extern int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm);
+#ifdef CONFIG_FS_POSIX_ACL
+extern int posix_xattr_acl(const char *xattrname);
+#else
+static inline int posix_xattr_acl(const char *xattrname)
+{
+ return 0;
+}
+#endif
+#else
+#ifdef CONFIG_INTEGRITY
+static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ return INTEGRITY_UNKNOWN;
+}
+#endif
+
+static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ return;
+}
+
+static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len)
+{
+ return;
+}
+
+static inline int evm_inode_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return;
+}
+
+static inline int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm)
+{
+ return 0;
+}
+
+#endif /* CONFIG_EVM */
+#endif /* _LINUX_EVM_H */
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 09e6e62f9953..6ac8e50c6cf5 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -15,8 +15,6 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_inode_alloc(struct inode *inode);
-extern void ima_inode_free(struct inode *inode);
extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
@@ -27,16 +25,6 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_inode_alloc(struct inode *inode)
-{
- return 0;
-}
-
-static inline void ima_inode_free(struct inode *inode)
-{
- return;
-}
-
static inline int ima_file_check(struct file *file, int mask)
{
return 0;
@@ -51,6 +39,5 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
{
return 0;
}
-
#endif /* CONFIG_IMA */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
new file mode 100644
index 000000000000..a0c41256cb92
--- /dev/null
+++ b/include/linux/integrity.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _LINUX_INTEGRITY_H
+#define _LINUX_INTEGRITY_H
+
+#include <linux/fs.h>
+
+enum integrity_status {
+ INTEGRITY_PASS = 0,
+ INTEGRITY_FAIL,
+ INTEGRITY_NOLABEL,
+ INTEGRITY_NOXATTRS,
+ INTEGRITY_UNKNOWN,
+};
+
+/* List of EVM protected security xattrs */
+#ifdef CONFIG_INTEGRITY
+extern int integrity_inode_alloc(struct inode *inode);
+extern void integrity_inode_free(struct inode *inode);
+
+#else
+static inline int integrity_inode_alloc(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void integrity_inode_free(struct inode *inode)
+{
+ return;
+}
+#endif /* CONFIG_INTEGRITY */
+#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 46ac9a50528d..8eefcf7e95eb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -382,7 +382,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
}
extern int hex_to_bin(char ch);
-extern void hex2bin(u8 *dst, const char *src, size_t count);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
/*
* General tracing related utility functions - trace_printk(),
diff --git a/include/linux/security.h b/include/linux/security.h
index ebd2a53a3d07..19d8e04e1688 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -36,6 +36,7 @@
#include <linux/key.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include <net/flow.h>
/* Maximum number of letters for an LSM name string */
@@ -147,6 +148,10 @@ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
+/* security_inode_init_security callback function to write xattrs */
+typedef int (*initxattrs) (struct inode *inode,
+ const struct xattr *xattr_array, void *fs_data);
+
#ifdef CONFIG_SECURITY
struct security_mnt_opts {
@@ -1367,7 +1372,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inode_getsecctx:
* Returns a string containing all relevant security context information
*
- * @inode we wish to set the security context of.
+ * @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
* This is the main security structure.
@@ -1655,6 +1660,8 @@ struct security_operations {
extern int security_init(void);
extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
+extern void __init security_fixup_ops(struct security_operations *ops);
+
/* Security operations */
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
@@ -1704,8 +1711,11 @@ int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
- void **value, size_t *len);
+ const struct qstr *qstr,
+ initxattrs initxattrs, void *fs_data);
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, int mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
@@ -2034,11 +2044,19 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
- char **name,
- void **value,
- size_t *len)
+ initxattrs initxattrs,
+ void *fs_data)
{
- return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline int security_old_inode_init_security(struct inode *inode,
+ struct inode *dir,
+ const struct qstr *qstr,
+ char **name, void **value,
+ size_t *len)
+{
+ return 0;
}
static inline int security_inode_create(struct inode *dir,
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index aed54c50aa66..e5d122031542 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -30,6 +30,9 @@
#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
/* Security namespace */
+#define XATTR_EVM_SUFFIX "evm"
+#define XATTR_NAME_EVM XATTR_SECURITY_PREFIX XATTR_EVM_SUFFIX
+
#define XATTR_SELINUX_SUFFIX "selinux"
#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX
@@ -49,6 +52,11 @@
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
+#define XATTR_POSIX_ACL_ACCESS "posix_acl_access"
+#define XATTR_NAME_POSIX_ACL_ACCESS XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_ACCESS
+#define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
+#define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+
#ifdef __KERNEL__
#include <linux/types.h>
@@ -67,6 +75,12 @@ struct xattr_handler {
size_t size, int flags, int handler_flags);
};
+struct xattr {
+ char *name;
+ void *value;
+ size_t value_len;
+};
+
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
@@ -78,7 +92,10 @@ ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer,
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
int generic_removexattr(struct dentry *dentry, const char *name);
-
+ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
+ char **xattr_value, size_t size, gfp_t flags);
+int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ const char *value, size_t size, gfp_t flags);
#endif /* __KERNEL__ */
#endif /* _LINUX_XATTR_H */
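On the producing side, security_inode_init_security() in security/security.c (not shown in this diff) assembles the array that the filesystem callbacks receive: one slot filled by the active LSM, optionally one filled by evm_inode_init_security(), and a zeroed terminator. The following is a simplified reconstruction under those assumptions; the real version also handles IS_PRIVATE inodes and a NULL initxattrs callback (as the shmem call sites below rely on).

int security_inode_init_security(struct inode *inode, struct inode *dir,
				 const struct qstr *qstr,
				 initxattrs initxattrs, void *fs_data)
{
	struct xattr new_xattrs[2 + 1];		/* LSM + EVM + NULL terminator */
	struct xattr *lsm_xattr = new_xattrs;
	struct xattr *evm_xattr = lsm_xattr + 1;
	int ret;

	memset(new_xattrs, 0, sizeof(new_xattrs));
	ret = security_ops->inode_init_security(inode, dir, qstr,
						&lsm_xattr->name,
						&lsm_xattr->value,
						&lsm_xattr->value_len);
	if (ret)
		goto out;
	ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
	if (ret)
		goto out;
	ret = initxattrs(inode, new_xattrs, fs_data);
out:
	/* kfree(NULL) is a no-op, so unfilled slots are harmless */
	kfree(lsm_xattr->name);
	kfree(lsm_xattr->value);
	kfree(evm_xattr->name);
	kfree(evm_xattr->value);
	return (ret == -EOPNOTSUPP) ? 0 : ret;
}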
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 76f7538bb339..d29c153705bc 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -25,8 +25,9 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
-int alloc_xenballooned_pages(int nr_pages, struct page** pages);
-void free_xenballooned_pages(int nr_pages, struct page** pages);
+int alloc_xenballooned_pages(int nr_pages, struct page **pages,
+ bool highmem);
+void free_xenballooned_pages(int nr_pages, struct page **pages);
struct sys_device;
#ifdef CONFIG_XEN_SELFBALLOONING
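alloc_xenballooned_pages() grows a highmem argument so callers that need pages usable for kernel mappings can insist on lowmem. A brief, hedged call-site sketch:

	struct page *pages[16];
	int rc;

	/* ask for 16 ballooned pages that must not come from highmem */
	rc = alloc_xenballooned_pages(ARRAY_SIZE(pages), pages, false);
	if (rc)
		return rc;

	/* ... map foreign grant references into these pages ... */

	free_xenballooned_pages(ARRAY_SIZE(pages), pages);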
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index b1fab6b5b3ef..6b99bfbd785d 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -156,6 +156,7 @@ unsigned int gnttab_max_grant_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct page **pages, unsigned int count);
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 99fcffb372d1..f0b6890370be 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -26,7 +26,11 @@ enum xsd_sockmsg_type
XS_SET_PERMS,
XS_WATCH_EVENT,
XS_ERROR,
- XS_IS_DOMAIN_INTRODUCED
+ XS_IS_DOMAIN_INTRODUCED,
+ XS_RESUME,
+ XS_SET_TARGET,
+ XS_RESTRICT,
+ XS_RESET_WATCHES
};
#define XS_WRITE_NONE "NONE"
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 534cac89a77d..c1080d9c705d 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -109,6 +109,7 @@ struct physdev_irq {
#define MAP_PIRQ_TYPE_MSI 0x0
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+#define MAP_PIRQ_TYPE_MSI_SEG 0x3
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
@@ -119,7 +120,7 @@ struct physdev_map_pirq {
int index;
/* IN or OUT */
int pirq;
- /* IN */
+ /* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
int bus;
/* IN */
int devfn;
@@ -198,6 +199,37 @@ struct physdev_get_free_pirq {
uint32_t pirq;
};
+#define XEN_PCI_DEV_EXTFN 0x1
+#define XEN_PCI_DEV_VIRTFN 0x2
+#define XEN_PCI_DEV_PXM 0x4
+
+#define PHYSDEVOP_pci_device_add 25
+struct physdev_pci_device_add {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t flags;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint32_t optarr[];
+#elif defined(__GNUC__)
+ uint32_t optarr[0];
+#endif
+};
+
+#define PHYSDEVOP_pci_device_remove 26
+#define PHYSDEVOP_restore_msi_ext 27
+struct physdev_pci_device {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+};
+
/*
* Notify that some PIRQ-bound event channels have been unmasked.
* ** This command is obsolete since interface version 0x00030202 and is **
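The new PHYSDEVOP_pci_device_add lets dom0 report a PCI device to the hypervisor, optionally naming the physical function behind an SR-IOV virtual function. A sketch of how a caller such as drivers/xen/pci.c might fill the structure (the exact field usage here is an assumption; HYPERVISOR_physdev_op() is the existing hypercall wrapper):

static int report_pci_device(struct pci_dev *dev)
{
	struct physdev_pci_device_add add = {
		.seg   = pci_domain_nr(dev->bus),
		.bus   = dev->bus->number,
		.devfn = dev->devfn,
	};

#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn) {
		add.flags        = XEN_PCI_DEV_VIRTFN;
		add.physfn.bus   = dev->physfn->bus->number;
		add.physfn.devfn = dev->physfn->devfn;
	}
#endif
	return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);
}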
diff --git a/include/xen/page.h b/include/xen/page.h
index 0be36b976f4b..12765b6f9517 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,6 +3,16 @@
#include <asm/xen/page.h>
-extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region {
+ phys_addr_t start;
+ phys_addr_t size;
+};
+
+#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
+
+extern __initdata
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS];
+
+extern unsigned long xen_released_pages;
#endif /* _XEN_PAGE_H */
diff --git a/kernel/cred.c b/kernel/cred.c
index 8ef31f53c44c..bb55d052d858 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -644,6 +644,9 @@ void __init cred_init(void)
*/
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
+#ifdef CONFIG_KEYS
+ struct thread_group_cred *tgcred;
+#endif
const struct cred *old;
struct cred *new;
@@ -651,6 +654,14 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
if (!new)
return NULL;
+#ifdef CONFIG_KEYS
+ tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
+ if (!tgcred) {
+ kmem_cache_free(cred_jar, new);
+ return NULL;
+ }
+#endif
+
kdebug("prepare_kernel_cred() alloc %p", new);
if (daemon)
@@ -667,8 +678,11 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
get_group_info(new->group_info);
#ifdef CONFIG_KEYS
- atomic_inc(&init_tgcred.usage);
- new->tgcred = &init_tgcred;
+ atomic_set(&tgcred->usage, 1);
+ spin_lock_init(&tgcred->lock);
+ tgcred->process_keyring = NULL;
+ tgcred->session_keyring = NULL;
+ new->tgcred = tgcred;
new->request_key_auth = NULL;
new->thread_keyring = NULL;
new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f5fe6ba7a3ab..51d5ae210244 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -38,14 +38,21 @@ EXPORT_SYMBOL(hex_to_bin);
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
+ *
+ * Return 0 on success, -1 in case of bad input.
*/
-void hex2bin(u8 *dst, const char *src, size_t count)
+int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
- *dst = hex_to_bin(*src++) << 4;
- *dst += hex_to_bin(*src++);
- dst++;
+ int hi = hex_to_bin(*src++);
+ int lo = hex_to_bin(*src++);
+
+ if ((hi < 0) || (lo < 0))
+ return -1;
+
+ *dst++ = (hi << 4) | lo;
}
+ return 0;
}
EXPORT_SYMBOL(hex2bin);
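Because hex2bin() can now fail and is tagged __must_check in <linux/kernel.h>, callers have to reject malformed input instead of silently storing garbage. For example, a hypothetical caller parsing a SHA1 digest supplied as 40 hex characters:

static int parse_sha1_hex(const char *buf, u8 *digest /* 20 bytes */)
{
	if (strlen(buf) < 2 * 20)
		return -EINVAL;
	if (hex2bin(digest, buf, 20))
		return -EINVAL;		/* a non-hex character was found */
	return 0;			/* digest[] now holds the binary bytes */
}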
diff --git a/mm/shmem.c b/mm/shmem.c
index 32f6763f16fb..2d3577295298 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1458,7 +1458,7 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir,
- &dentry->d_name, NULL,
+ &dentry->d_name,
NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
@@ -1598,7 +1598,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
if (!inode)
return -ENOSPC;
- error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
+ error = security_inode_init_security(inode, dir, &dentry->d_name,
NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
diff --git a/security/Kconfig b/security/Kconfig
index e0f08b52e4ab..51bd5a0b69ae 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -38,7 +38,9 @@ config TRUSTED_KEYS
config ENCRYPTED_KEYS
tristate "ENCRYPTED KEYS"
- depends on KEYS && TRUSTED_KEYS
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_HMAC
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_SHA256
@@ -186,7 +188,7 @@ source security/smack/Kconfig
source security/tomoyo/Kconfig
source security/apparmor/Kconfig
-source security/integrity/ima/Kconfig
+source security/integrity/Kconfig
choice
prompt "Default security module"
diff --git a/security/Makefile b/security/Makefile
index 8bb0fe9e1ca9..a5e502f8a05b 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
-subdir-$(CONFIG_IMA) += integrity/ima
-obj-$(CONFIG_IMA) += integrity/ima/built-in.o
+subdir-$(CONFIG_INTEGRITY) += integrity
+obj-$(CONFIG_INTEGRITY) += integrity/built-in.o
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 0848292982a2..69ddb47787b2 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -200,7 +200,7 @@ void __init aa_destroy_aafs(void)
*
* Returns: error on failure
*/
-int __init aa_create_aafs(void)
+static int __init aa_create_aafs(void)
{
int error;
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 649fad88869b..7ee05c6f3c64 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -19,6 +19,7 @@
#include "include/capability.h"
#include "include/context.h"
#include "include/policy.h"
+#include "include/ipc.h"
/* call back to audit ptrace fields */
static void audit_cb(struct audit_buffer *ab, void *va)
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index b82e383beb77..9516948041ad 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -18,6 +18,7 @@
#include <linux/vmalloc.h>
#include "include/audit.h"
+#include "include/apparmor.h"
/**
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index d6d9a57b5652..741dd13e089b 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -381,11 +381,11 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
profile->file.trans.size = size;
for (i = 0; i < size; i++) {
char *str;
- int c, j, size = unpack_strdup(e, &str, NULL);
+ int c, j, size2 = unpack_strdup(e, &str, NULL);
/* unpack_strdup verifies that the last character is
* null termination byte.
*/
- if (!size)
+ if (!size2)
goto fail;
profile->file.trans.table[i] = str;
/* verify that name doesn't start with space */
@@ -393,7 +393,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
goto fail;
/* count the number of internal \0 */
- for (c = j = 0; j < size - 2; j++) {
+ for (c = j = 0; j < size2 - 2; j++) {
if (!str[j])
c++;
}
@@ -440,11 +440,11 @@ static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
if (size > RLIM_NLIMITS)
goto fail;
for (i = 0; i < size; i++) {
- u64 tmp = 0;
+ u64 tmp2 = 0;
int a = aa_map_resource(i);
- if (!unpack_u64(e, &tmp, NULL))
+ if (!unpack_u64(e, &tmp2, NULL))
goto fail;
- profile->rlimits.limits[a].rlim_max = tmp;
+ profile->rlimits.limits[a].rlim_max = tmp2;
}
if (!unpack_nameX(e, AA_ARRAYEND, NULL))
goto fail;
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index 04a2cf8d1b65..1b41c542d376 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -16,6 +16,7 @@
#include "include/context.h"
#include "include/policy.h"
#include "include/domain.h"
+#include "include/procattr.h"
/**
diff --git a/security/commoncap.c b/security/commoncap.c
index a93b3b733079..ee4f8486e5f5 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -332,7 +332,8 @@ int cap_inode_killpriv(struct dentry *dentry)
*/
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
struct linux_binprm *bprm,
- bool *effective)
+ bool *effective,
+ bool *has_cap)
{
struct cred *new = bprm->cred;
unsigned i;
@@ -341,6 +342,9 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
*effective = true;
+ if (caps->magic_etc & VFS_CAP_REVISION_MASK)
+ *has_cap = true;
+
CAP_FOR_EACH_U32(i) {
__u32 permitted = caps->permitted.cap[i];
__u32 inheritable = caps->inheritable.cap[i];
@@ -424,7 +428,7 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
-static int get_file_caps(struct linux_binprm *bprm, bool *effective)
+static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap)
{
struct dentry *dentry;
int rc = 0;
@@ -450,7 +454,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective)
goto out;
}
- rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective);
+ rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap);
if (rc == -EINVAL)
printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
__func__, rc, bprm->filename);
@@ -475,11 +479,11 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
- bool effective;
+ bool effective, has_cap = false;
int ret;
effective = false;
- ret = get_file_caps(bprm, &effective);
+ ret = get_file_caps(bprm, &effective, &has_cap);
if (ret < 0)
return ret;
@@ -489,7 +493,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
* for a setuid root binary run by a non-root user. Do set it
* for a root user just to cause least surprise to an admin.
*/
- if (effective && new->uid != 0 && new->euid == 0) {
+ if (has_cap && new->uid != 0 && new->euid == 0) {
warn_setuid_and_fcaps_mixed(bprm->filename);
goto skip;
}
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
new file mode 100644
index 000000000000..4bf00acf7937
--- /dev/null
+++ b/security/integrity/Kconfig
@@ -0,0 +1,7 @@
+#
+config INTEGRITY
+ def_bool y
+ depends on IMA || EVM
+
+source security/integrity/ima/Kconfig
+source security/integrity/evm/Kconfig
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
new file mode 100644
index 000000000000..0ae44aea6516
--- /dev/null
+++ b/security/integrity/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for caching inode integrity data (iint)
+#
+
+obj-$(CONFIG_INTEGRITY) += integrity.o
+
+integrity-y := iint.o
+
+subdir-$(CONFIG_IMA) += ima
+obj-$(CONFIG_IMA) += ima/built-in.o
+subdir-$(CONFIG_EVM) += evm
+obj-$(CONFIG_EVM) += evm/built-in.o
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
new file mode 100644
index 000000000000..afbb59dd262d
--- /dev/null
+++ b/security/integrity/evm/Kconfig
@@ -0,0 +1,13 @@
+config EVM
+ boolean "EVM support"
+ depends on SECURITY && KEYS && (TRUSTED_KEYS=y || TRUSTED_KEYS=n)
+ select CRYPTO_HMAC
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select ENCRYPTED_KEYS
+ default n
+ help
+ EVM protects a file's security extended attributes against
+ integrity attacks.
+
+ If you are unsure how to answer this question, answer N.
diff --git a/security/integrity/evm/Makefile b/security/integrity/evm/Makefile
new file mode 100644
index 000000000000..7393c415a066
--- /dev/null
+++ b/security/integrity/evm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for building the Extended Verification Module(EVM)
+#
+obj-$(CONFIG_EVM) += evm.o
+
+evm-y := evm_main.o evm_crypto.o evm_secfs.o
+evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
new file mode 100644
index 000000000000..d320f5197437
--- /dev/null
+++ b/security/integrity/evm/evm.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm.h
+ *
+ */
+#include <linux/xattr.h>
+#include <linux/security.h>
+#include "../integrity.h"
+
+extern int evm_initialized;
+extern char *evm_hmac;
+
+extern struct crypto_shash *hmac_tfm;
+
+/* List of EVM protected security xattrs */
+extern char *evm_config_xattrnames[];
+
+extern int evm_init_key(void);
+extern int evm_update_evmxattr(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len);
+extern int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char *digest);
+extern int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+ char *hmac_val);
+extern int evm_init_secfs(void);
+extern void evm_cleanup_secfs(void);
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
new file mode 100644
index 000000000000..5dd5b140242c
--- /dev/null
+++ b/security/integrity/evm/evm_crypto.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_crypto.c
+ * Using root's kernel master key (kmk), calculate the HMAC
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <keys/encrypted-type.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+#define EVMKEY "evm-key"
+#define MAX_KEY_SIZE 128
+static unsigned char evmkey[MAX_KEY_SIZE];
+static int evmkey_len = MAX_KEY_SIZE;
+
+struct crypto_shash *hmac_tfm;
+
+static struct shash_desc *init_desc(void)
+{
+ int rc;
+ struct shash_desc *desc;
+
+ if (hmac_tfm == NULL) {
+ hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmac_tfm)) {
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ evm_hmac, PTR_ERR(hmac_tfm));
+ rc = PTR_ERR(hmac_tfm);
+ hmac_tfm = NULL;
+ return ERR_PTR(rc);
+ }
+ }
+
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
+ GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->tfm = hmac_tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+ if (rc)
+ goto out;
+ rc = crypto_shash_init(desc);
+out:
+ if (rc) {
+ kfree(desc);
+ return ERR_PTR(rc);
+ }
+ return desc;
+}
+
+/* Protect against 'cutting & pasting' the security.evm xattr by including
+ * inode-specific info in the HMAC.
+ *
+ * (Additional directory/file metadata needs to be added for more complete
+ * protection.)
+ */
+static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+ char *digest)
+{
+ struct h_misc {
+ unsigned long ino;
+ __u32 generation;
+ uid_t uid;
+ gid_t gid;
+ umode_t mode;
+ } hmac_misc;
+
+ memset(&hmac_misc, 0, sizeof hmac_misc);
+ hmac_misc.ino = inode->i_ino;
+ hmac_misc.generation = inode->i_generation;
+ hmac_misc.uid = inode->i_uid;
+ hmac_misc.gid = inode->i_gid;
+ hmac_misc.mode = inode->i_mode;
+ crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc);
+ crypto_shash_final(desc, digest);
+}
+
+/*
+ * Calculate the HMAC value across the set of protected security xattrs.
+ *
+ * Instead of retrieving the requested xattr, for performance, calculate
+ * the hmac using the requested xattr value. Don't alloc/free memory for
+ * each xattr, but attempt to re-use the previously allocated memory.
+ */
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ char *digest)
+{
+ struct inode *inode = dentry->d_inode;
+ struct shash_desc *desc;
+ char **xattrname;
+ size_t xattr_size = 0;
+ char *xattr_value = NULL;
+ int error;
+ int size;
+
+ if (!inode->i_op || !inode->i_op->getxattr)
+ return -EOPNOTSUPP;
+ desc = init_desc();
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ error = -ENODATA;
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ if ((req_xattr_name && req_xattr_value)
+ && !strcmp(*xattrname, req_xattr_name)) {
+ error = 0;
+ crypto_shash_update(desc, (const u8 *)req_xattr_value,
+ req_xattr_value_len);
+ continue;
+ }
+ size = vfs_getxattr_alloc(dentry, *xattrname,
+ &xattr_value, xattr_size, GFP_NOFS);
+ if (size == -ENOMEM) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (size < 0)
+ continue;
+
+ error = 0;
+ xattr_size = size;
+ crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+ }
+ hmac_add_misc(desc, inode, digest);
+
+out:
+ kfree(xattr_value);
+ kfree(desc);
+ return error;
+}
+
+/*
+ * Calculate the hmac and update security.evm xattr
+ *
+ * Expects to be called with i_mutex locked.
+ */
+int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ const char *xattr_value, size_t xattr_value_len)
+{
+ struct inode *inode = dentry->d_inode;
+ struct evm_ima_xattr_data xattr_data;
+ int rc = 0;
+
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data.digest);
+ if (rc == 0) {
+ xattr_data.type = EVM_XATTR_HMAC;
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+ &xattr_data,
+ sizeof(xattr_data), 0);
+ } else if (rc == -ENODATA)
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+ return rc;
+}
+
+int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
+ char *hmac_val)
+{
+ struct shash_desc *desc;
+
+ desc = init_desc();
+ if (IS_ERR(desc)) {
+ printk(KERN_INFO "init_desc failed\n");
+ return PTR_ERR(desc);
+ }
+
+ crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
+ hmac_add_misc(desc, inode, hmac_val);
+ kfree(desc);
+ return 0;
+}
+
+/*
+ * Get the key from the TPM for the SHA1-HMAC
+ */
+int evm_init_key(void)
+{
+ struct key *evm_key;
+ struct encrypted_key_payload *ekp;
+ int rc = 0;
+
+ evm_key = request_key(&key_type_encrypted, EVMKEY, NULL);
+ if (IS_ERR(evm_key))
+ return -ENOENT;
+
+ down_read(&evm_key->sem);
+ ekp = evm_key->payload.data;
+ if (ekp->decrypted_datalen > MAX_KEY_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(evmkey, ekp->decrypted_data, ekp->decrypted_datalen);
+out:
+ /* burn the original key contents */
+ memset(ekp->decrypted_data, 0, ekp->decrypted_datalen);
+ up_read(&evm_key->sem);
+ key_put(evm_key);
+ return rc;
+}
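init_desc() above caches the transform and reuses the module-wide key; stripped of that caching, the synchronous-hash API from <crypto/hash.h> that it builds on looks roughly like the following standalone sketch (hmac_sha1_buf() is illustrative, not part of the patch; out must hold SHA1_DIGEST_SIZE bytes):

static int hmac_sha1_buf(const u8 *key, unsigned int keylen,
			 const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int rc;

	tfm = crypto_alloc_shash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_shash_setkey(tfm, key, keylen);
	if (rc)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* one-shot digest: init + update + final in a single call */
	rc = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return rc;
}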
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
new file mode 100644
index 000000000000..92d3d99a9f7b
--- /dev/null
+++ b/security/integrity/evm/evm_main.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_main.c
+ * implements evm_inode_setxattr, evm_inode_post_setxattr,
+ * evm_inode_removexattr, and evm_verifyxattr
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <linux/integrity.h>
+#include <linux/evm.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+int evm_initialized;
+
+char *evm_hmac = "hmac(sha1)";
+
+char *evm_config_xattrnames[] = {
+#ifdef CONFIG_SECURITY_SELINUX
+ XATTR_NAME_SELINUX,
+#endif
+#ifdef CONFIG_SECURITY_SMACK
+ XATTR_NAME_SMACK,
+#endif
+ XATTR_NAME_CAPS,
+ NULL
+};
+
+static int evm_fixmode;
+static int __init evm_set_fixmode(char *str)
+{
+ if (strncmp(str, "fix", 3) == 0)
+ evm_fixmode = 1;
+ return 0;
+}
+__setup("evm=", evm_set_fixmode);
+
+/*
+ * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr
+ *
+ * Compute the HMAC on the dentry's protected set of extended attributes
+ * and compare it against the stored security.evm xattr.
+ *
+ * For performance:
+ * - use the previously retrieved xattr value and length to calculate the
+ * HMAC.
+ * - cache the verification result in the iint, when available.
+ *
+ * Returns integrity status
+ */
+static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ const char *xattr_name,
+ char *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ struct evm_ima_xattr_data xattr_data;
+ enum integrity_status evm_status = INTEGRITY_PASS;
+ int rc;
+
+ if (iint && iint->evm_status == INTEGRITY_PASS)
+ return iint->evm_status;
+
+ /* if the cached status is not PASS, check again (a previous attempt may have failed with -ENOMEM) */
+
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data.digest);
+ if (rc < 0) {
+ evm_status = (rc == -ENODATA)
+ ? INTEGRITY_NOXATTRS : INTEGRITY_FAIL;
+ goto out;
+ }
+
+ xattr_data.type = EVM_XATTR_HMAC;
+ rc = vfs_xattr_cmp(dentry, XATTR_NAME_EVM, (u8 *)&xattr_data,
+ sizeof xattr_data, GFP_NOFS);
+ if (rc < 0)
+ evm_status = (rc == -ENODATA)
+ ? INTEGRITY_NOLABEL : INTEGRITY_FAIL;
+out:
+ if (iint)
+ iint->evm_status = evm_status;
+ return evm_status;
+}
+
+static int evm_protected_xattr(const char *req_xattr_name)
+{
+ char **xattrname;
+ int namelen;
+ int found = 0;
+
+ namelen = strlen(req_xattr_name);
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ if ((strlen(*xattrname) == namelen)
+ && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) {
+ found = 1;
+ break;
+ }
+ if (strncmp(req_xattr_name,
+ *xattrname + XATTR_SECURITY_PREFIX_LEN,
+ strlen(req_xattr_name)) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
+
+/**
+ * evm_verifyxattr - verify the integrity of the requested xattr
+ * @dentry: object of the verify xattr
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Calculate the HMAC for the given dentry and verify it against the stored
+ * security.evm xattr. For performance, use the xattr value and length
+ * previously retrieved to calculate the HMAC.
+ *
+ * Returns the xattr integrity status.
+ *
+ * This function requires the caller to lock the inode's i_mutex before it
+ * is executed.
+ */
+enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value, size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ return INTEGRITY_UNKNOWN;
+
+ if (!iint) {
+ iint = integrity_iint_find(dentry->d_inode);
+ if (!iint)
+ return INTEGRITY_UNKNOWN;
+ }
+ return evm_verify_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, iint);
+}
+EXPORT_SYMBOL_GPL(evm_verifyxattr);
+
+/*
+ * evm_verify_current_integrity - verify the dentry's metadata integrity
+ * @dentry: pointer to the affected dentry
+ *
+ * Verify and return the dentry's metadata integrity. Verification is
+ * skipped before EVM is initialized and while in 'fix' mode.
+ */
+static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode)
+ return 0;
+ return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
+}
+
+/*
+ * evm_protect_xattr - protect the EVM extended attribute
+ *
+ * Prevent security.evm from being modified or removed without the
+ * necessary permissions or when the existing value is invalid.
+ *
+ * The posix xattr acls are 'system' prefixed, which normally would not
+ * affect security.evm. An interesting side affect of writing posix xattr
+ * acls is their modifying of the i_mode, which is included in security.evm.
+ * For posix xattr acls only, permit security.evm, even if it currently
+ * doesn't exist, to be updated.
+ */
+static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ enum integrity_status evm_status;
+
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ } else if (!evm_protected_xattr(xattr_name)) {
+ if (!posix_xattr_acl(xattr_name))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ return -EPERM;
+ }
+ evm_status = evm_verify_current_integrity(dentry);
+ return evm_status == INTEGRITY_PASS ? 0 : -EPERM;
+}
+
+/**
+ * evm_inode_setxattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: length of the new extended attribute value
+ *
+ * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+}
+
+/**
+ * evm_inode_removexattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ return evm_protect_xattr(dentry, xattr_name, NULL, 0);
+}
+
+/**
+ * evm_inode_post_setxattr - update 'security.evm' to reflect the changes
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: length of the new extended attribute value
+ *
+ * Update the HMAC stored in 'security.evm' to reflect the change.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * __vfs_setxattr_noperm(), whose caller has already taken the inode's
+ * i_mutex lock.
+ */
+void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (!evm_initialized || (!evm_protected_xattr(xattr_name)
+ && !posix_xattr_acl(xattr_name)))
+ return;
+
+ evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
+ return;
+}
+
+/**
+ * evm_inode_post_removexattr - update 'security.evm' after removing the xattr
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Update the HMAC stored in 'security.evm' to reflect removal of the xattr.
+ */
+void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ return;
+
+ mutex_lock(&inode->i_mutex);
+ evm_update_evmxattr(dentry, xattr_name, NULL, 0);
+ mutex_unlock(&inode->i_mutex);
+ return;
+}
+
+/**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ */
+int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ unsigned int ia_valid = attr->ia_valid;
+ enum integrity_status evm_status;
+
+ if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ return -EPERM;
+}
+
+/**
+ * evm_inode_post_setattr - update 'security.evm' after modifying metadata
+ * @dentry: pointer to the affected dentry
+ * @ia_valid: for the UID and GID status
+ *
+ * For now, update the HMAC stored in 'security.evm' to reflect UID/GID
+ * changes.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ if (!evm_initialized)
+ return;
+
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+ evm_update_evmxattr(dentry, NULL, NULL, 0);
+ return;
+}
+
+/*
+ * evm_inode_init_security - initializes security.evm
+ */
+int evm_inode_init_security(struct inode *inode,
+ const struct xattr *lsm_xattr,
+ struct xattr *evm_xattr)
+{
+ struct evm_ima_xattr_data *xattr_data;
+ int rc;
+
+ if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name))
+ return 0;
+
+ xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
+ if (!xattr_data)
+ return -ENOMEM;
+
+ xattr_data->type = EVM_XATTR_HMAC;
+ rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
+ if (rc < 0)
+ goto out;
+
+ evm_xattr->value = xattr_data;
+ evm_xattr->value_len = sizeof(*xattr_data);
+ evm_xattr->name = kstrdup(XATTR_EVM_SUFFIX, GFP_NOFS);
+ return 0;
+out:
+ kfree(xattr_data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(evm_inode_init_security);
+
+static int __init init_evm(void)
+{
+ int error;
+
+ error = evm_init_secfs();
+ if (error < 0) {
+ printk(KERN_INFO "EVM: Error registering secfs\n");
+ goto err;
+ }
+err:
+ return error;
+}
+
+static void __exit cleanup_evm(void)
+{
+ evm_cleanup_secfs();
+ if (hmac_tfm)
+ crypto_free_shash(hmac_tfm);
+}
+
+/*
+ * evm_display_config - list the EVM protected security extended attributes
+ */
+static int __init evm_display_config(void)
+{
+ char **xattrname;
+
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++)
+ printk(KERN_INFO "EVM: %s\n", *xattrname);
+ return 0;
+}
+
+pure_initcall(evm_display_config);
+late_initcall(init_evm);
+
+MODULE_DESCRIPTION("Extended Verification Module");
+MODULE_LICENSE("GPL");
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
new file mode 100644
index 000000000000..b1753e98bf9a
--- /dev/null
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/xattr.h>
+
+int posix_xattr_acl(char *xattr)
+{
+ int xattr_len = strlen(xattr);
+
+ if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
+ return 1;
+ if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
+ return 1;
+ return 0;
+}
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
new file mode 100644
index 000000000000..ac7629950578
--- /dev/null
+++ b/security/integrity/evm/evm_secfs.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_secfs.c
+ * - Used to signal when key is on keyring
+ * - Get the key and enable EVM
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include "evm.h"
+
+static struct dentry *evm_init_tpm;
+
+/**
+ * evm_read_key - read() for <securityfs>/evm
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_key(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", evm_initialized);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * evm_write_key - write() for <securityfs>/evm
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Used to signal that the key is on the kernel key ring.
+ * - get the integrity hmac key from the kernel key ring
+ * - create list of hmac protected extended attributes
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_key(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ int i, error;
+
+ if (!capable(CAP_SYS_ADMIN) || evm_initialized)
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if ((sscanf(temp, "%d", &i) != 1) || (i != 1))
+ return -EINVAL;
+
+ error = evm_init_key();
+ if (!error) {
+ evm_initialized = 1;
+ pr_info("EVM: initialized\n");
+ } else
+ pr_err("EVM: initialization failed\n");
+ return count;
+}
+
+static const struct file_operations evm_key_ops = {
+ .read = evm_read_key,
+ .write = evm_write_key,
+};
+
+int __init evm_init_secfs(void)
+{
+ int error = 0;
+
+ evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP,
+ NULL, NULL, &evm_key_ops);
+ if (!evm_init_tpm || IS_ERR(evm_init_tpm))
+ error = -EFAULT;
+ return error;
+}
+
+void __exit evm_cleanup_secfs(void)
+{
+ if (evm_init_tpm)
+ securityfs_remove(evm_init_tpm);
+}
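Writing "1" to the securityfs file triggers evm_init_key(), so the encrypted key named "evm-key" has to be on a kernel keyring first; afterwards the switch is one-way (a second write returns -EPERM). A minimal userspace sketch of that final step, assuming securityfs is mounted at the conventional /sys/kernel/security:

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* the 'evm-key' encrypted key must already be loaded at this point */
	int fd = open("/sys/kernel/security/evm", O_WRONLY);

	if (fd < 0)
		err(1, "open");
	if (write(fd, "1", 1) != 1)
		err(1, "write");	/* needs CAP_SYS_ADMIN, only works once */
	close(fd);
	return 0;
}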
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
new file mode 100644
index 000000000000..399641c3e846
--- /dev/null
+++ b/security/integrity/iint.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: iint.c
+ * - implements the integrity hooks: integrity_inode_alloc,
+ * integrity_inode_free
+ * - cache integrity information associated with an inode
+ * using an rbtree.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include "integrity.h"
+
+static struct rb_root integrity_iint_tree = RB_ROOT;
+static DEFINE_SPINLOCK(integrity_iint_lock);
+static struct kmem_cache *iint_cache __read_mostly;
+
+int iint_initialized;
+
+/*
+ * __integrity_iint_find - return the iint associated with an inode
+ */
+static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+ struct rb_node *n = integrity_iint_tree.rb_node;
+
+ assert_spin_locked(&integrity_iint_lock);
+
+ while (n) {
+ iint = rb_entry(n, struct integrity_iint_cache, rb_node);
+
+ if (inode < iint->inode)
+ n = n->rb_left;
+ else if (inode > iint->inode)
+ n = n->rb_right;
+ else
+ break;
+ }
+ if (!n)
+ return NULL;
+
+ return iint;
+}
+
+/*
+ * integrity_iint_find - return the iint associated with an inode
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return NULL;
+
+ spin_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ spin_unlock(&integrity_iint_lock);
+
+ return iint;
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+ iint->version = 0;
+ iint->flags = 0UL;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ kmem_cache_free(iint_cache, iint);
+}
+
+/**
+ * integrity_inode_alloc - allocate an iint associated with an inode
+ * @inode: pointer to the inode
+ */
+int integrity_inode_alloc(struct inode *inode)
+{
+ struct rb_node **p;
+ struct rb_node *new_node, *parent = NULL;
+ struct integrity_iint_cache *new_iint, *test_iint;
+ int rc;
+
+ new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+ if (!new_iint)
+ return -ENOMEM;
+
+ new_iint->inode = inode;
+ new_node = &new_iint->rb_node;
+
+ mutex_lock(&inode->i_mutex); /* i_flags */
+ spin_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ test_iint = rb_entry(parent, struct integrity_iint_cache,
+ rb_node);
+ rc = -EEXIST;
+ if (inode < test_iint->inode)
+ p = &(*p)->rb_left;
+ else if (inode > test_iint->inode)
+ p = &(*p)->rb_right;
+ else
+ goto out_err;
+ }
+
+ inode->i_flags |= S_IMA;
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &integrity_iint_tree);
+
+ spin_unlock(&integrity_iint_lock);
+ mutex_unlock(&inode->i_mutex); /* i_flags */
+
+ return 0;
+out_err:
+ spin_unlock(&integrity_iint_lock);
+ mutex_unlock(&inode->i_mutex); /* i_flags */
+ iint_free(new_iint);
+
+ return rc;
+}
+
+/**
+ * integrity_inode_free - called on security_inode_free
+ * @inode: pointer to the inode
+ *
+ * Free the integrity information (iint) associated with an inode.
+ */
+void integrity_inode_free(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return;
+
+ spin_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ rb_erase(&iint->rb_node, &integrity_iint_tree);
+ spin_unlock(&integrity_iint_lock);
+
+ iint_free(iint);
+}
+
+static void init_once(void *foo)
+{
+ struct integrity_iint_cache *iint = foo;
+
+ memset(iint, 0, sizeof *iint);
+ iint->version = 0;
+ iint->flags = 0UL;
+ mutex_init(&iint->mutex);
+ iint->evm_status = INTEGRITY_UNKNOWN;
+}
+
+static int __init integrity_iintcache_init(void)
+{
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+ 0, SLAB_PANIC, init_once);
+ iint_initialized = 1;
+ return 0;
+}
+security_initcall(integrity_iintcache_init);
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index b6ecfd4d8d78..19c053b82303 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -3,6 +3,7 @@
config IMA
bool "Integrity Measurement Architecture(IMA)"
depends on SECURITY
+ select INTEGRITY
select SECURITYFS
select CRYPTO
select CRYPTO_HMAC
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 787c4cb916cd..5690c021de8f 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o ima_iint.o ima_audit.o
+ ima_policy.o ima_audit.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 08408bd71462..3ccf7acac6df 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -24,18 +24,19 @@
#include <linux/tpm.h>
#include <linux/audit.h>
+#include "../integrity.h"
+
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
/* digest size for IMA, fits SHA1 or MD5 */
-#define IMA_DIGEST_SIZE 20
+#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
#define IMA_EVENT_NAME_LEN_MAX 255
#define IMA_HASH_BITS 9
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
/* set during initialization */
-extern int iint_initialized;
extern int ima_initialized;
extern int ima_used_chip;
extern char *ima_hash;
@@ -96,34 +97,21 @@ static inline unsigned long ima_hash_key(u8 *digest)
return hash_long(*digest, IMA_HASH_BITS);
}
-/* iint cache flags */
-#define IMA_MEASURED 0x01
-
-/* integrity data associated with an inode */
-struct ima_iint_cache {
- struct rb_node rb_node; /* rooted in ima_iint_tree */
- struct inode *inode; /* back pointer to inode in question */
- u64 version; /* track inode changes */
- unsigned char flags;
- u8 digest[IMA_DIGEST_SIZE];
- struct mutex mutex; /* protects: version, flags, digest */
-};
-
/* LIM API function definitions */
int ima_must_measure(struct inode *inode, int mask, int function);
-int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file);
-void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file);
+void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
const unsigned char *filename);
int ima_store_template(struct ima_template_entry *entry, int violation,
struct inode *inode);
-void ima_template_show(struct seq_file *m, void *e,
- enum ima_show_type show);
+void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show);
/* rbtree tree calls to lookup, insert, delete
* integrity data associated with an inode.
*/
-struct ima_iint_cache *ima_iint_insert(struct inode *inode);
-struct ima_iint_cache *ima_iint_find(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
/* IMA policy related functions */
enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index da36d2c085a4..0d50df04ccc4 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -126,7 +126,8 @@ int ima_must_measure(struct inode *inode, int mask, int function)
*
* Return 0 on success, error code otherwise
*/
-int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file)
{
int result = -EEXIST;
@@ -156,8 +157,8 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
*
* Must be called with iint->mutex held.
*/
-void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
- const unsigned char *filename)
+void ima_store_measurement(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename)
{
const char *op = "add_template_measure";
const char *audit_cause = "ENOMEM";
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ef21b96a0b42..e1aa2b482dd2 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -287,7 +287,7 @@ static atomic_t policy_opencount = ATOMIC_INIT(1);
/*
* ima_open_policy: sequentialize access to the policy file
*/
-int ima_open_policy(struct inode * inode, struct file * filp)
+static int ima_open_policy(struct inode * inode, struct file * filp)
{
/* No point in being allowed to open it if you aren't going to write */
if (!(filp->f_flags & O_WRONLY))
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
deleted file mode 100644
index 4ae73040ab7b..000000000000
--- a/security/integrity/ima/ima_iint.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2008 IBM Corporation
- *
- * Authors:
- * Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
- * File: ima_iint.c
- * - implements the IMA hooks: ima_inode_alloc, ima_inode_free
- * - cache integrity information associated with an inode
- * using a rbtree tree.
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/rbtree.h>
-#include "ima.h"
-
-static struct rb_root ima_iint_tree = RB_ROOT;
-static DEFINE_SPINLOCK(ima_iint_lock);
-static struct kmem_cache *iint_cache __read_mostly;
-
-int iint_initialized = 0;
-
-/*
- * __ima_iint_find - return the iint associated with an inode
- */
-static struct ima_iint_cache *__ima_iint_find(struct inode *inode)
-{
- struct ima_iint_cache *iint;
- struct rb_node *n = ima_iint_tree.rb_node;
-
- assert_spin_locked(&ima_iint_lock);
-
- while (n) {
- iint = rb_entry(n, struct ima_iint_cache, rb_node);
-
- if (inode < iint->inode)
- n = n->rb_left;
- else if (inode > iint->inode)
- n = n->rb_right;
- else
- break;
- }
- if (!n)
- return NULL;
-
- return iint;
-}
-
-/*
- * ima_iint_find - return the iint associated with an inode
- */
-struct ima_iint_cache *ima_iint_find(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- if (!IS_IMA(inode))
- return NULL;
-
- spin_lock(&ima_iint_lock);
- iint = __ima_iint_find(inode);
- spin_unlock(&ima_iint_lock);
-
- return iint;
-}
-
-static void iint_free(struct ima_iint_cache *iint)
-{
- iint->version = 0;
- iint->flags = 0UL;
- kmem_cache_free(iint_cache, iint);
-}
-
-/**
- * ima_inode_alloc - allocate an iint associated with an inode
- * @inode: pointer to the inode
- */
-int ima_inode_alloc(struct inode *inode)
-{
- struct rb_node **p;
- struct rb_node *new_node, *parent = NULL;
- struct ima_iint_cache *new_iint, *test_iint;
- int rc;
-
- new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
- if (!new_iint)
- return -ENOMEM;
-
- new_iint->inode = inode;
- new_node = &new_iint->rb_node;
-
- mutex_lock(&inode->i_mutex); /* i_flags */
- spin_lock(&ima_iint_lock);
-
- p = &ima_iint_tree.rb_node;
- while (*p) {
- parent = *p;
- test_iint = rb_entry(parent, struct ima_iint_cache, rb_node);
-
- rc = -EEXIST;
- if (inode < test_iint->inode)
- p = &(*p)->rb_left;
- else if (inode > test_iint->inode)
- p = &(*p)->rb_right;
- else
- goto out_err;
- }
-
- inode->i_flags |= S_IMA;
- rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &ima_iint_tree);
-
- spin_unlock(&ima_iint_lock);
- mutex_unlock(&inode->i_mutex); /* i_flags */
-
- return 0;
-out_err:
- spin_unlock(&ima_iint_lock);
- mutex_unlock(&inode->i_mutex); /* i_flags */
- iint_free(new_iint);
-
- return rc;
-}
-
-/**
- * ima_inode_free - called on security_inode_free
- * @inode: pointer to the inode
- *
- * Free the integrity information(iint) associated with an inode.
- */
-void ima_inode_free(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- if (!IS_IMA(inode))
- return;
-
- spin_lock(&ima_iint_lock);
- iint = __ima_iint_find(inode);
- rb_erase(&iint->rb_node, &ima_iint_tree);
- spin_unlock(&ima_iint_lock);
-
- iint_free(iint);
-}
-
-static void init_once(void *foo)
-{
- struct ima_iint_cache *iint = foo;
-
- memset(iint, 0, sizeof *iint);
- iint->version = 0;
- iint->flags = 0UL;
- mutex_init(&iint->mutex);
-}
-
-static int __init ima_iintcache_init(void)
-{
- iint_cache =
- kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
- SLAB_PANIC, init_once);
- iint_initialized = 1;
- return 0;
-}
-security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 26b46ff74663..1eff5cb001e5 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -22,6 +22,7 @@
#include <linux/mount.h>
#include <linux/mman.h>
#include <linux/slab.h>
+#include <linux/ima.h>
#include "ima.h"
@@ -82,7 +83,7 @@ out:
"open_writers");
}
-static void ima_check_last_writer(struct ima_iint_cache *iint,
+static void ima_check_last_writer(struct integrity_iint_cache *iint,
struct inode *inode,
struct file *file)
{
@@ -105,12 +106,12 @@ static void ima_check_last_writer(struct ima_iint_cache *iint,
void ima_file_free(struct file *file)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
+ struct integrity_iint_cache *iint;
if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
- iint = ima_iint_find(inode);
+ iint = integrity_iint_find(inode);
if (!iint)
return;
@@ -121,7 +122,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
int mask, int function)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
+ struct integrity_iint_cache *iint;
int rc = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
@@ -131,9 +132,9 @@ static int process_measurement(struct file *file, const unsigned char *filename,
if (rc != 0)
return rc;
retry:
- iint = ima_iint_find(inode);
+ iint = integrity_iint_find(inode);
if (!iint) {
- rc = ima_inode_alloc(inode);
+ rc = integrity_inode_alloc(inode);
if (!rc || rc == -EEXIST)
goto retry;
return rc;
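
The retry loop above is the common caller pattern for the relocated iint cache: look up the inode's integrity data, allocate it on a miss, and retry if the allocation lost a race with another thread. A minimal sketch of that pattern, assuming only the integrity_iint_find()/integrity_inode_alloc() interfaces introduced in this diff:

        /* Sketch: obtain (or lazily create) the iint for an inode. */
        static struct integrity_iint_cache *get_iint(struct inode *inode)
        {
                struct integrity_iint_cache *iint;
                int rc;

        retry:
                iint = integrity_iint_find(inode);
                if (iint)
                        return iint;

                rc = integrity_inode_alloc(inode);
                if (!rc || rc == -EEXIST)       /* allocated, or lost a benign race */
                        goto retry;

                return NULL;                    /* hard failure, e.g. -ENOMEM */
        }
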
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
new file mode 100644
index 000000000000..3143a3c39868
--- /dev/null
+++ b/security/integrity/integrity.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2009-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/integrity.h>
+#include <crypto/sha.h>
+
+/* iint cache flags */
+#define IMA_MEASURED 0x01
+
+enum evm_ima_xattr_type {
+ IMA_XATTR_DIGEST = 0x01,
+ EVM_XATTR_HMAC,
+ EVM_IMA_XATTR_DIGSIG,
+};
+
+struct evm_ima_xattr_data {
+ u8 type;
+ u8 digest[SHA1_DIGEST_SIZE];
+} __attribute__((packed));
+
+/* integrity data associated with an inode */
+struct integrity_iint_cache {
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct inode *inode; /* back pointer to inode in question */
+ u64 version; /* track inode changes */
+ unsigned char flags;
+ u8 digest[SHA1_DIGEST_SIZE];
+ struct mutex mutex; /* protects: version, flags, digest */
+ enum integrity_status evm_status;
+};
+
+/* rbtree tree calls to lookup, insert, delete
+ * integrity data associated with an inode.
+ */
+struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
+
+/* set during initialization */
+extern int iint_initialized;
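
The evm_ima_xattr_data layout above is shared by IMA and EVM: a one-byte type tag followed by the payload, which for a digest or HMAC is SHA1-sized. A minimal sketch of how a reader might validate that layout after fetching security.ima or security.evm (hypothetical helper, shown only to illustrate the format):

        /* Sketch: sanity-check a fetched security.ima/security.evm blob. */
        static int check_evm_ima_xattr(const struct evm_ima_xattr_data *xd,
                                       size_t len)
        {
                if (len < 1)                    /* need at least the type byte */
                        return -EINVAL;

                switch (xd->type) {
                case IMA_XATTR_DIGEST:
                case EVM_XATTR_HMAC:
                        /* type byte + SHA1 digest/HMAC, nothing more */
                        return len == sizeof(*xd) ? 0 : -EINVAL;
                case EVM_IMA_XATTR_DIGSIG:
                        /* a signature blob follows the type byte; length varies */
                        return 0;
                default:
                        return -EINVAL;
                }
        }
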
diff --git a/security/keys/Makefile b/security/keys/Makefile
index b34cc6ee6900..a56f1ffdc64d 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -14,7 +14,7 @@ obj-y := \
user_defined.o
obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += ecryptfs_format.o encrypted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
obj-$(CONFIG_KEYS_COMPAT) += compat.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
new file mode 100644
index 000000000000..6bc7a86d1027
--- /dev/null
+++ b/security/keys/encrypted-keys/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for encrypted keys
+#
+
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o ecryptfs_format.o
+obj-$(CONFIG_TRUSTED_KEYS) += masterkey_trusted.o
diff --git a/security/keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c
index 6daa3b6ff9ed..6daa3b6ff9ed 100644
--- a/security/keys/ecryptfs_format.c
+++ b/security/keys/encrypted-keys/ecryptfs_format.c
diff --git a/security/keys/ecryptfs_format.h b/security/keys/encrypted-keys/ecryptfs_format.h
index 40294de238bb..40294de238bb 100644
--- a/security/keys/ecryptfs_format.h
+++ b/security/keys/encrypted-keys/ecryptfs_format.h
diff --git a/security/keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index e7eca9ec4c65..f33804c1b4c8 100644
--- a/security/keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -299,31 +299,6 @@ out:
}
/*
- * request_trusted_key - request the trusted key
- *
- * Trusted keys are sealed to PCRs and other metadata. Although userspace
- * manages both trusted/encrypted key-types, like the encrypted key type
- * data, trusted key type data is not visible decrypted from userspace.
- */
-static struct key *request_trusted_key(const char *trusted_desc,
- u8 **master_key, size_t *master_keylen)
-{
- struct trusted_key_payload *tpayload;
- struct key *tkey;
-
- tkey = request_key(&key_type_trusted, trusted_desc, NULL);
- if (IS_ERR(tkey))
- goto error;
-
- down_read(&tkey->sem);
- tpayload = rcu_dereference(tkey->payload.data);
- *master_key = tpayload->key;
- *master_keylen = tpayload->key_len;
-error:
- return tkey;
-}
-
-/*
* request_user_key - request the user key
*
* Use a user provided key to encrypt/decrypt an encrypted-key.
@@ -469,8 +444,14 @@ static struct key *request_master_key(struct encrypted_key_payload *epayload,
goto out;
if (IS_ERR(mkey)) {
- pr_info("encrypted_key: key %s not found",
- epayload->master_desc);
+ int ret = PTR_ERR(epayload);
+
+ if (ret == -ENOTSUPP)
+ pr_info("encrypted_key: key %s not supported",
+ epayload->master_desc);
+ else
+ pr_info("encrypted_key: key %s not found",
+ epayload->master_desc);
goto out;
}
@@ -686,11 +667,19 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
return -EINVAL;
hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
- hex2bin(epayload->iv, hex_encoded_iv, ivsize);
- hex2bin(epayload->encrypted_data, hex_encoded_data, encrypted_datalen);
+ ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize);
+ if (ret < 0)
+ return -EINVAL;
+ ret = hex2bin(epayload->encrypted_data, hex_encoded_data,
+ encrypted_datalen);
+ if (ret < 0)
+ return -EINVAL;
hmac = epayload->format + epayload->datablob_len;
- hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), HASH_SIZE);
+ ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2),
+ HASH_SIZE);
+ if (ret < 0)
+ return -EINVAL;
mkey = request_master_key(epayload, &master_key, &master_keylen);
if (IS_ERR(mkey))
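
With this series hex2bin() reports invalid input instead of returning void, so each decode above is followed by the same check-and-bail sequence. A minimal sketch of that pattern in isolation (hypothetical field names, assuming the int-returning hex2bin() from the lib/hexdump.c change in this series):

        /* Sketch: decode a fixed-size hex field, rejecting malformed input. */
        static int decode_hex_field(u8 *dst, const char *hex, size_t binlen)
        {
                int ret;

                if (strlen(hex) != 2 * binlen)  /* two hex digits per byte */
                        return -EINVAL;

                ret = hex2bin(dst, hex, binlen);
                if (ret < 0)                    /* non-hex character seen */
                        return -EINVAL;

                return 0;
        }
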
diff --git a/security/keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
index cef5e2f2b7d1..b6ade8945250 100644
--- a/security/keys/encrypted.h
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -2,6 +2,17 @@
#define __ENCRYPTED_KEY_H
#define ENCRYPTED_DEBUG 0
+#ifdef CONFIG_TRUSTED_KEYS
+extern struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key, size_t *master_keylen);
+#else
+static inline struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key,
+ size_t *master_keylen)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
#if ENCRYPTED_DEBUG
static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
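
The header above compiles request_trusted_key() down to an ERR_PTR(-EOPNOTSUPP) stub when CONFIG_TRUSTED_KEYS is off, so callers need no #ifdefs of their own. A minimal sketch of that general pattern with placeholder names (CONFIG_FOO_BACKEND and foo_request_key() are hypothetical, not part of this diff):

        #ifdef CONFIG_FOO_BACKEND
        extern struct key *foo_request_key(const char *desc);
        #else
        static inline struct key *foo_request_key(const char *desc)
        {
                return ERR_PTR(-EOPNOTSUPP);    /* backend not compiled in */
        }
        #endif

        /* Caller treats "not compiled in" like any other lookup failure: */
        static int use_backend(const char *desc)
        {
                struct key *k = foo_request_key(desc);

                if (IS_ERR(k))
                        return PTR_ERR(k);      /* -EOPNOTSUPP when disabled */
                /* ... use the key ... */
                key_put(k);
                return 0;
        }
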
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
new file mode 100644
index 000000000000..df87272e3f51
--- /dev/null
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <keys/trusted-type.h>
+
+/*
+ * request_trusted_key - request the trusted key
+ *
+ * Trusted keys are sealed to PCRs and other metadata. Although userspace
+ * manages both trusted/encrypted key-types, like the encrypted key type
+ * data, trusted key type data is not visible decrypted from userspace.
+ */
+struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key, size_t *master_keylen)
+{
+ struct trusted_key_payload *tpayload;
+ struct key *tkey;
+
+ tkey = request_key(&key_type_trusted, trusted_desc, NULL);
+ if (IS_ERR(tkey))
+ goto error;
+
+ down_read(&tkey->sem);
+ tpayload = rcu_dereference(tkey->payload.data);
+ *master_key = tpayload->key;
+ *master_keylen = tpayload->key_len;
+error:
+ return tkey;
+}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 89df6b5f203c..bf4d8da5a795 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -1,6 +1,6 @@
/* Key garbage collector
*
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -10,6 +10,8 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"
@@ -19,17 +21,33 @@
unsigned key_gc_delay = 5 * 60;
/*
- * Reaper
+ * Reaper for unused keys.
+ */
+static void key_garbage_collector(struct work_struct *work);
+DECLARE_WORK(key_gc_work, key_garbage_collector);
+
+/*
+ * Reaper for links from keyrings to dead keys.
*/
static void key_gc_timer_func(unsigned long);
-static void key_garbage_collector(struct work_struct *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
-static DECLARE_WORK(key_gc_work, key_garbage_collector);
-static key_serial_t key_gc_cursor; /* the last key the gc considered */
-static bool key_gc_again;
-static unsigned long key_gc_executing;
+
static time_t key_gc_next_run = LONG_MAX;
-static time_t key_gc_new_timer;
+static struct key_type *key_gc_dead_keytype;
+
+static unsigned long key_gc_flags;
+#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */
+#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */
+#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */
+
+
+/*
+ * Any key whose type gets unregistered will be re-typed to this if it can't be
+ * immediately unlinked.
+ */
+struct key_type key_type_dead = {
+ .name = "dead",
+};
/*
* Schedule a garbage collection run.
@@ -42,31 +60,75 @@ void key_schedule_gc(time_t gc_at)
kenter("%ld", gc_at - now);
- if (gc_at <= now) {
- schedule_work(&key_gc_work);
+ if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
+ kdebug("IMMEDIATE");
+ queue_work(system_nrt_wq, &key_gc_work);
} else if (gc_at < key_gc_next_run) {
+ kdebug("DEFERRED");
+ key_gc_next_run = gc_at;
expires = jiffies + (gc_at - now) * HZ;
mod_timer(&key_gc_timer, expires);
}
}
/*
- * The garbage collector timer kicked off
+ * Some key's cleanup time was met after it expired, so we need to get the
+ * reaper to go through a cycle finding expired keys.
*/
static void key_gc_timer_func(unsigned long data)
{
kenter("");
key_gc_next_run = LONG_MAX;
- schedule_work(&key_gc_work);
+ set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
+ queue_work(system_nrt_wq, &key_gc_work);
+}
+
+/*
+ * wait_on_bit() sleep function for uninterruptible waiting
+ */
+static int key_gc_wait_bit(void *flags)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * Reap keys of dead type.
+ *
+ * We use three flags to make sure we see three complete cycles of the garbage
+ * collector: the first to mark keys of that type as being dead, the second to
+ * collect dead links and the third to clean up the dead keys. We have to be
+ * careful as there may already be a cycle in progress.
+ *
+ * The caller must be holding key_types_sem.
+ */
+void key_gc_keytype(struct key_type *ktype)
+{
+ kenter("%s", ktype->name);
+
+ key_gc_dead_keytype = ktype;
+ set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ smp_mb();
+ set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
+
+ kdebug("schedule");
+ queue_work(system_nrt_wq, &key_gc_work);
+
+ kdebug("sleep");
+ wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
+ TASK_UNINTERRUPTIBLE);
+
+ key_gc_dead_keytype = NULL;
+ kleave("");
}
/*
* Garbage collect pointers from a keyring.
*
- * Return true if we altered the keyring.
+ * Not called with any locks held. The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
*/
-static bool key_gc_keyring(struct key *keyring, time_t limit)
- __releases(key_serial_lock)
+static void key_gc_keyring(struct key *keyring, time_t limit)
{
struct keyring_list *klist;
struct key *key;
@@ -93,130 +155,234 @@ static bool key_gc_keyring(struct key *keyring, time_t limit)
unlock_dont_gc:
rcu_read_unlock();
dont_gc:
- kleave(" = false");
- return false;
+ kleave(" [no gc]");
+ return;
do_gc:
rcu_read_unlock();
- key_gc_cursor = keyring->serial;
- key_get(keyring);
- spin_unlock(&key_serial_lock);
+
keyring_gc(keyring, limit);
- key_put(keyring);
- kleave(" = true");
- return true;
+ kleave(" [gc]");
}
/*
- * Garbage collector for keys. This involves scanning the keyrings for dead,
- * expired and revoked keys that have overstayed their welcome
+ * Garbage collect an unreferenced, detached key
*/
-static void key_garbage_collector(struct work_struct *work)
+static noinline void key_gc_unused_key(struct key *key)
{
- struct rb_node *rb;
- key_serial_t cursor;
- struct key *key, *xkey;
- time_t new_timer = LONG_MAX, limit, now;
-
- now = current_kernel_time().tv_sec;
- kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);
-
- if (test_and_set_bit(0, &key_gc_executing)) {
- key_schedule_gc(current_kernel_time().tv_sec + 1);
- kleave(" [busy; deferring]");
- return;
+ key_check(key);
+
+ security_key_free(key);
+
+ /* deal with the user's key tracking and quota */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
}
- limit = now;
+ atomic_dec(&key->user->nkeys);
+ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ atomic_dec(&key->user->nikeys);
+
+ key_user_put(key->user);
+
+ /* now throw away the key memory */
+ if (key->type->destroy)
+ key->type->destroy(key);
+
+ kfree(key->description);
+
+#ifdef KEY_DEBUGGING
+ key->magic = KEY_DEBUG_MAGIC_X;
+#endif
+ kmem_cache_free(key_jar, key);
+}
+
+/*
+ * Garbage collector for unused keys.
+ *
+ * This is done in process context so that we don't have to disable interrupts
+ * all over the place. key_put() schedules this rather than trying to do the
+ * cleanup itself, which means key_put() doesn't have to sleep.
+ */
+static void key_garbage_collector(struct work_struct *work)
+{
+ static u8 gc_state; /* Internal persistent state */
+#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
+#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
+#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
+#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
+#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
+#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
+#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */
+
+ struct rb_node *cursor;
+ struct key *key;
+ time_t new_timer, limit;
+
+ kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+ limit = current_kernel_time().tv_sec;
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
limit = key_gc_delay;
+ /* Work out what we're going to be doing in this pass */
+ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ gc_state <<= 1;
+ if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
+
+ if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_DEAD_1;
+ kdebug("new pass %x", gc_state);
+
+ new_timer = LONG_MAX;
+
+ /* As only this function is permitted to remove things from the key
+ * serial tree, if cursor is non-NULL then it will always point to a
+ * valid node in the tree - even if lock got dropped.
+ */
spin_lock(&key_serial_lock);
+ cursor = rb_first(&key_serial_tree);
- if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
- spin_unlock(&key_serial_lock);
- clear_bit(0, &key_gc_executing);
- return;
- }
+continue_scanning:
+ while (cursor) {
+ key = rb_entry(cursor, struct key, serial_node);
+ cursor = rb_next(cursor);
- cursor = key_gc_cursor;
- if (cursor < 0)
- cursor = 0;
- if (cursor > 0)
- new_timer = key_gc_new_timer;
- else
- key_gc_again = false;
-
- /* find the first key above the cursor */
- key = NULL;
- rb = key_serial_tree.rb_node;
- while (rb) {
- xkey = rb_entry(rb, struct key, serial_node);
- if (cursor < xkey->serial) {
- key = xkey;
- rb = rb->rb_left;
- } else if (cursor > xkey->serial) {
- rb = rb->rb_right;
- } else {
- rb = rb_next(rb);
- if (!rb)
- goto reached_the_end;
- key = rb_entry(rb, struct key, serial_node);
- break;
+ if (atomic_read(&key->usage) == 0)
+ goto found_unreferenced_key;
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+ if (key->type == key_gc_dead_keytype) {
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
+ set_bit(KEY_FLAG_DEAD, &key->flags);
+ key->perm = 0;
+ goto skip_dead_key;
+ }
+ }
+
+ if (gc_state & KEY_GC_SET_TIMER) {
+ if (key->expiry > limit && key->expiry < new_timer) {
+ kdebug("will expire %x in %ld",
+ key_serial(key), key->expiry - limit);
+ new_timer = key->expiry;
+ }
}
- }
- if (!key)
- goto reached_the_end;
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
+ if (key->type == key_gc_dead_keytype)
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
- /* trawl through the keys looking for keyrings */
- for (;;) {
- if (key->expiry > limit && key->expiry < new_timer) {
- kdebug("will expire %x in %ld",
- key_serial(key), key->expiry - limit);
- new_timer = key->expiry;
+ if ((gc_state & KEY_GC_REAPING_LINKS) ||
+ unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ if (key->type == &key_type_keyring)
+ goto found_keyring;
}
- if (key->type == &key_type_keyring &&
- key_gc_keyring(key, limit))
- /* the gc had to release our lock so that the keyring
- * could be modified, so we have to get it again */
- goto gc_released_our_lock;
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
+ if (key->type == key_gc_dead_keytype)
+ goto destroy_dead_key;
- rb = rb_next(&key->serial_node);
- if (!rb)
- goto reached_the_end;
- key = rb_entry(rb, struct key, serial_node);
+ skip_dead_key:
+ if (spin_is_contended(&key_serial_lock) || need_resched())
+ goto contended;
}
-gc_released_our_lock:
- kdebug("gc_released_our_lock");
- key_gc_new_timer = new_timer;
- key_gc_again = true;
- clear_bit(0, &key_gc_executing);
- schedule_work(&key_gc_work);
- kleave(" [continue]");
- return;
-
- /* when we reach the end of the run, we set the timer for the next one */
-reached_the_end:
- kdebug("reached_the_end");
+contended:
spin_unlock(&key_serial_lock);
- key_gc_new_timer = new_timer;
- key_gc_cursor = 0;
- clear_bit(0, &key_gc_executing);
-
- if (key_gc_again) {
- /* there may have been a key that expired whilst we were
- * scanning, so if we discarded any links we should do another
- * scan */
- new_timer = now + 1;
- key_schedule_gc(new_timer);
- } else if (new_timer < LONG_MAX) {
+
+maybe_resched:
+ if (cursor) {
+ cond_resched();
+ spin_lock(&key_serial_lock);
+ goto continue_scanning;
+ }
+
+ /* We've completed the pass. Set the timer if we need to and queue a
+ * new cycle if necessary. We keep executing cycles until we find one
+ * where we didn't reap any keys.
+ */
+ kdebug("pass complete");
+
+ if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
- kleave(" [end]");
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ /* Make sure everyone revalidates their keys if we marked a
+ * bunch as being dead and make sure all keyring ex-payloads
+ * are destroyed.
+ */
+ kdebug("dead sync");
+ synchronize_rcu();
+ }
+
+ if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
+ KEY_GC_REAPING_DEAD_2))) {
+ if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
+ /* No remaining dead keys: short circuit the remaining
+ * keytype reap cycles.
+ */
+ kdebug("dead short");
+ gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
+ gc_state |= KEY_GC_REAPING_DEAD_3;
+ } else {
+ gc_state |= KEY_GC_REAP_AGAIN;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
+ kdebug("dead wake");
+ smp_mb();
+ clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
+ }
+
+ if (gc_state & KEY_GC_REAP_AGAIN)
+ queue_work(system_nrt_wq, &key_gc_work);
+ kleave(" [end %x]", gc_state);
+ return;
+
+ /* We found an unreferenced key - once we've removed it from the tree,
+ * we can safely drop the lock.
+ */
+found_unreferenced_key:
+ kdebug("unrefd key %d", key->serial);
+ rb_erase(&key->serial_node, &key_serial_tree);
+ spin_unlock(&key_serial_lock);
+
+ key_gc_unused_key(key);
+ gc_state |= KEY_GC_REAP_AGAIN;
+ goto maybe_resched;
+
+ /* We found a keyring and we need to check the payload for links to
+ * dead or expired keys. We don't flag another reap immediately as we
+ * have to wait for the old payload to be destroyed by RCU before we
+ * can reap the keys to which it refers.
+ */
+found_keyring:
+ spin_unlock(&key_serial_lock);
+ kdebug("scan keyring %d", key->serial);
+ key_gc_keyring(key, limit);
+ goto maybe_resched;
+
+ /* We found a dead key that is still referenced. Reset its type and
+ * destroy its payload with its semaphore held.
+ */
+destroy_dead_key:
+ spin_unlock(&key_serial_lock);
+ kdebug("destroy key %d", key->serial);
+ down_write(&key->sem);
+ key->type = &key_type_dead;
+ if (key_gc_dead_keytype->destroy)
+ key_gc_dead_keytype->destroy(key);
+ memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
+ up_write(&key->sem);
+ goto maybe_resched;
}
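
key_gc_keytype() above hands the dying keytype to the GC worker and then sleeps on a flag bit until the worker has finished its reap cycles and wakes it. A minimal sketch of that set-bit/queue/wait handshake, assuming the 3.x-era wait_on_bit() form that takes an action callback, as used in this file:

        static unsigned long work_flags;
        #define WORK_PENDING    0               /* bit: request not yet handled */

        static int sleep_on_bit(void *flags)
        {
                schedule();                     /* uninterruptible wait step */
                return 0;
        }

        /* Requester: publish the request, kick the worker, wait for the ack. */
        static void request_and_wait(struct work_struct *work)
        {
                set_bit(WORK_PENDING, &work_flags);
                smp_mb();                       /* flag visible before queueing */
                queue_work(system_nrt_wq, work);
                wait_on_bit(&work_flags, WORK_PENDING, sleep_on_bit,
                            TASK_UNINTERRUPTIBLE);
        }

        /* Worker, once done: clear the bit and wake the requester. */
        static void worker_done(void)
        {
                smp_mb();
                clear_bit(WORK_PENDING, &work_flags);
                wake_up_bit(&work_flags, WORK_PENDING);
        }
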
diff --git a/security/keys/internal.h b/security/keys/internal.h
index f375152a2500..c7a7caec4830 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -31,6 +31,7 @@
no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
+extern struct key_type key_type_dead;
extern struct key_type key_type_user;
/*****************************************************************************/
@@ -75,6 +76,7 @@ extern unsigned key_quota_maxbytes;
#define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */
+extern struct kmem_cache *key_jar;
extern struct rb_root key_serial_tree;
extern spinlock_t key_serial_lock;
extern struct mutex key_construction_mutex;
@@ -146,9 +148,11 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
extern long join_session_keyring(const char *name);
+extern struct work_struct key_gc_work;
extern unsigned key_gc_delay;
extern void keyring_gc(struct key *keyring, time_t limit);
extern void key_schedule_gc(time_t expiry_at);
+extern void key_gc_keytype(struct key_type *ktype);
extern int key_task_permission(const key_ref_t key_ref,
const struct cred *cred,
diff --git a/security/keys/key.c b/security/keys/key.c
index f7f9d93f08d9..4414abddcb5b 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -21,7 +21,7 @@
#include <linux/user_namespace.h>
#include "internal.h"
-static struct kmem_cache *key_jar;
+struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
@@ -36,17 +36,9 @@ unsigned int key_quota_maxbytes = 20000; /* general key space quota */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);
-static void key_cleanup(struct work_struct *work);
-static DECLARE_WORK(key_cleanup_task, key_cleanup);
-
/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
-/* Any key who's type gets unegistered will be re-typed to this */
-static struct key_type key_type_dead = {
- .name = "dead",
-};
-
#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
@@ -591,71 +583,6 @@ int key_reject_and_link(struct key *key,
}
EXPORT_SYMBOL(key_reject_and_link);
-/*
- * Garbage collect keys in process context so that we don't have to disable
- * interrupts all over the place.
- *
- * key_put() schedules this rather than trying to do the cleanup itself, which
- * means key_put() doesn't have to sleep.
- */
-static void key_cleanup(struct work_struct *work)
-{
- struct rb_node *_n;
- struct key *key;
-
-go_again:
- /* look for a dead key in the tree */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (atomic_read(&key->usage) == 0)
- goto found_dead_key;
- }
-
- spin_unlock(&key_serial_lock);
- return;
-
-found_dead_key:
- /* we found a dead key - once we've removed it from the tree, we can
- * drop the lock */
- rb_erase(&key->serial_node, &key_serial_tree);
- spin_unlock(&key_serial_lock);
-
- key_check(key);
-
- security_key_free(key);
-
- /* deal with the user's key tracking and quota */
- if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- spin_lock(&key->user->lock);
- key->user->qnkeys--;
- key->user->qnbytes -= key->quotalen;
- spin_unlock(&key->user->lock);
- }
-
- atomic_dec(&key->user->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
- atomic_dec(&key->user->nikeys);
-
- key_user_put(key->user);
-
- /* now throw away the key memory */
- if (key->type->destroy)
- key->type->destroy(key);
-
- kfree(key->description);
-
-#ifdef KEY_DEBUGGING
- key->magic = KEY_DEBUG_MAGIC_X;
-#endif
- kmem_cache_free(key_jar, key);
-
- /* there may, of course, be more than one key to destroy */
- goto go_again;
-}
-
/**
* key_put - Discard a reference to a key.
* @key: The key to discard a reference from.
@@ -670,7 +597,7 @@ void key_put(struct key *key)
key_check(key);
if (atomic_dec_and_test(&key->usage))
- schedule_work(&key_cleanup_task);
+ queue_work(system_nrt_wq, &key_gc_work);
}
}
EXPORT_SYMBOL(key_put);
@@ -1048,49 +975,11 @@ EXPORT_SYMBOL(register_key_type);
*/
void unregister_key_type(struct key_type *ktype)
{
- struct rb_node *_n;
- struct key *key;
-
down_write(&key_types_sem);
-
- /* withdraw the key type */
list_del_init(&ktype->link);
-
- /* mark all the keys of this type dead */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (key->type == ktype) {
- key->type = &key_type_dead;
- set_bit(KEY_FLAG_DEAD, &key->flags);
- }
- }
-
- spin_unlock(&key_serial_lock);
-
- /* make sure everyone revalidates their keys */
- synchronize_rcu();
-
- /* we should now be able to destroy the payloads of all the keys of
- * this type with impunity */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (key->type == ktype) {
- if (ktype->destroy)
- ktype->destroy(key);
- memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
- }
- }
-
- spin_unlock(&key_serial_lock);
- up_write(&key_types_sem);
-
- key_schedule_gc(0);
+ downgrade_write(&key_types_sem);
+ key_gc_keytype(ktype);
+ up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
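
unregister_key_type() above now holds key_types_sem for writing only long enough to unlink the type, then downgrades to a read lock so the (sleepable) keytype reap can run while writers stay excluded. A minimal sketch of the downgrade_write() pattern with placeholder names:

        static DECLARE_RWSEM(registry_sem);

        static void unregister_entry(struct list_head *entry)
        {
                down_write(&registry_sem);      /* exclusive: unlink the entry */
                list_del_init(entry);
                downgrade_write(&registry_sem); /* now held for read: writers
                                                 * still excluded, readers OK  */
                /* ... long, sleepable cleanup that only needs read access ... */
                up_read(&registry_sem);
        }
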
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 30e242f7bd0e..37a7f3b28852 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -860,8 +860,7 @@ void __key_link(struct key *keyring, struct key *key,
kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
- klist = rcu_dereference_protected(keyring->payload.subscriptions,
- rwsem_is_locked(&keyring->sem));
+ klist = rcu_dereference_locked_keyring(keyring);
atomic_inc(&key->usage);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index a3063eb3dc23..1068cb1939b3 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -270,7 +270,7 @@ static int install_session_keyring(struct key *keyring)
if (!new)
return -ENOMEM;
- ret = install_session_keyring_to_cred(new, NULL);
+ ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
@@ -589,12 +589,22 @@ try_again:
ret = install_user_keyrings();
if (ret < 0)
goto error;
- ret = install_session_keyring(
- cred->user->session_keyring);
+ if (lflags & KEY_LOOKUP_CREATE)
+ ret = join_session_keyring(NULL);
+ else
+ ret = install_session_keyring(
+ cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
+ } else if (cred->tgcred->session_keyring ==
+ cred->user->session_keyring &&
+ lflags & KEY_LOOKUP_CREATE) {
+ ret = join_session_keyring(NULL);
+ if (ret < 0)
+ goto error;
+ goto reget_creds;
}
rcu_read_lock();
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 0c33e2ea1f3c..0964fc236946 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -779,7 +779,10 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
opt->pcrinfo_len = strlen(args[0].from) / 2;
if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
return -EINVAL;
- hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len);
+ res = hex2bin(opt->pcrinfo, args[0].from,
+ opt->pcrinfo_len);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_keyhandle:
res = strict_strtoul(args[0].from, 16, &handle);
@@ -791,12 +794,18 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
case Opt_keyauth:
if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
return -EINVAL;
- hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE);
+ res = hex2bin(opt->keyauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_blobauth:
if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
return -EINVAL;
- hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE);
+ res = hex2bin(opt->blobauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
break;
case Opt_migratable:
if (*args[0].from == '0')
@@ -860,7 +869,9 @@ static int datablob_parse(char *datablob, struct trusted_key_payload *p,
p->blob_len = strlen(c) / 2;
if (p->blob_len > MAX_BLOB_SIZE)
return -EINVAL;
- hex2bin(p->blob, c, p->blob_len);
+ ret = hex2bin(p->blob, c, p->blob_len);
+ if (ret < 0)
+ return -EINVAL;
ret = getoptions(datablob, p, o);
if (ret < 0)
return ret;
diff --git a/security/security.c b/security/security.c
index d9e153390926..0c6cc69c8f86 100644
--- a/security/security.c
+++ b/security/security.c
@@ -16,15 +16,16 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/security.h>
+#include <linux/integrity.h>
#include <linux/ima.h>
+#include <linux/evm.h>
+
+#define MAX_LSM_EVM_XATTR 2
/* Boot-time LSM user choice */
static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
CONFIG_DEFAULT_SECURITY;
-/* things that live in capability.c */
-extern void __init security_fixup_ops(struct security_operations *ops);
-
static struct security_operations *security_ops;
static struct security_operations default_security_ops = {
.name = "default",
@@ -334,20 +335,57 @@ int security_inode_alloc(struct inode *inode)
void security_inode_free(struct inode *inode)
{
- ima_inode_free(inode);
+ integrity_inode_free(inode);
security_ops->inode_free_security(inode);
}
int security_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
- void **value, size_t *len)
+ const struct qstr *qstr,
+ const initxattrs initxattrs, void *fs_data)
{
+ struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
+ struct xattr *lsm_xattr, *evm_xattr, *xattr;
+ int ret;
+
if (unlikely(IS_PRIVATE(inode)))
- return -EOPNOTSUPP;
+ return 0;
+
+ memset(new_xattrs, 0, sizeof new_xattrs);
+ if (!initxattrs)
+ return security_ops->inode_init_security(inode, dir, qstr,
+ NULL, NULL, NULL);
+ lsm_xattr = new_xattrs;
+ ret = security_ops->inode_init_security(inode, dir, qstr,
+ &lsm_xattr->name,
+ &lsm_xattr->value,
+ &lsm_xattr->value_len);
+ if (ret)
+ goto out;
+
+ evm_xattr = lsm_xattr + 1;
+ ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
+ if (ret)
+ goto out;
+ ret = initxattrs(inode, new_xattrs, fs_data);
+out:
+ for (xattr = new_xattrs; xattr->name != NULL; xattr++) {
+ kfree(xattr->name);
+ kfree(xattr->value);
+ }
+ return (ret == -EOPNOTSUPP) ? 0 : ret;
+}
+EXPORT_SYMBOL(security_inode_init_security);
+
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len)
+{
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
return security_ops->inode_init_security(inode, dir, qstr, name, value,
len);
}
-EXPORT_SYMBOL(security_inode_init_security);
+EXPORT_SYMBOL(security_old_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
@@ -523,9 +561,14 @@ int security_inode_permission(struct inode *inode, int mask)
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_setattr(dentry, attr);
+ ret = security_ops->inode_setattr(dentry, attr);
+ if (ret)
+ return ret;
+ return evm_inode_setattr(dentry, attr);
}
EXPORT_SYMBOL_GPL(security_inode_setattr);
@@ -539,9 +582,14 @@ int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_setxattr(dentry, name, value, size, flags);
+ ret = security_ops->inode_setxattr(dentry, name, value, size, flags);
+ if (ret)
+ return ret;
+ return evm_inode_setxattr(dentry, name, value, size);
}
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -550,6 +598,7 @@ void security_inode_post_setxattr(struct dentry *dentry, const char *name,
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return;
security_ops->inode_post_setxattr(dentry, name, value, size, flags);
+ evm_inode_post_setxattr(dentry, name, value, size);
}
int security_inode_getxattr(struct dentry *dentry, const char *name)
@@ -568,9 +617,14 @@ int security_inode_listxattr(struct dentry *dentry)
int security_inode_removexattr(struct dentry *dentry, const char *name)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_removexattr(dentry, name);
+ ret = security_ops->inode_removexattr(dentry, name);
+ if (ret)
+ return ret;
+ return evm_inode_removexattr(dentry, name);
}
int security_inode_need_killpriv(struct dentry *dentry)
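
security_inode_init_security() above now collects the LSM xattr and the EVM xattr into one array and hands the whole batch to the filesystem through the initxattrs callback, so all security xattrs can be written in a single filesystem transaction. A minimal sketch of a filesystem-side callback, assuming the struct xattr fields used above; fs_setxattr_locked() is a hypothetical stand-in for the filesystem's real transactional setter:

        /* Sketch: write every xattr collected by the security layer for a
         * newly created inode.  xattr->name is the suffix after "security.";
         * the helper is assumed to supply the prefix/index itself. */
        static int examplefs_initxattrs(struct inode *inode,
                                        const struct xattr *xattr_array,
                                        void *fs_data)
        {
                const struct xattr *xattr;
                int err = 0;

                for (xattr = xattr_array; xattr->name != NULL; xattr++) {
                        err = fs_setxattr_locked(inode, xattr->name,
                                                 xattr->value, xattr->value_len,
                                                 fs_data);
                        if (err < 0)
                                break;
                }
                return err;
        }

        /* Hooked up from the inode-creation path, for example:
         *      security_inode_init_security(inode, dir, qstr,
         *                                   &examplefs_initxattrs, trans);
         */
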
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
index 90664385dead..e75dd94e2d2b 100644
--- a/security/selinux/exports.c
+++ b/security/selinux/exports.c
@@ -12,6 +12,7 @@
* as published by the Free Software Foundation.
*/
#include <linux/module.h>
+#include <linux/selinux.h>
#include "security.h"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 266a2292451d..e545b9f67072 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -89,14 +89,14 @@
#include "xfrm.h"
#include "netlabel.h"
#include "audit.h"
+#include "avc_ss.h"
#define NUM_SEL_MNT_OPTS 5
-extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
extern struct security_operations *security_ops;
/* SECMARK reference count */
-atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
int selinux_enforcing;
@@ -279,10 +279,6 @@ static void superblock_free_security(struct super_block *sb)
kfree(sbsec);
}
-/* The security server must be initialized before
- any labeling or access decisions can be provided. */
-extern int ss_initialized;
-
/* The file system's label must be initialized prior to use. */
static const char *labeling_behaviors[6] = {
@@ -2097,9 +2093,6 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm)
return (atsecure || cap_bprm_secureexec(bprm));
}
-extern struct vfsmount *selinuxfs_mount;
-extern struct dentry *selinux_null;
-
/* Derived from fs/exec.c:flush_old_files. */
static inline void flush_unauthorized_files(const struct cred *cred,
struct files_struct *files)
@@ -5803,8 +5796,6 @@ static int selinux_disabled;
int selinux_disable(void)
{
- extern void exit_sel_fs(void);
-
if (ss_initialized) {
/* Not permitted after initial policy load. */
return -EINVAL;
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index 4677aa519b04..d5c328452df0 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -18,5 +18,11 @@ struct security_class_mapping {
extern struct security_class_mapping secclass_map[];
+/*
+ * The security server must be initialized before
+ * any labeling or access decisions can be provided.
+ */
+extern int ss_initialized;
+
#endif /* _SELINUX_AVC_SS_H_ */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 3ba4feba048a..d871e8ad2103 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -216,6 +216,14 @@ struct selinux_kernel_status {
extern void selinux_status_update_setenforce(int enforcing);
extern void selinux_status_update_policyload(int seqno);
+extern void selinux_complete_init(void);
+extern int selinux_disable(void);
+extern void exit_sel_fs(void);
+extern struct dentry *selinux_null;
+extern struct vfsmount *selinuxfs_mount;
+extern void selnl_notify_setenforce(int val);
+extern void selnl_notify_policyload(u32 seqno);
+extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
#endif /* _SELINUX_SECURITY_H_ */
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 36ac257cec9a..ce3f481558d8 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -19,6 +19,8 @@
#include <linux/selinux_netlink.h>
#include <net/net_namespace.h>
+#include "security.h"
+
static struct sock *selnl;
static int selnl_msglen(int msgtype)
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 8b02b2137da2..0920ea3bf599 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -21,6 +21,7 @@
#include "flask.h"
#include "av_permissions.h"
+#include "security.h"
struct nlmsg_perm {
u16 nlmsg_type;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 55d92cbb177a..f46658722c78 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -75,8 +75,6 @@ static char policy_opened;
/* global data for policy capabilities */
static struct dentry *policycap_dir;
-extern void selnl_notify_setenforce(int val);
-
/* Check whether a task is allowed to use a security operation. */
static int task_has_security(struct task_struct *tsk,
u32 perms)
@@ -278,7 +276,6 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
char *page = NULL;
ssize_t length;
int new_value;
- extern int selinux_disable(void);
length = -ENOMEM;
if (count >= PAGE_SIZE)
@@ -478,7 +475,7 @@ static struct vm_operations_struct sel_mmap_policy_ops = {
.page_mkwrite = sel_mmap_policy_fault,
};
-int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
+static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_SHARED) {
/* do not allow mprotect to make mapping writable */
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index a53373207fb4..2ec904177fe0 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -555,7 +555,7 @@ static int cond_write_av_list(struct policydb *p,
return 0;
}
-int cond_write_node(struct policydb *p, struct cond_node *node,
+static int cond_write_node(struct policydb *p, struct cond_node *node,
struct policy_file *fp)
{
struct cond_expr *cur_expr;
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 3f209c635295..4d1f87466508 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -13,6 +13,7 @@
#include "avtab.h"
#include "symtab.h"
#include "policydb.h"
+#include "../include/conditional.h"
#define COND_EXPR_MAXDEPTH 10
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 2381d0ded228..a7f61d52f05c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -1743,8 +1743,6 @@ static int policydb_bounds_sanity_check(struct policydb *p)
return 0;
}
-extern int ss_initialized;
-
u16 string_to_security_class(struct policydb *p, const char *name)
{
struct class_datum *cladatum;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index f6917bc0aa05..185f849a26f6 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -70,8 +70,6 @@
#include "ebitmap.h"
#include "audit.h"
-extern void selnl_notify_policyload(u32 seqno);
-
int selinux_policycap_netpeer;
int selinux_policycap_openperm;
@@ -1790,7 +1788,6 @@ static void security_load_policycaps(void)
POLICYDB_CAPABILITY_OPENPERM);
}
-extern void selinux_complete_init(void);
static int security_preserve_bools(struct policydb *p);
/**
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 2b6c6a516123..2ad00657b801 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -41,9 +41,9 @@ struct superblock_smack {
};
struct socket_smack {
- char *smk_out; /* outbound label */
- char *smk_in; /* inbound label */
- char smk_packet[SMK_LABELLEN]; /* TCP peer label */
+ char *smk_out; /* outbound label */
+ char *smk_in; /* inbound label */
+ char *smk_packet; /* TCP peer label */
};
/*
@@ -116,13 +116,19 @@ struct smk_netlbladdr {
* If there is a cipso value associated with the label it
* gets stored here, too. This will most likely be rare as
* the cipso direct mapping in used internally.
+ *
+ * Keep the access rules for this subject label here so that
+ * the entire set of rules does not need to be examined every
+ * time.
*/
struct smack_known {
struct list_head list;
char smk_known[SMK_LABELLEN];
u32 smk_secid;
struct smack_cipso *smk_cipso;
- spinlock_t smk_cipsolock; /* for changing cipso map */
+ spinlock_t smk_cipsolock; /* for changing cipso map */
+ struct list_head smk_rules; /* access rules */
+ struct mutex smk_rules_lock; /* lock for the rules */
};
/*
@@ -150,7 +156,6 @@ struct smack_known {
/*
* smackfs magic number
- * smackfs macic number
*/
#define SMACK_MAGIC 0x43415d53 /* "SMAC" */
@@ -176,9 +181,9 @@ struct smack_known {
#define MAY_NOT 0
/*
- * Number of access types used by Smack (rwxa)
+ * Number of access types used by Smack (rwxat)
*/
-#define SMK_NUM_ACCESS_TYPE 4
+#define SMK_NUM_ACCESS_TYPE 5
/*
* Smack audit data; is empty if CONFIG_AUDIT not set
@@ -201,10 +206,12 @@ int smk_access_entry(char *, char *, struct list_head *);
int smk_access(char *, char *, int, struct smk_audit_info *);
int smk_curacc(char *, u32, struct smk_audit_info *);
int smack_to_cipso(const char *, struct smack_cipso *);
-void smack_from_cipso(u32, char *, char *);
+char *smack_from_cipso(u32, char *);
char *smack_from_secid(const u32);
+void smk_parse_smack(const char *string, int len, char *smack);
char *smk_import(const char *, int);
struct smack_known *smk_import_entry(const char *, int);
+struct smack_known *smk_find_entry(const char *);
u32 smack_to_secid(const char *);
/*
@@ -223,7 +230,6 @@ extern struct smack_known smack_known_star;
extern struct smack_known smack_known_web;
extern struct list_head smack_known_list;
-extern struct list_head smack_rule_list;
extern struct list_head smk_netlbladdr_list;
extern struct security_operations smack_ops;
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 9637e107f7ea..cc7cb6edba08 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -77,14 +77,19 @@ int log_policy = SMACK_AUDIT_DENIED;
* entry is found returns -ENOENT.
*
* NOTE:
- * Even though Smack labels are usually shared on smack_list
- * labels that come in off the network can't be imported
- * and added to the list for locking reasons.
*
- * Therefore, it is necessary to check the contents of the labels,
- * not just the pointer values. Of course, in most cases the labels
- * will be on the list, so checking the pointers may be a worthwhile
- * optimization.
+ * Earlier versions of this function allowed for labels that
+ * were not on the label list. This was done to allow for
+ * labels to come over the network that had never been seen
+ * before on this host. Unless the receiving socket has the
+ * star label this will always result in a failure check. The
+ * star labeled socket case is now handled in the networking
+ * hooks so there is no case where the label is not on the
+ * label list. Checking to see if the address of two labels
+ * is the same is now a reliable test.
+ *
+ * Do the object check first because that is more
+ * likely to differ.
*/
int smk_access_entry(char *subject_label, char *object_label,
struct list_head *rule_list)
@@ -93,13 +98,10 @@ int smk_access_entry(char *subject_label, char *object_label,
struct smack_rule *srp;
list_for_each_entry_rcu(srp, rule_list, list) {
- if (srp->smk_subject == subject_label ||
- strcmp(srp->smk_subject, subject_label) == 0) {
- if (srp->smk_object == object_label ||
- strcmp(srp->smk_object, object_label) == 0) {
- may = srp->smk_access;
- break;
- }
+ if (srp->smk_object == object_label &&
+ srp->smk_subject == subject_label) {
+ may = srp->smk_access;
+ break;
}
}
@@ -117,18 +119,12 @@ int smk_access_entry(char *subject_label, char *object_label,
* access rule list and returns 0 if the access is permitted,
* non zero otherwise.
*
- * Even though Smack labels are usually shared on smack_list
- * labels that come in off the network can't be imported
- * and added to the list for locking reasons.
- *
- * Therefore, it is necessary to check the contents of the labels,
- * not just the pointer values. Of course, in most cases the labels
- * will be on the list, so checking the pointers may be a worthwhile
- * optimization.
+ * Smack labels are shared on smack_list
*/
int smk_access(char *subject_label, char *object_label, int request,
struct smk_audit_info *a)
{
+ struct smack_known *skp;
int may = MAY_NOT;
int rc = 0;
@@ -137,8 +133,7 @@ int smk_access(char *subject_label, char *object_label, int request,
*
* A star subject can't access any object.
*/
- if (subject_label == smack_known_star.smk_known ||
- strcmp(subject_label, smack_known_star.smk_known) == 0) {
+ if (subject_label == smack_known_star.smk_known) {
rc = -EACCES;
goto out_audit;
}
@@ -148,33 +143,27 @@ int smk_access(char *subject_label, char *object_label, int request,
* An internet subject can access any object.
*/
if (object_label == smack_known_web.smk_known ||
- subject_label == smack_known_web.smk_known ||
- strcmp(object_label, smack_known_web.smk_known) == 0 ||
- strcmp(subject_label, smack_known_web.smk_known) == 0)
+ subject_label == smack_known_web.smk_known)
goto out_audit;
/*
* A star object can be accessed by any subject.
*/
- if (object_label == smack_known_star.smk_known ||
- strcmp(object_label, smack_known_star.smk_known) == 0)
+ if (object_label == smack_known_star.smk_known)
goto out_audit;
/*
* An object can be accessed in any way by a subject
* with the same label.
*/
- if (subject_label == object_label ||
- strcmp(subject_label, object_label) == 0)
+ if (subject_label == object_label)
goto out_audit;
/*
* A hat subject can read any object.
* A floor object can be read by any subject.
*/
if ((request & MAY_ANYREAD) == request) {
- if (object_label == smack_known_floor.smk_known ||
- strcmp(object_label, smack_known_floor.smk_known) == 0)
+ if (object_label == smack_known_floor.smk_known)
goto out_audit;
- if (subject_label == smack_known_hat.smk_known ||
- strcmp(subject_label, smack_known_hat.smk_known) == 0)
+ if (subject_label == smack_known_hat.smk_known)
goto out_audit;
}
/*
@@ -184,8 +173,9 @@ int smk_access(char *subject_label, char *object_label, int request,
* good. A negative response from smk_access_entry()
* indicates there is no entry for this pair.
*/
+ skp = smk_find_entry(subject_label);
rcu_read_lock();
- may = smk_access_entry(subject_label, object_label, &smack_rule_list);
+ may = smk_access_entry(subject_label, object_label, &skp->smk_rules);
rcu_read_unlock();
if (may > 0 && (request & may) == request)
@@ -344,17 +334,32 @@ void smack_log(char *subject_label, char *object_label, int request,
static DEFINE_MUTEX(smack_known_lock);
/**
- * smk_import_entry - import a label, return the list entry
+ * smk_find_entry - find a label on the list, return the list entry
* @string: a text string that might be a Smack label
- * @len: the maximum size, or zero if it is NULL terminated.
*
* Returns a pointer to the entry in the label list that
- * matches the passed string, adding it if necessary.
+ * matches the passed string.
*/
-struct smack_known *smk_import_entry(const char *string, int len)
+struct smack_known *smk_find_entry(const char *string)
{
struct smack_known *skp;
- char smack[SMK_LABELLEN];
+
+ list_for_each_entry_rcu(skp, &smack_known_list, list) {
+ if (strncmp(skp->smk_known, string, SMK_MAXLEN) == 0)
+ return skp;
+ }
+
+ return NULL;
+}
+
+/**
+ * smk_parse_smack - parse smack label from a text string
+ * @string: a text string that might contain a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ * @smack: parsed smack label, or NULL if parse error
+ */
+void smk_parse_smack(const char *string, int len, char *smack)
+{
int found;
int i;
@@ -372,27 +377,38 @@ struct smack_known *smk_import_entry(const char *string, int len)
} else
smack[i] = string[i];
}
+}
+
+/**
+ * smk_import_entry - import a label, return the list entry
+ * @string: a text string that might be a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string, adding it if necessary.
+ */
+struct smack_known *smk_import_entry(const char *string, int len)
+{
+ struct smack_known *skp;
+ char smack[SMK_LABELLEN];
+ smk_parse_smack(string, len, smack);
if (smack[0] == '\0')
return NULL;
mutex_lock(&smack_known_lock);
- found = 0;
- list_for_each_entry_rcu(skp, &smack_known_list, list) {
- if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) {
- found = 1;
- break;
- }
- }
+ skp = smk_find_entry(smack);
- if (found == 0) {
+ if (skp == NULL) {
skp = kzalloc(sizeof(struct smack_known), GFP_KERNEL);
if (skp != NULL) {
strncpy(skp->smk_known, smack, SMK_MAXLEN);
skp->smk_secid = smack_next_secid++;
skp->smk_cipso = NULL;
+ INIT_LIST_HEAD(&skp->smk_rules);
spin_lock_init(&skp->smk_cipsolock);
+ mutex_init(&skp->smk_rules_lock);
/*
* Make sure that the entry is actually
* filled before putting it on the list.
@@ -480,19 +496,12 @@ u32 smack_to_secid(const char *smack)
* smack_from_cipso - find the Smack label associated with a CIPSO option
* @level: Bell & LaPadula level from the network
* @cp: Bell & LaPadula categories from the network
- * @result: where to put the Smack value
*
* This is a simple lookup in the label table.
*
- * This is an odd duck as far as smack handling goes in that
- * it sends back a copy of the smack label rather than a pointer
- * to the master list. This is done because it is possible for
- * a foreign host to send a smack label that is new to this
- * machine and hence not on the list. That would not be an
- * issue except that adding an entry to the master list can't
- * be done at that point.
+ * Return the matching label from the label list or NULL.
*/
-void smack_from_cipso(u32 level, char *cp, char *result)
+char *smack_from_cipso(u32 level, char *cp)
{
struct smack_known *kp;
char *final = NULL;
@@ -509,12 +518,13 @@ void smack_from_cipso(u32 level, char *cp, char *result)
final = kp->smk_known;
spin_unlock_bh(&kp->smk_cipsolock);
+
+ if (final != NULL)
+ break;
}
rcu_read_unlock();
- if (final == NULL)
- final = smack_known_huh.smk_known;
- strncpy(result, final, SMK_MAXLEN);
- return;
+
+ return final;
}
/**
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index b9c5e149903b..7db62b48eb42 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -5,12 +5,13 @@
*
* Authors:
* Casey Schaufler <casey@schaufler-ca.com>
- * Jarkko Sakkinen <ext-jarkko.2.sakkinen@nokia.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@intel.com>
*
* Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul@paul-moore.com>
* Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2011 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
@@ -34,6 +35,7 @@
#include <linux/audit.h>
#include <linux/magic.h>
#include <linux/dcache.h>
+#include <linux/personality.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
@@ -441,11 +443,17 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
* BPRM hooks
*/
+/**
+ * smack_bprm_set_creds - set creds for exec
+ * @bprm: the exec information
+ *
+ * Returns 0 on success, -EPERM or a capability error code otherwise
+ */
static int smack_bprm_set_creds(struct linux_binprm *bprm)
{
- struct task_smack *tsp = bprm->cred->security;
+ struct inode *inode = bprm->file->f_path.dentry->d_inode;
+ struct task_smack *bsp = bprm->cred->security;
struct inode_smack *isp;
- struct dentry *dp;
int rc;
rc = cap_bprm_set_creds(bprm);
@@ -455,20 +463,48 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
if (bprm->cred_prepared)
return 0;
- if (bprm->file == NULL || bprm->file->f_dentry == NULL)
+ isp = inode->i_security;
+ if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
return 0;
- dp = bprm->file->f_dentry;
+ if (bprm->unsafe)
+ return -EPERM;
- if (dp->d_inode == NULL)
- return 0;
+ bsp->smk_task = isp->smk_task;
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
- isp = dp->d_inode->i_security;
+ return 0;
+}
- if (isp->smk_task != NULL)
- tsp->smk_task = isp->smk_task;
+/**
+ * smack_bprm_committing_creds - Prepare to install the new credentials
+ * from bprm.
+ *
+ * @bprm: binprm for exec
+ */
+static void smack_bprm_committing_creds(struct linux_binprm *bprm)
+{
+ struct task_smack *bsp = bprm->cred->security;
- return 0;
+ if (bsp->smk_task != bsp->smk_forked)
+ current->pdeath_signal = 0;
+}
+
+/**
+ * smack_bprm_secureexec - Return the decision to use secureexec.
+ * @bprm: binprm for exec
+ *
+ * Returns 0 on success.
+ */
+static int smack_bprm_secureexec(struct linux_binprm *bprm)
+{
+ struct task_smack *tsp = current_security();
+ int ret = cap_bprm_secureexec(bprm);
+
+ if (!ret && (tsp->smk_task != tsp->smk_forked))
+ ret = 1;
+
+ return ret;
}
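
The smk_task label tested in these hooks comes from the SMACK64EXEC attribute on the executable: when it differs from the caller's label the exec transitions to it, secureexec is forced and pdeath_signal is cleared. A hedged userspace sketch of labelling a binary that way; the path and the label are made-up examples, and setting the attribute requires CAP_MAC_ADMIN.

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/usr/local/bin/runner";	/* example path */
	const char *label = "Runner";			/* example label */

	/* programs exec'd from this file will then run with label "Runner" */
	if (setxattr(path, "security.SMACK64EXEC", label, strlen(label), 0)) {
		perror("setxattr");
		return 1;
	}
	return 0;
}
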
/*
@@ -516,6 +552,8 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, char **name,
void **value, size_t *len)
{
+ struct smack_known *skp;
+ char *csp = smk_of_current();
char *isp = smk_of_inode(inode);
char *dsp = smk_of_inode(dir);
int may;
@@ -527,8 +565,9 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
}
if (value) {
+ skp = smk_find_entry(csp);
rcu_read_lock();
- may = smk_access_entry(smk_of_current(), dsp, &smack_rule_list);
+ may = smk_access_entry(csp, dsp, &skp->smk_rules);
rcu_read_unlock();
/*
@@ -841,7 +880,7 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
-/*
+/**
* smack_inode_getxattr - Smack check on getxattr
* @dentry: the object
* @name: unused
@@ -858,7 +897,7 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name)
return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
}
-/*
+/**
* smack_inode_removexattr - Smack check on removexattr
* @dentry: the object
* @name: name of the attribute
@@ -1088,36 +1127,31 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
* @cmd: what action to check
* @arg: unused
*
+ * Generally these operations are harmless.
+ * File locking operations present an obvious mechanism
+ * for passing information, so they require write access.
+ *
* Returns 0 if current has access, error code otherwise
*/
static int smack_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct smk_audit_info ad;
- int rc;
+ int rc = 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
- smk_ad_setfield_u_fs_path(&ad, file->f_path);
switch (cmd) {
- case F_DUPFD:
- case F_GETFD:
- case F_GETFL:
case F_GETLK:
- case F_GETOWN:
- case F_GETSIG:
- rc = smk_curacc(file->f_security, MAY_READ, &ad);
- break;
- case F_SETFD:
- case F_SETFL:
case F_SETLK:
case F_SETLKW:
case F_SETOWN:
case F_SETSIG:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
break;
default:
- rc = smk_curacc(file->f_security, MAY_READWRITE, &ad);
+ break;
}
return rc;
@@ -1138,6 +1172,7 @@ static int smack_file_mmap(struct file *file,
unsigned long flags, unsigned long addr,
unsigned long addr_only)
{
+ struct smack_known *skp;
struct smack_rule *srp;
struct task_smack *tsp;
char *sp;
@@ -1170,6 +1205,7 @@ static int smack_file_mmap(struct file *file,
tsp = current_security();
sp = smk_of_current();
+ skp = smk_find_entry(sp);
rc = 0;
rcu_read_lock();
@@ -1177,15 +1213,8 @@ static int smack_file_mmap(struct file *file,
* For each Smack rule associated with the subject
* label verify that the SMACK64MMAP also has access
* to that rule's object label.
- *
- * Because neither of the labels comes
- * from the networking code it is sufficient
- * to compare pointers.
*/
- list_for_each_entry_rcu(srp, &smack_rule_list, list) {
- if (srp->smk_subject != sp)
- continue;
-
+ list_for_each_entry_rcu(srp, &skp->smk_rules, list) {
osmack = srp->smk_object;
/*
* Matching labels always allows access.
@@ -1214,7 +1243,8 @@ static int smack_file_mmap(struct file *file,
* If there isn't one a SMACK64MMAP subject
* can't have as much access as current.
*/
- mmay = smk_access_entry(msmack, osmack, &smack_rule_list);
+ skp = smk_find_entry(msmack);
+ mmay = smk_access_entry(msmack, osmack, &skp->smk_rules);
if (mmay == -ENOENT) {
rc = -EACCES;
break;
@@ -1315,6 +1345,24 @@ static int smack_file_receive(struct file *file)
return smk_curacc(file->f_security, may, &ad);
}
+/**
+ * smack_dentry_open - Smack dentry open processing
+ * @file: the object
+ * @cred: unused
+ *
+ * Set the security blob in the file structure.
+ *
+ * Returns 0
+ */
+static int smack_dentry_open(struct file *file, const struct cred *cred)
+{
+ struct inode_smack *isp = file->f_path.dentry->d_inode->i_security;
+
+ file->f_security = isp->smk_inode;
+
+ return 0;
+}
+
/*
* Task hooks
*/
@@ -1455,15 +1503,17 @@ static int smack_kernel_create_files_as(struct cred *new,
/**
* smk_curacc_on_task - helper to log task related access
* @p: the task object
- * @access : the access requested
+ * @access: the access requested
+ * @caller: name of the calling function for audit
*
* Return 0 if access is permitted
*/
-static int smk_curacc_on_task(struct task_struct *p, int access)
+static int smk_curacc_on_task(struct task_struct *p, int access,
+ const char *caller)
{
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+ smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
return smk_curacc(smk_of_task(task_security(p)), access, &ad);
}
@@ -1477,7 +1527,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access)
*/
static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
{
- return smk_curacc_on_task(p, MAY_WRITE);
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
@@ -1488,7 +1538,7 @@ static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
*/
static int smack_task_getpgid(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1499,7 +1549,7 @@ static int smack_task_getpgid(struct task_struct *p)
*/
static int smack_task_getsid(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1527,7 +1577,7 @@ static int smack_task_setnice(struct task_struct *p, int nice)
rc = cap_task_setnice(p, nice);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1544,7 +1594,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio)
rc = cap_task_setioprio(p, ioprio);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1556,7 +1606,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio)
*/
static int smack_task_getioprio(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1573,7 +1623,7 @@ static int smack_task_setscheduler(struct task_struct *p)
rc = cap_task_setscheduler(p);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1585,7 +1635,7 @@ static int smack_task_setscheduler(struct task_struct *p)
*/
static int smack_task_getscheduler(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1596,7 +1646,7 @@ static int smack_task_getscheduler(struct task_struct *p)
*/
static int smack_task_movememory(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_WRITE);
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
@@ -1711,7 +1761,7 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
ssp->smk_in = csp;
ssp->smk_out = csp;
- ssp->smk_packet[0] = '\0';
+ ssp->smk_packet = NULL;
sk->sk_security = ssp;
@@ -2753,6 +2803,7 @@ static int smack_unix_stream_connect(struct sock *sock,
{
struct socket_smack *ssp = sock->sk_security;
struct socket_smack *osp = other->sk_security;
+ struct socket_smack *nsp = newsk->sk_security;
struct smk_audit_info ad;
int rc = 0;
@@ -2762,6 +2813,14 @@ static int smack_unix_stream_connect(struct sock *sock,
if (!capable(CAP_MAC_OVERRIDE))
rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+ /*
+ * Cross reference the peer labels for SO_PEERSEC.
+ */
+ if (rc == 0) {
+ nsp->smk_packet = ssp->smk_out;
+ ssp->smk_packet = osp->smk_out;
+ }
+
return rc;
}
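
With smk_packet cross-referenced at connect time, each end of a unix stream socket can read the other end's label through SO_PEERSEC. A sketch of the client side; the socket path is an example and error handling is kept minimal.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX,
				   .sun_path = "/run/example.sock" };
	char label[256] = "";
	socklen_t len = sizeof(label);
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	if (fd < 0 || connect(fd, (struct sockaddr *)&sun, sizeof(sun))) {
		perror("connect");
		return 1;
	}
	/* reports the Smack label of the peer that accepted the connection */
	if (getsockopt(fd, SOL_SOCKET, SO_PEERSEC, label, &len) == 0)
		printf("peer label: %s\n", label);
	close(fd);
	return 0;
}
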
@@ -2813,16 +2872,17 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
return smack_netlabel_send(sock->sk, sip);
}
-
/**
* smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack
* @sap: netlabel secattr
- * @sip: where to put the result
+ * @ssp: socket security information
*
- * Copies a smack label into sip
+ * Returns a pointer to a Smack label found on the label list.
*/
-static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
+static char *smack_from_secattr(struct netlbl_lsm_secattr *sap,
+ struct socket_smack *ssp)
{
+ struct smack_known *skp;
char smack[SMK_LABELLEN];
char *sp;
int pcat;
@@ -2852,15 +2912,43 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
* we are already done. WeeHee.
*/
if (sap->attr.mls.lvl == smack_cipso_direct) {
- memcpy(sip, smack, SMK_MAXLEN);
- return;
+ /*
+ * The label sent is usually on the label list.
+ *
+ * If it is not we may still want to allow the
+ * delivery.
+ *
+ * If the recipient is accepting all packets
+ * because it is using the star ("*") label
+ * for SMACK64IPIN provide the web ("@") label
+ * so that a directed response will succeed.
+ * This is not very correct from a MAC point
+ * of view, but gets around the problem that
+ * locking prevents adding the newly discovered
+ * label to the list.
+ * The case where the recipient is not using
+ * the star label should obviously fail.
+ * The easy way to do this is to provide the
+ * star label as the subject label.
+ */
+ skp = smk_find_entry(smack);
+ if (skp != NULL)
+ return skp->smk_known;
+ if (ssp != NULL &&
+ ssp->smk_in == smack_known_star.smk_known)
+ return smack_known_web.smk_known;
+ return smack_known_star.smk_known;
}
/*
* Look it up in the supplied table if it is not
* a direct mapping.
*/
- smack_from_cipso(sap->attr.mls.lvl, smack, sip);
- return;
+ sp = smack_from_cipso(sap->attr.mls.lvl, smack);
+ if (sp != NULL)
+ return sp;
+ if (ssp != NULL && ssp->smk_in == smack_known_star.smk_known)
+ return smack_known_web.smk_known;
+ return smack_known_star.smk_known;
}
if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
/*
@@ -2875,16 +2963,14 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
* secid is from a fallback.
*/
BUG_ON(sp == NULL);
- strncpy(sip, sp, SMK_MAXLEN);
- return;
+ return sp;
}
/*
* Without guidance regarding the smack value
* for the packet fall back on the network
* ambient value.
*/
- strncpy(sip, smack_net_ambient, SMK_MAXLEN);
- return;
+ return smack_net_ambient;
}
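
The comment in the direct-mapping branch above describes a three-way choice for an incoming CIPSO label that is not on this host's list. A small standalone sketch of that decision; the label list and the labels passed in are examples, not kernel data.

#include <stdio.h>
#include <string.h>

/* labels this host has already seen (examples) */
static const char *known[] = { "_", "System", "User", NULL };

static int label_known(const char *s)
{
	int i;

	for (i = 0; known[i] != NULL; i++)
		if (strcmp(known[i], s) == 0)
			return 1;
	return 0;
}

/* the fallback choice described in the comment above */
static const char *peer_label(const char *incoming, const char *smk64ipin)
{
	if (label_known(incoming))
		return incoming;	/* usual case: label is on the list */
	if (strcmp(smk64ipin, "*") == 0)
		return "@";		/* receiver accepts everything: web label */
	return "*";			/* otherwise the later write check fails */
}

int main(void)
{
	printf("%s\n", peer_label("System", "User"));	/* System */
	printf("%s\n", peer_label("Foreign", "*"));	/* @ */
	printf("%s\n", peer_label("Foreign", "User"));	/* * */
	return 0;
}
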
/**
@@ -2898,7 +2984,6 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
- char smack[SMK_LABELLEN];
char *csp;
int rc;
struct smk_audit_info ad;
@@ -2911,10 +2996,9 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
- if (rc == 0) {
- smack_from_secattr(&secattr, smack);
- csp = smack;
- } else
+ if (rc == 0)
+ csp = smack_from_secattr(&secattr, ssp);
+ else
csp = smack_net_ambient;
netlbl_secattr_destroy(&secattr);
@@ -2951,15 +3035,19 @@ static int smack_socket_getpeersec_stream(struct socket *sock,
int __user *optlen, unsigned len)
{
struct socket_smack *ssp;
- int slen;
+ char *rcp = "";
+ int slen = 1;
int rc = 0;
ssp = sock->sk->sk_security;
- slen = strlen(ssp->smk_packet) + 1;
+ if (ssp->smk_packet != NULL) {
+ rcp = ssp->smk_packet;
+ slen = strlen(rcp) + 1;
+ }
if (slen > len)
rc = -ERANGE;
- else if (copy_to_user(optval, ssp->smk_packet, slen) != 0)
+ else if (copy_to_user(optval, rcp, slen) != 0)
rc = -EFAULT;
if (put_user(slen, optlen) != 0)
@@ -2982,8 +3070,8 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
{
struct netlbl_lsm_secattr secattr;
- struct socket_smack *sp;
- char smack[SMK_LABELLEN];
+ struct socket_smack *ssp = NULL;
+ char *sp;
int family = PF_UNSPEC;
u32 s = 0; /* 0 is the invalid secid */
int rc;
@@ -2998,17 +3086,19 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
family = sock->sk->sk_family;
if (family == PF_UNIX) {
- sp = sock->sk->sk_security;
- s = smack_to_secid(sp->smk_out);
+ ssp = sock->sk->sk_security;
+ s = smack_to_secid(ssp->smk_out);
} else if (family == PF_INET || family == PF_INET6) {
/*
* Translate what netlabel gave us.
*/
+ if (sock != NULL && sock->sk != NULL)
+ ssp = sock->sk->sk_security;
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0) {
- smack_from_secattr(&secattr, smack);
- s = smack_to_secid(smack);
+ sp = smack_from_secattr(&secattr, ssp);
+ s = smack_to_secid(sp);
}
netlbl_secattr_destroy(&secattr);
}
@@ -3056,7 +3146,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct netlbl_lsm_secattr secattr;
struct sockaddr_in addr;
struct iphdr *hdr;
- char smack[SMK_LABELLEN];
+ char *sp;
int rc;
struct smk_audit_info ad;
@@ -3067,9 +3157,9 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0)
- smack_from_secattr(&secattr, smack);
+ sp = smack_from_secattr(&secattr, ssp);
else
- strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN);
+ sp = smack_known_huh.smk_known;
netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
@@ -3082,7 +3172,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Receiving a packet requires that the other end be able to write
* here. Read access is not required.
*/
- rc = smk_access(smack, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_access(sp, ssp->smk_in, MAY_WRITE, &ad);
if (rc != 0)
return rc;
@@ -3090,7 +3180,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Save the peer's label in the request_sock so we can later setup
* smk_packet in the child socket so that SO_PEERCRED can report it.
*/
- req->peer_secid = smack_to_secid(smack);
+ req->peer_secid = smack_to_secid(sp);
/*
* We need to decide if we want to label the incoming connection here
@@ -3103,7 +3193,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
if (smack_host_label(&addr) == NULL) {
rcu_read_unlock();
netlbl_secattr_init(&secattr);
- smack_to_secattr(smack, &secattr);
+ smack_to_secattr(sp, &secattr);
rc = netlbl_req_setattr(req, &secattr);
netlbl_secattr_destroy(&secattr);
} else {
@@ -3125,13 +3215,11 @@ static void smack_inet_csk_clone(struct sock *sk,
const struct request_sock *req)
{
struct socket_smack *ssp = sk->sk_security;
- char *smack;
- if (req->peer_secid != 0) {
- smack = smack_from_secid(req->peer_secid);
- strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
- } else
- ssp->smk_packet[0] = '\0';
+ if (req->peer_secid != 0)
+ ssp->smk_packet = smack_from_secid(req->peer_secid);
+ else
+ ssp->smk_packet = NULL;
}
/*
@@ -3409,6 +3497,8 @@ struct security_operations smack_ops = {
.sb_umount = smack_sb_umount,
.bprm_set_creds = smack_bprm_set_creds,
+ .bprm_committing_creds = smack_bprm_committing_creds,
+ .bprm_secureexec = smack_bprm_secureexec,
.inode_alloc_security = smack_inode_alloc_security,
.inode_free_security = smack_inode_free_security,
@@ -3440,6 +3530,8 @@ struct security_operations smack_ops = {
.file_send_sigiotask = smack_file_send_sigiotask,
.file_receive = smack_file_receive,
+ .dentry_open = smack_dentry_open,
+
.cred_alloc_blank = smack_cred_alloc_blank,
.cred_free = smack_cred_free,
.cred_prepare = smack_cred_prepare,
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f93460156dce..6aceef518a41 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -44,6 +44,7 @@ enum smk_inos {
SMK_ONLYCAP = 9, /* the only "capable" label */
SMK_LOGGING = 10, /* logging */
SMK_LOAD_SELF = 11, /* task specific rules */
+ SMK_ACCESSES = 12, /* access policy */
};
/*
@@ -85,6 +86,16 @@ char *smack_onlycap;
*/
LIST_HEAD(smk_netlbladdr_list);
+
+/*
+ * Rule lists are maintained for each label.
+ * This master list is just for reading /smack/load.
+ */
+struct smack_master_list {
+ struct list_head list;
+ struct smack_rule *smk_rule;
+};
+
LIST_HEAD(smack_rule_list);
static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
@@ -92,7 +103,7 @@ static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
const char *smack_cipso_option = SMACK_CIPSO_OPTION;
-#define SEQ_READ_FINISHED 1
+#define SEQ_READ_FINISHED ((loff_t)-1)
/*
* Values for parsing cipso rules
@@ -159,9 +170,13 @@ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
mutex_lock(rule_lock);
+ /*
+ * Because the object label is less likely to match
+ * than the subject label check it first
+ */
list_for_each_entry_rcu(sp, rule_list, list) {
- if (sp->smk_subject == srp->smk_subject &&
- sp->smk_object == srp->smk_object) {
+ if (sp->smk_object == srp->smk_object &&
+ sp->smk_subject == srp->smk_subject) {
found = 1;
sp->smk_access = srp->smk_access;
break;
@@ -176,6 +191,99 @@ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
}
/**
+ * smk_parse_rule - parse Smack rule from load string
+ * @data: string to be parsed whose size is SMK_LOADLEN
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ */
+static int smk_parse_rule(const char *data, struct smack_rule *rule, int import)
+{
+ char smack[SMK_LABELLEN];
+ struct smack_known *skp;
+
+ if (import) {
+ rule->smk_subject = smk_import(data, 0);
+ if (rule->smk_subject == NULL)
+ return -1;
+
+ rule->smk_object = smk_import(data + SMK_LABELLEN, 0);
+ if (rule->smk_object == NULL)
+ return -1;
+ } else {
+ smk_parse_smack(data, 0, smack);
+ skp = smk_find_entry(smack);
+ if (skp == NULL)
+ return -1;
+ rule->smk_subject = skp->smk_known;
+
+ smk_parse_smack(data + SMK_LABELLEN, 0, smack);
+ skp = smk_find_entry(smack);
+ if (skp == NULL)
+ return -1;
+ rule->smk_object = skp->smk_known;
+ }
+
+ rule->smk_access = 0;
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
+ case '-':
+ break;
+ case 'r':
+ case 'R':
+ rule->smk_access |= MAY_READ;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
+ case '-':
+ break;
+ case 'w':
+ case 'W':
+ rule->smk_access |= MAY_WRITE;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
+ case '-':
+ break;
+ case 'x':
+ case 'X':
+ rule->smk_access |= MAY_EXEC;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
+ case '-':
+ break;
+ case 'a':
+ case 'A':
+ rule->smk_access |= MAY_APPEND;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) {
+ case '-':
+ break;
+ case 't':
+ case 'T':
+ rule->smk_access |= MAY_TRANSMUTE;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
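
smk_parse_rule() reads the fixed-width record used by /smack/load and now also by /smack/access: SMK_LABELLEN bytes of subject, SMK_LABELLEN bytes of object, then one character per mode in the order r, w, x, a, t, with '-' where a mode is absent. A userspace sketch that builds such a record; SMK_LABELLEN (24) and the overall record length are assumptions mirrored from this patch, not an interface contract.

#include <stdio.h>
#include <string.h>

#define SMK_LABELLEN	24			/* assumed, matches this kernel */
#define SMK_LOADLEN	(SMK_LABELLEN + SMK_LABELLEN + 5)

/* build a subject/object/"rwxat" record suitable for /smack/load */
static int build_rule(char *buf, const char *subject, const char *object,
		      const char *modes)
{
	static const char order[] = "rwxat";
	int i;

	if (strlen(subject) >= SMK_LABELLEN || strlen(object) >= SMK_LABELLEN)
		return -1;

	memset(buf, ' ', SMK_LOADLEN);
	memcpy(buf, subject, strlen(subject));
	memcpy(buf + SMK_LABELLEN, object, strlen(object));
	for (i = 0; i < 5; i++)
		buf[SMK_LABELLEN + SMK_LABELLEN + i] =
			strchr(modes, order[i]) ? order[i] : '-';
	return 0;
}

int main(void)
{
	char rule[SMK_LOADLEN];

	if (build_rule(rule, "Subject", "Object", "rw") == 0)
		fwrite(rule, 1, sizeof(rule), stdout);	/* fixed-width record */
	return 0;
}
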
+
+/**
* smk_write_load_list - write() for any /smack/load
* @file: file pointer, not actually used
* @buf: where to get the data from
@@ -197,9 +305,12 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
struct list_head *rule_list,
struct mutex *rule_lock)
{
+ struct smack_master_list *smlp;
+ struct smack_known *skp;
struct smack_rule *rule;
char *data;
int rc = -EINVAL;
+ int load = 0;
/*
* No partial writes.
@@ -234,69 +345,14 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
goto out;
}
- rule->smk_subject = smk_import(data, 0);
- if (rule->smk_subject == NULL)
- goto out_free_rule;
-
- rule->smk_object = smk_import(data + SMK_LABELLEN, 0);
- if (rule->smk_object == NULL)
+ if (smk_parse_rule(data, rule, 1))
goto out_free_rule;
- rule->smk_access = 0;
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
- case '-':
- break;
- case 'r':
- case 'R':
- rule->smk_access |= MAY_READ;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
- case '-':
- break;
- case 'w':
- case 'W':
- rule->smk_access |= MAY_WRITE;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
- case '-':
- break;
- case 'x':
- case 'X':
- rule->smk_access |= MAY_EXEC;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
- case '-':
- break;
- case 'a':
- case 'A':
- rule->smk_access |= MAY_APPEND;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) {
- case '-':
- break;
- case 't':
- case 'T':
- rule->smk_access |= MAY_TRANSMUTE;
- break;
- default:
- goto out_free_rule;
+ if (rule_list == NULL) {
+ load = 1;
+ skp = smk_find_entry(rule->smk_subject);
+ rule_list = &skp->smk_rules;
+ rule_lock = &skp->smk_rules_lock;
}
rc = count;
@@ -304,8 +360,15 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
* smk_set_access returns true if there was already a rule
* for the subject/object pair, and false if it was new.
*/
- if (!smk_set_access(rule, rule_list, rule_lock))
+ if (!smk_set_access(rule, rule_list, rule_lock)) {
+ smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
+ if (smlp != NULL) {
+ smlp->smk_rule = rule;
+ list_add_rcu(&smlp->list, &smack_rule_list);
+ } else
+ rc = -ENOMEM;
goto out;
+ }
out_free_rule:
kfree(rule);
@@ -321,11 +384,24 @@ out:
static void *load_seq_start(struct seq_file *s, loff_t *pos)
{
- if (*pos == SEQ_READ_FINISHED)
+ struct list_head *list;
+
+ /*
+ * This is 0 the first time through.
+ */
+ if (s->index == 0)
+ s->private = &smack_rule_list;
+
+ if (s->private == NULL)
return NULL;
- if (list_empty(&smack_rule_list))
+
+ list = s->private;
+ if (list_empty(list))
return NULL;
- return smack_rule_list.next;
+
+ if (s->index == 0)
+ return list->next;
+ return list;
}
static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
@@ -333,17 +409,19 @@ static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
struct list_head *list = v;
if (list_is_last(list, &smack_rule_list)) {
- *pos = SEQ_READ_FINISHED;
+ s->private = NULL;
return NULL;
}
+ s->private = list->next;
return list->next;
}
static int load_seq_show(struct seq_file *s, void *v)
{
struct list_head *list = v;
- struct smack_rule *srp =
- list_entry(list, struct smack_rule, list);
+ struct smack_master_list *smlp =
+ list_entry(list, struct smack_master_list, list);
+ struct smack_rule *srp = smlp->smk_rule;
seq_printf(s, "%s %s", (char *)srp->smk_subject,
(char *)srp->smk_object);
@@ -412,8 +490,7 @@ static ssize_t smk_write_load(struct file *file, const char __user *buf,
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
- return smk_write_load_list(file, buf, count, ppos, &smack_rule_list,
- &smack_list_lock);
+ return smk_write_load_list(file, buf, count, ppos, NULL, NULL);
}
static const struct file_operations smk_load_ops = {
@@ -1425,6 +1502,44 @@ static const struct file_operations smk_load_self_ops = {
.write = smk_write_load_self,
.release = seq_release,
};
+
+/**
+ * smk_write_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_rule rule;
+ char *data;
+ int res;
+
+ data = simple_transaction_get(file, buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (count < SMK_LOADLEN || smk_parse_rule(data, &rule, 0))
+ return -EINVAL;
+
+ res = smk_access(rule.smk_subject, rule.smk_object, rule.smk_access,
+ NULL);
+ data[0] = res == 0 ? '1' : '0';
+ data[1] = '\0';
+
+ simple_transaction_set(file, 2);
+ return SMK_LOADLEN;
+}
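
The new /smack/access file is a simple_transaction interface: write a rule-shaped record, then read back a single '1' or '0'. A hedged userspace sketch; the /smack mount point and the record length are assumptions, and both labels must already exist because the parse path above uses smk_find_entry() instead of importing.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SMK_LABELLEN	24			/* assumed, matches this kernel */
#define SMK_LOADLEN	(SMK_LABELLEN + SMK_LABELLEN + 5)

int main(void)
{
	char q[SMK_LOADLEN];
	char answer = '?';
	int fd;

	/* same fixed-width layout as /smack/load: subject, object, "rwxat" */
	memset(q, ' ', sizeof(q));
	memcpy(q, "Subject", 7);
	memcpy(q + SMK_LABELLEN, "Object", 6);
	memcpy(q + 2 * SMK_LABELLEN, "rw---", 5);

	fd = open("/smack/access", O_RDWR);	/* assumes smackfs on /smack */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, q, sizeof(q)) < 0)
		perror("write");
	else if (pread(fd, &answer, 1, 0) == 1)
		printf("access %s\n", answer == '1' ? "granted" : "denied");
	close(fd);
	return 0;
}
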
+
+static const struct file_operations smk_access_ops = {
+ .write = smk_write_access,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
/**
* smk_fill_super - fill the /smackfs superblock
* @sb: the empty superblock
@@ -1459,6 +1574,8 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
"logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
[SMK_LOAD_SELF] = {
"load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
+ [SMK_ACCESSES] = {
+ "access", &smk_access_ops, S_IRUGO|S_IWUGO},
/* last one */
{""}
};
@@ -1534,6 +1651,20 @@ static int __init init_smk_fs(void)
smk_cipso_doi();
smk_unlbl_ambient(NULL);
+ mutex_init(&smack_known_floor.smk_rules_lock);
+ mutex_init(&smack_known_hat.smk_rules_lock);
+ mutex_init(&smack_known_huh.smk_rules_lock);
+ mutex_init(&smack_known_invalid.smk_rules_lock);
+ mutex_init(&smack_known_star.smk_rules_lock);
+ mutex_init(&smack_known_web.smk_rules_lock);
+
+ INIT_LIST_HEAD(&smack_known_floor.smk_rules);
+ INIT_LIST_HEAD(&smack_known_hat.smk_rules);
+ INIT_LIST_HEAD(&smack_known_huh.smk_rules);
+ INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
+ INIT_LIST_HEAD(&smack_known_star.smk_rules);
+ INIT_LIST_HEAD(&smack_known_web.smk_rules);
+
return err;
}
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 7c7f8c16c10f..8eb779b9d77f 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -1,8 +1,10 @@
config SECURITY_TOMOYO
bool "TOMOYO Linux Support"
depends on SECURITY
+ depends on NET
select SECURITYFS
select SECURITY_PATH
+ select SECURITY_NETWORK
default n
help
This selects TOMOYO Linux, pathname-based access control.
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 95278b71fc21..56a0c7be409e 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1,4 +1,4 @@
-obj-y = audit.o common.o condition.o domain.o file.o gc.o group.o load_policy.o memory.o mount.o realpath.o securityfs_if.o tomoyo.o util.o
+obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
$(obj)/policy/profile.conf:
@mkdir -p $(obj)/policy/
@@ -27,7 +27,7 @@ $(obj)/policy/stat.conf:
@touch $@
$(obj)/builtin-policy.h: $(obj)/policy/profile.conf $(obj)/policy/exception_policy.conf $(obj)/policy/domain_policy.conf $(obj)/policy/manager.conf $(obj)/policy/stat.conf
- @echo Generating built-in policy for TOMOYO 2.4.x.
+ @echo Generating built-in policy for TOMOYO 2.5.x.
@echo "static char tomoyo_builtin_profile[] __initdata =" > $@.tmp
@sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/profile.conf >> $@.tmp
@echo "\"\";" >> $@.tmp
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 5dbb1f7617c0..075c3a6d1649 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -313,6 +313,7 @@ static unsigned int tomoyo_log_count;
*/
static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
const u8 profile, const u8 index,
+ const struct tomoyo_acl_info *matched_acl,
const bool is_granted)
{
u8 mode;
@@ -324,6 +325,9 @@ static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
p = tomoyo_profile(ns, profile);
if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG])
return false;
+ if (is_granted && matched_acl && matched_acl->cond &&
+ matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES;
mode = p->config[index];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
mode = p->config[category];
@@ -350,7 +354,8 @@ void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
char *buf;
struct tomoyo_log *entry;
bool quota_exceeded = false;
- if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type, r->granted))
+ if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type,
+ r->matched_acl, r->granted))
goto out;
buf = tomoyo_init_log(r, len, fmt, args);
if (!buf)
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 2e43aec1c36b..150911c7ff08 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -20,6 +20,7 @@ const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = {
/* String table for /sys/kernel/security/tomoyo/profile */
const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+ /* CONFIG::file group */
[TOMOYO_MAC_FILE_EXECUTE] = "execute",
[TOMOYO_MAC_FILE_OPEN] = "open",
[TOMOYO_MAC_FILE_CREATE] = "create",
@@ -43,7 +44,28 @@ const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
[TOMOYO_MAC_FILE_MOUNT] = "mount",
[TOMOYO_MAC_FILE_UMOUNT] = "unmount",
[TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root",
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = "inet_stream_bind",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = "inet_stream_listen",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = "inet_stream_connect",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = "inet_dgram_bind",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = "inet_dgram_send",
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = "inet_raw_bind",
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = "inet_raw_send",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = "unix_stream_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = "unix_stream_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = "unix_stream_connect",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = "unix_dgram_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = "unix_dgram_send",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = "unix_seqpacket_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = "unix_seqpacket_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect",
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = "env",
+ /* CONFIG group */
[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
/* String table for conditions. */
@@ -130,10 +152,20 @@ const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
[TOMOYO_TYPE_UMOUNT] = "unmount",
};
+/* String table for socket's operation. */
+const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = {
+ [TOMOYO_NETWORK_BIND] = "bind",
+ [TOMOYO_NETWORK_LISTEN] = "listen",
+ [TOMOYO_NETWORK_CONNECT] = "connect",
+ [TOMOYO_NETWORK_SEND] = "send",
+};
+
/* String table for categories. */
static const char * const tomoyo_category_keywords
[TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
- [TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
/* Permit policy management by non-root user? */
@@ -230,13 +262,17 @@ static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string)
WARN_ON(1);
}
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...) __printf(2, 3);
+
/**
* tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @fmt: The printf()'s format string, followed by parameters.
*/
-void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...)
{
va_list args;
size_t len;
@@ -313,7 +349,7 @@ void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns)
INIT_LIST_HEAD(&ns->group_list[idx]);
for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++)
INIT_LIST_HEAD(&ns->policy_list[idx]);
- ns->profile_version = 20100903;
+ ns->profile_version = 20110903;
tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list);
list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list);
}
@@ -466,8 +502,10 @@ static struct tomoyo_profile *tomoyo_assign_profile
TOMOYO_CONFIG_WANT_REJECT_LOG;
memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT,
sizeof(ptr->config));
- ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] = 1024;
- ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = 2048;
+ ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] =
+ CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG;
+ ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] =
+ CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY;
mb(); /* Avoid out-of-order execution. */
ns->profile_ptr[profile] = ptr;
entry = NULL;
@@ -951,14 +989,12 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
(global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) {
struct task_struct *p;
rcu_read_lock();
- read_lock(&tasklist_lock);
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
- read_unlock(&tasklist_lock);
rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
if (tomoyo_domain_def(data + 7))
@@ -982,6 +1018,48 @@ static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
}
/**
+ * tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
+ return p1->domainname == p2->domainname;
+}
+
+/**
+ * tomoyo_write_task - Update task related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_task(struct tomoyo_acl_param *param)
+{
+ int error = -EINVAL;
+ if (tomoyo_str_starts(&param->data, "manual_domain_transition ")) {
+ struct tomoyo_task_acl e = {
+ .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
+ .domainname = tomoyo_get_domainname(param),
+ };
+ if (e.domainname)
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_task_acl,
+ NULL);
+ tomoyo_put_name(e.domainname);
+ }
+ return error;
+}
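
tomoyo_write_task() accepts a "task manual_domain_transition <domainname>" line in domain policy, letting a suitably authorized process request a jump to the named domain. A hedged sketch of feeding such a line through the securityfs interface; the securityfs path, the "select domain=" convention and both domain names are assumptions chosen for illustration, not values taken from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char * const lines[] = {
		"select domain=<kernel> /usr/sbin/sshd\n",
		"task manual_domain_transition <kernel> //restricted\n",
	};
	int fd = open("/sys/kernel/security/tomoyo/domain_policy", O_WRONLY);
	unsigned int i;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (i = 0; i < sizeof(lines) / sizeof(lines[0]); i++)
		if (write(fd, lines[i], strlen(lines[i])) < 0)
			perror("write");
	close(fd);
	return 0;
}
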
+
+/**
* tomoyo_delete_domain - Delete a domain.
*
* @domainname: The name of domain.
@@ -1039,11 +1117,16 @@ static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns,
static const struct {
const char *keyword;
int (*write) (struct tomoyo_acl_param *);
- } tomoyo_callback[1] = {
+ } tomoyo_callback[5] = {
{ "file ", tomoyo_write_file },
+ { "network inet ", tomoyo_write_inet_network },
+ { "network unix ", tomoyo_write_unix_network },
+ { "misc ", tomoyo_write_misc },
+ { "task ", tomoyo_write_task },
};
u8 i;
- for (i = 0; i < 1; i++) {
+
+ for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) {
if (!tomoyo_str_starts(&param.data,
tomoyo_callback[i].keyword))
continue;
@@ -1127,6 +1210,10 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
case 0:
head->r.cond_index = 0;
head->r.cond_step++;
+ if (cond->transit) {
+ tomoyo_set_space(head);
+ tomoyo_set_string(head, cond->transit->name);
+ }
/* fall through */
case 1:
{
@@ -1239,6 +1326,10 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
head->r.cond_step++;
/* fall through */
case 3:
+ if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ tomoyo_io_printf(head, " grant_log=%s",
+ tomoyo_yesno(cond->grant_log ==
+ TOMOYO_GRANTLOG_YES));
tomoyo_set_lf(head);
return true;
}
@@ -1306,6 +1397,12 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
if (first)
return true;
tomoyo_print_name_union(head, &ptr->name);
+ } else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) {
+ struct tomoyo_task_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ tomoyo_set_group(head, "task ");
+ tomoyo_set_string(head, "manual_domain_transition ");
+ tomoyo_set_string(head, ptr->domainname->name);
} else if (head->r.print_transition_related_only) {
return true;
} else if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
@@ -1370,6 +1467,60 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
tomoyo_print_number_union(head, &ptr->mode);
tomoyo_print_number_union(head, &ptr->major);
tomoyo_print_number_union(head, &ptr->minor);
+ } else if (acl_type == TOMOYO_TYPE_INET_ACL) {
+ struct tomoyo_inet_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network inet ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_set_space(head);
+ if (ptr->address.group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->address.group->group_name
+ ->name);
+ } else {
+ char buf[128];
+ tomoyo_print_ip(buf, sizeof(buf), &ptr->address);
+ tomoyo_io_printf(head, "%s", buf);
+ }
+ tomoyo_print_number_union(head, &ptr->port);
+ } else if (acl_type == TOMOYO_TYPE_UNIX_ACL) {
+ struct tomoyo_unix_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network unix ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
} else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) {
struct tomoyo_mount_acl *ptr =
container_of(acl, typeof(*ptr), head);
@@ -1378,6 +1529,12 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
tomoyo_print_name_union(head, &ptr->dir_name);
tomoyo_print_name_union(head, &ptr->fs_type);
tomoyo_print_number_union(head, &ptr->flags);
+ } else if (acl_type == TOMOYO_TYPE_ENV_ACL) {
+ struct tomoyo_env_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_set_group(head, "misc env ");
+ tomoyo_set_string(head, ptr->env->name);
}
if (acl->cond) {
head->r.print_cond_part = true;
@@ -1510,14 +1667,12 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
global_pid = true;
pid = (unsigned int) simple_strtoul(buf, NULL, 10);
rcu_read_lock();
- read_lock(&tasklist_lock);
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
- read_unlock(&tasklist_lock);
rcu_read_unlock();
if (!domain)
return;
@@ -1537,8 +1692,9 @@ static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = {
/* String table for grouping keywords. */
static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = {
- [TOMOYO_PATH_GROUP] = "path_group ",
- [TOMOYO_NUMBER_GROUP] = "number_group ",
+ [TOMOYO_PATH_GROUP] = "path_group ",
+ [TOMOYO_NUMBER_GROUP] = "number_group ",
+ [TOMOYO_ADDRESS_GROUP] = "address_group ",
};
/**
@@ -1580,7 +1736,7 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
}
/**
- * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group" list.
+ * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @idx: Index number.
@@ -1617,6 +1773,15 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
(ptr,
struct tomoyo_number_group,
head)->number);
+ } else if (idx == TOMOYO_ADDRESS_GROUP) {
+ char buffer[128];
+
+ struct tomoyo_address_group *member =
+ container_of(ptr, typeof(*member),
+ head);
+ tomoyo_print_ip(buffer, sizeof(buffer),
+ &member->address);
+ tomoyo_io_printf(head, " %s", buffer);
}
tomoyo_set_lf(head);
}
@@ -2066,27 +2231,7 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
static void tomoyo_read_version(struct tomoyo_io_buffer *head)
{
if (!head->r.eof) {
- tomoyo_io_printf(head, "2.4.0");
- head->r.eof = true;
- }
-}
-
-/**
- * tomoyo_read_self_domain - Get the current process's domainname.
- *
- * @head: Pointer to "struct tomoyo_io_buffer".
- *
- * Returns the current process's domainname.
- */
-static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
-{
- if (!head->r.eof) {
- /*
- * tomoyo_domain()->domainname != NULL
- * because every process belongs to a domain and
- * the domain's name cannot be NULL.
- */
- tomoyo_io_printf(head, "%s", tomoyo_domain()->domainname->name);
+ tomoyo_io_printf(head, "2.5.0");
head->r.eof = true;
}
}
@@ -2221,10 +2366,6 @@ int tomoyo_open_control(const u8 type, struct file *file)
head->poll = tomoyo_poll_log;
head->read = tomoyo_read_log;
break;
- case TOMOYO_SELFDOMAIN:
- /* /sys/kernel/security/tomoyo/self_domain */
- head->read = tomoyo_read_self_domain;
- break;
case TOMOYO_PROCESS_STATUS:
/* /sys/kernel/security/tomoyo/.process_status */
head->write = tomoyo_write_pid;
@@ -2453,6 +2594,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
return -EFAULT;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
+ head->read_user_buf_avail = 0;
idx = tomoyo_read_lock();
/* Read a line and dispatch it to the policy handler. */
while (avail_len > 0) {
@@ -2562,11 +2704,11 @@ void tomoyo_check_profile(void)
struct tomoyo_domain_info *domain;
const int idx = tomoyo_read_lock();
tomoyo_policy_loaded = true;
- printk(KERN_INFO "TOMOYO: 2.4.0\n");
+ printk(KERN_INFO "TOMOYO: 2.5.0\n");
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
const u8 profile = domain->profile;
const struct tomoyo_policy_namespace *ns = domain->ns;
- if (ns->profile_version != 20100903)
+ if (ns->profile_version != 20110903)
printk(KERN_ERR
"Profile version %u is not supported.\n",
ns->profile_version);
@@ -2577,9 +2719,9 @@ void tomoyo_check_profile(void)
else
continue;
printk(KERN_ERR
- "Userland tools for TOMOYO 2.4 must be installed and "
+ "Userland tools for TOMOYO 2.5 must be installed and "
"policy must be initialized.\n");
- printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.4/ "
+ printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.5/ "
"for more information.\n");
panic("STOP!");
}
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index f7fbaa66e443..ed311d7a8ce0 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -3,7 +3,7 @@
*
* Header file for TOMOYO.
*
- * Copyright (C) 2005-2010 NTT DATA CORPORATION
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#ifndef _SECURITY_TOMOYO_COMMON_H
@@ -23,6 +23,16 @@
#include <linux/poll.h>
#include <linux/binfmts.h>
#include <linux/highmem.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/un.h>
+#include <net/sock.h>
+#include <net/af_unix.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
/********** Constants definitions. **********/
@@ -34,8 +44,17 @@
#define TOMOYO_HASH_BITS 8
#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
+/*
+ * TOMOYO checks only SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET.
+ * Therefore, we don't need SOCK_MAX.
+ */
+#define TOMOYO_SOCK_MAX 6
+
#define TOMOYO_EXEC_TMPSIZE 4096
+/* Garbage collector is trying to kfree() this element. */
+#define TOMOYO_GC_IN_PROGRESS -1
+
/* Profile number is an integer between 0 and 255. */
#define TOMOYO_MAX_PROFILES 256
@@ -136,6 +155,7 @@ enum tomoyo_mode_index {
/* Index numbers for entry type. */
enum tomoyo_policy_id {
TOMOYO_ID_GROUP,
+ TOMOYO_ID_ADDRESS_GROUP,
TOMOYO_ID_PATH_GROUP,
TOMOYO_ID_NUMBER_GROUP,
TOMOYO_ID_TRANSITION_CONTROL,
@@ -162,10 +182,21 @@ enum tomoyo_domain_info_flags_index {
TOMOYO_MAX_DOMAIN_INFO_FLAGS
};
+/* Index numbers for audit type. */
+enum tomoyo_grant_log {
+ /* Follow profile's configuration. */
+ TOMOYO_GRANTLOG_AUTO,
+ /* Do not generate grant log. */
+ TOMOYO_GRANTLOG_NO,
+ /* Generate grant_log. */
+ TOMOYO_GRANTLOG_YES,
+};
+
/* Index numbers for group entries. */
enum tomoyo_group_id {
TOMOYO_PATH_GROUP,
TOMOYO_NUMBER_GROUP,
+ TOMOYO_ADDRESS_GROUP,
TOMOYO_MAX_GROUP
};
@@ -196,6 +227,10 @@ enum tomoyo_acl_entry_type_index {
TOMOYO_TYPE_PATH_NUMBER_ACL,
TOMOYO_TYPE_MKDEV_ACL,
TOMOYO_TYPE_MOUNT_ACL,
+ TOMOYO_TYPE_INET_ACL,
+ TOMOYO_TYPE_UNIX_ACL,
+ TOMOYO_TYPE_ENV_ACL,
+ TOMOYO_TYPE_MANUAL_TASK_ACL,
};
/* Index numbers for access controls with one pathname. */
@@ -228,6 +263,15 @@ enum tomoyo_mkdev_acl_index {
TOMOYO_MAX_MKDEV_OPERATION
};
+/* Index numbers for socket operations. */
+enum tomoyo_network_acl_index {
+ TOMOYO_NETWORK_BIND, /* bind() operation. */
+ TOMOYO_NETWORK_LISTEN, /* listen() operation. */
+ TOMOYO_NETWORK_CONNECT, /* connect() operation. */
+ TOMOYO_NETWORK_SEND, /* send() operation. */
+ TOMOYO_MAX_NETWORK_OPERATION
+};
+
/* Index numbers for access controls with two pathnames. */
enum tomoyo_path2_acl_index {
TOMOYO_TYPE_LINK,
@@ -255,7 +299,6 @@ enum tomoyo_securityfs_interface_index {
TOMOYO_EXCEPTIONPOLICY,
TOMOYO_PROCESS_STATUS,
TOMOYO_STAT,
- TOMOYO_SELFDOMAIN,
TOMOYO_AUDIT,
TOMOYO_VERSION,
TOMOYO_PROFILE,
@@ -300,12 +343,30 @@ enum tomoyo_mac_index {
TOMOYO_MAC_FILE_MOUNT,
TOMOYO_MAC_FILE_UMOUNT,
TOMOYO_MAC_FILE_PIVOT_ROOT,
+ TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ TOMOYO_MAC_ENVIRON,
TOMOYO_MAX_MAC_INDEX
};
/* Index numbers for category of functionality. */
enum tomoyo_mac_category_index {
TOMOYO_MAC_CATEGORY_FILE,
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ TOMOYO_MAC_CATEGORY_MISC,
TOMOYO_MAX_MAC_CATEGORY_INDEX
};
@@ -340,7 +401,7 @@ enum tomoyo_pref_index {
/* Common header for holding ACL entries. */
struct tomoyo_acl_head {
struct list_head list;
- bool is_deleted;
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
} __packed;
/* Common header for shared entries. */
@@ -397,13 +458,36 @@ struct tomoyo_request_info {
u8 operation;
} path_number;
struct {
+ const struct tomoyo_path_info *name;
+ } environ;
+ struct {
+ const __be32 *address;
+ u16 port;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ bool is_ipv6;
+ } inet_network;
+ struct {
+ const struct tomoyo_path_info *address;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ } unix_network;
+ struct {
const struct tomoyo_path_info *type;
const struct tomoyo_path_info *dir;
const struct tomoyo_path_info *dev;
unsigned long flags;
int need_dev;
} mount;
+ struct {
+ const struct tomoyo_path_info *domainname;
+ } task;
} param;
+ struct tomoyo_acl_info *matched_acl;
u8 param_type;
bool granted;
u8 retry;
@@ -442,7 +526,14 @@ struct tomoyo_number_union {
u8 value_type[2];
};
-/* Structure for "path_group"/"number_group" directive. */
+/* Structure for holding an IP address. */
+struct tomoyo_ipaddr_union {
+ struct in6_addr ip[2]; /* Big endian. */
+ struct tomoyo_group *group; /* Pointer to address group. */
+ bool is_ipv6; /* Valid only if @group == NULL. */
+};
+
+/* Structure for "path_group"/"number_group"/"address_group" directive. */
struct tomoyo_group {
struct tomoyo_shared_acl_head head;
const struct tomoyo_path_info *group_name;
@@ -461,6 +552,13 @@ struct tomoyo_number_group {
struct tomoyo_number_union number;
};
+/* Structure for "address_group" directive. */
+struct tomoyo_address_group {
+ struct tomoyo_acl_head head;
+ /* Structure for holding an IP address. */
+ struct tomoyo_ipaddr_union address;
+};
+
/* Subset of "struct stat". Used by conditional ACL and audit logs. */
struct tomoyo_mini_stat {
uid_t uid;
@@ -520,6 +618,7 @@ struct tomoyo_execve {
struct tomoyo_request_info r;
struct tomoyo_obj_info obj;
struct linux_binprm *bprm;
+ const struct tomoyo_path_info *transition;
/* For dumping argv[] and envp[]. */
struct tomoyo_page_dump dump;
/* For temporary use. */
@@ -554,6 +653,8 @@ struct tomoyo_condition {
u16 names_count; /* Number of "struct tomoyo_name_union names". */
u16 argc; /* Number of "struct tomoyo_argv". */
u16 envc; /* Number of "struct tomoyo_envp". */
+ u8 grant_log; /* One of values in "enum tomoyo_grant_log". */
+ const struct tomoyo_path_info *transit; /* Maybe NULL. */
/*
* struct tomoyo_condition_element condition[condc];
* struct tomoyo_number_union values[numbers_count];
@@ -567,7 +668,7 @@ struct tomoyo_condition {
struct tomoyo_acl_info {
struct list_head list;
struct tomoyo_condition *cond; /* Maybe NULL. */
- bool is_deleted;
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */
} __packed;
@@ -587,6 +688,15 @@ struct tomoyo_domain_info {
};
/*
+ * Structure for "task manual_domain_transition" directive.
+ */
+struct tomoyo_task_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */
+ /* Pointer to domainname. */
+ const struct tomoyo_path_info *domainname;
+};
+
+/*
* Structure for "file execute", "file read", "file write", "file append",
* "file unlink", "file getattr", "file rmdir", "file truncate",
* "file symlink", "file chroot" and "file unmount" directive.
@@ -638,6 +748,29 @@ struct tomoyo_mount_acl {
struct tomoyo_number_union flags;
};
+/* Structure for "misc env" directive in domain policy. */
+struct tomoyo_env_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_ENV_ACL */
+ const struct tomoyo_path_info *env; /* environment variable */
+};
+
+/* Structure for "network inet" directive. */
+struct tomoyo_inet_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_ipaddr_union address;
+ struct tomoyo_number_union port;
+};
+
+/* Structure for "network unix" directive. */
+struct tomoyo_unix_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_name_union name;
+};
+
/* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */
struct tomoyo_acl_param {
char *data;
@@ -773,7 +906,7 @@ struct tomoyo_policy_namespace {
struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS];
/* List for connecting to tomoyo_namespace_list list. */
struct list_head namespace_list;
- /* Profile version. Currently only 20100903 is defined. */
+ /* Profile version. Currently only 20110903 is defined. */
unsigned int profile_version;
/* Name of this namespace (e.g. "<kernel>", "</usr/sbin/httpd>" ). */
const char *name;
@@ -781,6 +914,8 @@ struct tomoyo_policy_namespace {
/********** Function prototypes. **********/
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group);
bool tomoyo_compare_number_union(const unsigned long value,
const struct tomoyo_number_union *ptr);
bool tomoyo_condition(struct tomoyo_request_info *r,
@@ -796,6 +931,8 @@ bool tomoyo_memory_ok(void *ptr);
bool tomoyo_number_matches_group(const unsigned long min,
const unsigned long max,
const struct tomoyo_group *group);
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr);
bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
struct tomoyo_name_union *ptr);
bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
@@ -805,6 +942,7 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
bool tomoyo_permstr(const char *string, const char *keyword);
bool tomoyo_str_starts(char **src, const char *find);
char *tomoyo_encode(const char *str);
+char *tomoyo_encode2(const char *str, int str_len);
char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
va_list args);
char *tomoyo_read_token(struct tomoyo_acl_param *param);
@@ -814,12 +952,17 @@ const char *tomoyo_get_exe(void);
const char *tomoyo_yesno(const unsigned int value);
const struct tomoyo_path_info *tomoyo_compare_name_union
(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr);
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param);
const struct tomoyo_path_info *tomoyo_get_name(const char *name);
const struct tomoyo_path_info *tomoyo_path_matches_group
(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group);
int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag);
int tomoyo_close_control(struct tomoyo_io_buffer *head);
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env);
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename);
int tomoyo_find_next_domain(struct linux_binprm *bprm);
int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
const u8 index);
@@ -838,10 +981,15 @@ int tomoyo_path_number_perm(const u8 operation, struct path *path,
unsigned long number);
int tomoyo_path_perm(const u8 operation, struct path *path,
const char *target);
-int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
- const struct tomoyo_path_info *filename);
int tomoyo_poll_control(struct file *file, poll_table *wait);
int tomoyo_poll_log(struct file *file, poll_table *wait);
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len);
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len);
+int tomoyo_socket_listen_permission(struct socket *sock);
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size);
int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
__printf(2, 3);
int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
@@ -860,8 +1008,11 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
int tomoyo_write_aggregator(struct tomoyo_acl_param *param);
int tomoyo_write_file(struct tomoyo_acl_param *param);
int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type);
+int tomoyo_write_misc(struct tomoyo_acl_param *param);
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param);
int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
const u8 type);
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param);
ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
const int buffer_len);
ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
@@ -891,12 +1042,11 @@ void tomoyo_del_condition(struct list_head *element);
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
void tomoyo_get_attributes(struct tomoyo_obj_info *obj);
void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns);
-void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
- __printf(2, 3);
void tomoyo_load_policy(const char *filename);
-void tomoyo_memory_free(void *ptr);
void tomoyo_normalize_line(unsigned char *buffer);
void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register);
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr);
void tomoyo_print_ulong(char *buffer, const int buffer_len,
const unsigned long value, const u8 type);
void tomoyo_put_name_union(struct tomoyo_name_union *ptr);
@@ -919,6 +1069,8 @@ extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX];
extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE];
extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION];
+extern const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX];
+extern const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION];
extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX];
extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION];
extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION];
@@ -1098,6 +1250,21 @@ static inline bool tomoyo_same_number_union
}
/**
+ * tomoyo_same_ipaddr_union - Check for duplicated "struct tomoyo_ipaddr_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_ipaddr_union".
+ * @b: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_ipaddr_union
+(const struct tomoyo_ipaddr_union *a, const struct tomoyo_ipaddr_union *b)
+{
+ return !memcmp(a->ip, b->ip, sizeof(a->ip)) && a->group == b->group &&
+ a->is_ipv6 == b->is_ipv6;
+}
+
+/**
* tomoyo_current_namespace - Get "struct tomoyo_policy_namespace" for current thread.
*
* Returns pointer to "struct tomoyo_policy_namespace" for current thread.
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
index 8a05f71eaf67..986330b8c73e 100644
--- a/security/tomoyo/condition.c
+++ b/security/tomoyo/condition.c
@@ -348,6 +348,7 @@ static inline bool tomoyo_same_condition(const struct tomoyo_condition *a,
a->numbers_count == b->numbers_count &&
a->names_count == b->names_count &&
a->argc == b->argc && a->envc == b->envc &&
+ a->grant_log == b->grant_log && a->transit == b->transit &&
!memcmp(a + 1, b + 1, a->size - sizeof(*a));
}
@@ -399,8 +400,9 @@ static struct tomoyo_condition *tomoyo_commit_condition
found = true;
goto out;
}
- list_for_each_entry_rcu(ptr, &tomoyo_condition_list, head.list) {
- if (!tomoyo_same_condition(ptr, entry))
+ list_for_each_entry(ptr, &tomoyo_condition_list, head.list) {
+ if (!tomoyo_same_condition(ptr, entry) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
/* Same entry found. Share this entry. */
atomic_inc(&ptr->head.users);
@@ -410,8 +412,7 @@ static struct tomoyo_condition *tomoyo_commit_condition
if (!found) {
if (tomoyo_memory_ok(entry)) {
atomic_set(&entry->head.users, 1);
- list_add_rcu(&entry->head.list,
- &tomoyo_condition_list);
+ list_add(&entry->head.list, &tomoyo_condition_list);
} else {
found = true;
ptr = NULL;
@@ -428,6 +429,46 @@ out:
}
/**
+ * tomoyo_get_transit_preference - Parse domain transition preference for execve().
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @e: Pointer to "struct tomoyo_condition".
+ *
+ * Returns the condition string part.
+ */
+static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param,
+ struct tomoyo_condition *e)
+{
+ char * const pos = param->data;
+ bool flag;
+ if (*pos == '<') {
+ e->transit = tomoyo_get_domainname(param);
+ goto done;
+ }
+ {
+ char *cp = strchr(pos, ' ');
+ if (cp)
+ *cp = '\0';
+ flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") ||
+ !strcmp(pos, "initialize") || !strcmp(pos, "reset") ||
+ !strcmp(pos, "child") || !strcmp(pos, "parent");
+ if (cp)
+ *cp = ' ';
+ }
+ if (!flag)
+ return pos;
+ e->transit = tomoyo_get_name(tomoyo_read_token(param));
+done:
+ if (e->transit)
+ return param->data;
+ /*
+ * Return a bad read-only condition string that will let
+ * tomoyo_get_condition() return NULL.
+ */
+ return "/";
+}
+
+/**
* tomoyo_get_condition - Parse condition part.
*
* @param: Pointer to "struct tomoyo_acl_param".
@@ -443,7 +484,8 @@ struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param)
struct tomoyo_argv *argv = NULL;
struct tomoyo_envp *envp = NULL;
struct tomoyo_condition e = { };
- char * const start_of_string = param->data;
+ char * const start_of_string =
+ tomoyo_get_transit_preference(param, &e);
char * const end_of_string = start_of_string + strlen(start_of_string);
char *pos;
rerun:
@@ -486,6 +528,20 @@ rerun:
goto out;
dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word,
is_not ? "!" : "", right_word);
+ if (!strcmp(left_word, "grant_log")) {
+ if (entry) {
+ if (is_not ||
+ entry->grant_log != TOMOYO_GRANTLOG_AUTO)
+ goto out;
+ else if (!strcmp(right_word, "yes"))
+ entry->grant_log = TOMOYO_GRANTLOG_YES;
+ else if (!strcmp(right_word, "no"))
+ entry->grant_log = TOMOYO_GRANTLOG_NO;
+ else
+ goto out;
+ }
+ continue;
+ }
if (!strncmp(left_word, "exec.argv[", 10)) {
if (!argv) {
e.argc++;
@@ -593,8 +649,9 @@ store_value:
+ e.envc * sizeof(struct tomoyo_envp);
entry = kzalloc(e.size, GFP_NOFS);
if (!entry)
- return NULL;
+ goto out2;
*entry = e;
+ e.transit = NULL;
condp = (struct tomoyo_condition_element *) (entry + 1);
numbers_p = (struct tomoyo_number_union *) (condp + e.condc);
names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count);
@@ -621,6 +678,8 @@ out:
tomoyo_del_condition(&entry->head.list);
kfree(entry);
}
+out2:
+ tomoyo_put_name(e.transit);
return NULL;
}
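
Note on the condition.c changes above: the two new condition-part features are easiest to see with concrete policy lines. The sketch below is illustrative only; the grammar is inferred from tomoyo_get_transit_preference() and the "grant_log" handling in tomoyo_get_condition(), not from separate documentation.

	/*
	 * Illustrative only -- assumed example domain policy lines exercising
	 * the new condition-part features parsed above.
	 */
	static const char * const demo_condition_lines[] = {
		/* Domain transition preference: jump to an absolute domain. */
		"file execute /usr/sbin/sshd <kernel> /usr/sbin/sshd",
		/* Keyword preferences recognized by tomoyo_get_transit_preference(). */
		"file execute /bin/mount keep",
		"file execute /sbin/init initialize",
		/* Per-entry audit control parsed as a condition. */
		"file read /etc/shadow grant_log=no",
	};

Per tomoyo_update_domain() in domain.c, a transition preference is rejected unless the entry is a "file execute" ACL.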
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index cd0f92d88bb4..9027ac1534af 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -39,6 +39,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -ENOMEM;
list_for_each_entry_rcu(entry, list, list) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
if (!check_duplicate(entry, new_entry))
continue;
entry->is_deleted = param->is_delete;
@@ -102,10 +104,21 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
new_entry->cond = tomoyo_get_condition(param);
if (!new_entry->cond)
return -EINVAL;
+ /*
+ * Domain transition preference is allowed only for
+ * "file execute" entries.
+ */
+ if (new_entry->cond->transit &&
+ !(new_entry->type == TOMOYO_TYPE_PATH_ACL &&
+ container_of(new_entry, struct tomoyo_path_acl, head)
+ ->perm == 1 << TOMOYO_TYPE_EXECUTE))
+ goto out;
}
if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
list_for_each_entry_rcu(entry, list, list) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
if (!tomoyo_same_acl_head(entry, new_entry) ||
!check_duplicate(entry, new_entry))
continue;
@@ -157,6 +170,7 @@ retry:
continue;
if (!tomoyo_condition(r, ptr->cond))
continue;
+ r->matched_acl = ptr;
r->granted = true;
return;
}
@@ -501,7 +515,8 @@ struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
* that domain. Do not perform domain transition if
* profile for that domain is not yet created.
*/
- if (!entry->ns->profile_ptr[entry->profile])
+ if (tomoyo_policy_loaded &&
+ !entry->ns->profile_ptr[entry->profile])
return NULL;
}
return entry;
@@ -557,12 +572,99 @@ out:
tomoyo_write_log(&r, "use_profile %u\n",
entry->profile);
tomoyo_write_log(&r, "use_group %u\n", entry->group);
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
}
}
return entry;
}
/**
+ * tomoyo_environ - Check permission for environment variable names.
+ *
+ * @ee: Pointer to "struct tomoyo_execve".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_environ(struct tomoyo_execve *ee)
+{
+ struct tomoyo_request_info *r = &ee->r;
+ struct linux_binprm *bprm = ee->bprm;
+ /* env_page.data is allocated by tomoyo_dump_page(). */
+ struct tomoyo_page_dump env_page = { };
+ char *arg_ptr; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+ int arg_len = 0;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ int error = -ENOMEM;
+
+ ee->r.type = TOMOYO_MAC_ENVIRON;
+ ee->r.profile = r->domain->profile;
+ ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile,
+ TOMOYO_MAC_ENVIRON);
+ if (!r->mode || !envp_count)
+ return 0;
+ arg_ptr = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+ if (!arg_ptr)
+ goto out;
+ while (error == -ENOMEM) {
+ if (!tomoyo_dump_page(bprm, pos, &env_page))
+ goto out;
+ pos += PAGE_SIZE - offset;
+ /* Read. */
+ while (argv_count && offset < PAGE_SIZE) {
+ if (!env_page.data[offset++])
+ argv_count--;
+ }
+ if (argv_count) {
+ offset = 0;
+ continue;
+ }
+ while (offset < PAGE_SIZE) {
+ const unsigned char c = env_page.data[offset++];
+
+ if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+ if (c == '=') {
+ arg_ptr[arg_len++] = '\0';
+ } else if (c == '\\') {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = '\\';
+ } else if (c > ' ' && c < 127) {
+ arg_ptr[arg_len++] = c;
+ } else {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = (c >> 6) + '0';
+ arg_ptr[arg_len++]
+ = ((c >> 3) & 7) + '0';
+ arg_ptr[arg_len++] = (c & 7) + '0';
+ }
+ } else {
+ arg_ptr[arg_len] = '\0';
+ }
+ if (c)
+ continue;
+ if (tomoyo_env_perm(r, arg_ptr)) {
+ error = -EPERM;
+ break;
+ }
+ if (!--envp_count) {
+ error = 0;
+ break;
+ }
+ arg_len = 0;
+ }
+ offset = 0;
+ }
+out:
+ if (r->mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ kfree(env_page.data);
+ kfree(arg_ptr);
+ return error;
+}
+
+/**
* tomoyo_find_next_domain - Find a domain.
*
* @bprm: Pointer to "struct linux_binprm".
@@ -577,10 +679,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
struct tomoyo_domain_info *domain = NULL;
const char *original_name = bprm->filename;
int retval = -ENOMEM;
- bool need_kfree = false;
bool reject_on_transition_failure = false;
- struct tomoyo_path_info rn = { }; /* real name */
+ const struct tomoyo_path_info *candidate;
+ struct tomoyo_path_info exename;
struct tomoyo_execve *ee = kzalloc(sizeof(*ee), GFP_NOFS);
+
if (!ee)
return -ENOMEM;
ee->tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
@@ -594,40 +697,32 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
ee->bprm = bprm;
ee->r.obj = &ee->obj;
ee->obj.path1 = bprm->file->f_path;
- retry:
- if (need_kfree) {
- kfree(rn.name);
- need_kfree = false;
- }
/* Get symlink's pathname of program. */
retval = -ENOENT;
- rn.name = tomoyo_realpath_nofollow(original_name);
- if (!rn.name)
+ exename.name = tomoyo_realpath_nofollow(original_name);
+ if (!exename.name)
goto out;
- tomoyo_fill_path_info(&rn);
- need_kfree = true;
-
+ tomoyo_fill_path_info(&exename);
+retry:
/* Check 'aggregator' directive. */
{
struct tomoyo_aggregator *ptr;
struct list_head *list =
&old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR];
/* Check 'aggregator' directive. */
+ candidate = &exename;
list_for_each_entry_rcu(ptr, list, head.list) {
if (ptr->head.is_deleted ||
- !tomoyo_path_matches_pattern(&rn,
+ !tomoyo_path_matches_pattern(&exename,
ptr->original_name))
continue;
- kfree(rn.name);
- need_kfree = false;
- /* This is OK because it is read only. */
- rn = *ptr->aggregated_name;
+ candidate = ptr->aggregated_name;
break;
}
}
/* Check execute permission. */
- retval = tomoyo_path_permission(&ee->r, TOMOYO_TYPE_EXECUTE, &rn);
+ retval = tomoyo_execute_permission(&ee->r, candidate);
if (retval == TOMOYO_RETRY_REQUEST)
goto retry;
if (retval < 0)
@@ -638,20 +733,51 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
* wildcard) rather than the pathname passed to execve()
* (which never contains wildcard).
*/
- if (ee->r.param.path.matched_path) {
- if (need_kfree)
- kfree(rn.name);
- need_kfree = false;
- /* This is OK because it is read only. */
- rn = *ee->r.param.path.matched_path;
- }
+ if (ee->r.param.path.matched_path)
+ candidate = ee->r.param.path.matched_path;
- /* Calculate domain to transit to. */
+ /*
+ * Check for domain transition preference if "file execute" matched.
+ * If a preference is given, make do_execve() fail when the domain
+ * transition fails, because a transition preference is meant to be
+ * used together with a defined destination domain.
+ */
+ if (ee->transition) {
+ const char *domainname = ee->transition->name;
+ reject_on_transition_failure = true;
+ if (!strcmp(domainname, "keep"))
+ goto force_keep_domain;
+ if (!strcmp(domainname, "child"))
+ goto force_child_domain;
+ if (!strcmp(domainname, "reset"))
+ goto force_reset_domain;
+ if (!strcmp(domainname, "initialize"))
+ goto force_initialize_domain;
+ if (!strcmp(domainname, "parent")) {
+ char *cp;
+ strncpy(ee->tmp, old_domain->domainname->name,
+ TOMOYO_EXEC_TMPSIZE - 1);
+ cp = strrchr(ee->tmp, ' ');
+ if (cp)
+ *cp = '\0';
+ } else if (*domainname == '<')
+ strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1);
+ else
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, domainname);
+ goto force_jump_domain;
+ }
+ /*
+ * No domain transition preference specified.
+ * Calculate domain to transit to.
+ */
switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname,
- &rn)) {
+ candidate)) {
case TOMOYO_TRANSITION_CONTROL_RESET:
+force_reset_domain:
/* Transit to the root of specified namespace. */
- snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>", rn.name);
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>",
+ candidate->name);
/*
* Make do_execve() fail if domain transition across namespaces
* has failed.
@@ -659,11 +785,13 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
reject_on_transition_failure = true;
break;
case TOMOYO_TRANSITION_CONTROL_INITIALIZE:
+force_initialize_domain:
/* Transit to the child of current namespace's root. */
snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
- old_domain->ns->name, rn.name);
+ old_domain->ns->name, candidate->name);
break;
case TOMOYO_TRANSITION_CONTROL_KEEP:
+force_keep_domain:
/* Keep current domain. */
domain = old_domain;
break;
@@ -677,13 +805,15 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
* before /sbin/init.
*/
domain = old_domain;
- } else {
- /* Normal domain transition. */
- snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
- old_domain->domainname->name, rn.name);
+ break;
}
+force_child_domain:
+ /* Normal domain transition. */
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, candidate->name);
break;
}
+force_jump_domain:
if (!domain)
domain = tomoyo_assign_domain(ee->tmp, true);
if (domain)
@@ -711,8 +841,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
/* Update reference count on "struct tomoyo_domain_info". */
atomic_inc(&domain->users);
bprm->cred->security = domain;
- if (need_kfree)
- kfree(rn.name);
+ kfree(exename.name);
+ if (!retval) {
+ ee->r.domain = domain;
+ retval = tomoyo_environ(ee);
+ }
kfree(ee->tmp);
kfree(ee->dump.data);
kfree(ee);
@@ -732,7 +865,8 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
struct tomoyo_page_dump *dump)
{
struct page *page;
- /* dump->data is released by tomoyo_finish_execve(). */
+
+ /* dump->data is released by tomoyo_find_next_domain(). */
if (!dump->data) {
dump->data = kzalloc(PAGE_SIZE, GFP_NOFS);
if (!dump->data)
@@ -753,6 +887,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
* So do I.
*/
char *kaddr = kmap_atomic(page, KM_USER0);
+
dump->page = page;
memcpy(dump->data + offset, kaddr + offset,
PAGE_SIZE - offset);
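
Note on tomoyo_environ() above: the byte-escaping loop is compact, so here is the same per-byte transformation pulled out into a stand-alone userspace sketch. Buffer-limit handling and the page walking are intentionally omitted, and the helper name is made up.

	/*
	 * Sketch of the per-byte encoding used when copying environment
	 * strings out of the binprm pages: '=' terminates the variable name
	 * (only the name is checked), '\' is doubled, printable bytes are
	 * copied verbatim, everything else becomes a three-digit octal escape.
	 */
	static int demo_encode_env_byte(unsigned char c, char *out)
	{
		if (c == '=') {
			out[0] = '\0';		/* cut the string at the name */
			return 1;
		}
		if (c == '\\') {
			out[0] = '\\';
			out[1] = '\\';
			return 2;
		}
		if (c > ' ' && c < 127) {
			out[0] = c;		/* printable: copy as-is */
			return 1;
		}
		out[0] = '\\';			/* others: \ooo octal escape */
		out[1] = (c >> 6) + '0';
		out[2] = ((c >> 3) & 7) + '0';
		out[3] = (c & 7) + '0';
		return 4;
	}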
diff --git a/security/tomoyo/environ.c b/security/tomoyo/environ.c
new file mode 100644
index 000000000000..ad4c6e18a437
--- /dev/null
+++ b/security/tomoyo/environ.c
@@ -0,0 +1,122 @@
+/*
+ * security/tomoyo/environ.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+/**
+ * tomoyo_check_env_acl - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_env_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_env_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return tomoyo_path_matches_pattern(r->param.environ.name, acl->env);
+}
+
+/**
+ * tomoyo_audit_env_log - Audit environment variable name log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_env_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "misc env %s\n",
+ r->param.environ.name->name);
+}
+
+/**
+ * tomoyo_env_perm - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @env: The name of environment variable.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env)
+{
+ struct tomoyo_path_info environ;
+ int error;
+
+ if (!env || !*env)
+ return 0;
+ environ.name = env;
+ tomoyo_fill_path_info(&environ);
+ r->param_type = TOMOYO_TYPE_ENV_ACL;
+ r->param.environ.name = &environ;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_env_acl);
+ error = tomoyo_audit_env_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ return error;
+}
+
+/**
+ * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->env == p2->env;
+}
+
+/**
+ * tomoyo_write_env - Write "struct tomoyo_env_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_env(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL };
+ int error = -ENOMEM;
+ const char *data = tomoyo_read_token(param);
+
+ if (!tomoyo_correct_word(data) || strchr(data, '='))
+ return -EINVAL;
+ e.env = tomoyo_get_name(data);
+ if (!e.env)
+ return error;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_env_acl, NULL);
+ tomoyo_put_name(e.env);
+ return error;
+}
+
+/**
+ * tomoyo_write_misc - Update environment variable list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_misc(struct tomoyo_acl_param *param)
+{
+ if (tomoyo_str_starts(&param->data, "env "))
+ return tomoyo_write_env(param);
+ return -EINVAL;
+}
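
Note on environ.c above: the new "misc env" directive written by tomoyo_write_env() pairs with the check in tomoyo_env_perm(); only the variable name is matched, never its value. Assumed examples, inferred from the "env " prefix check and the use of tomoyo_path_matches_pattern():

	/* Illustrative only -- assumed "misc env" policy lines. */
	static const char * const demo_env_lines[] = {
		"misc env PATH",
		"misc env LD_\\*",	/* any LD_* variable, via pattern matching */
	};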
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 743c35f5084a..400390790745 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -555,8 +555,8 @@ static int tomoyo_update_path2_acl(const u8 perm,
*
* Caller holds tomoyo_read_lock().
*/
-int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
- const struct tomoyo_path_info *filename)
+static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
+ const struct tomoyo_path_info *filename)
{
int error;
@@ -570,16 +570,42 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
do {
tomoyo_check_acl(r, tomoyo_check_path_acl);
error = tomoyo_audit_path_log(r);
- /*
- * Do not retry for execute request, for alias may have
- * changed.
- */
- } while (error == TOMOYO_RETRY_REQUEST &&
- operation != TOMOYO_TYPE_EXECUTE);
+ } while (error == TOMOYO_RETRY_REQUEST);
return error;
}
/**
+ * tomoyo_execute_permission - Check permission for execute operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @filename: Filename to check.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename)
+{
+ /*
+ * Unlike other permission checks, this check is done regardless of
+ * profile mode settings in order to check for domain transition
+ * preference.
+ */
+ r->type = TOMOYO_MAC_FILE_EXECUTE;
+ r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+ r->param_type = TOMOYO_TYPE_PATH_ACL;
+ r->param.path.filename = filename;
+ r->param.path.operation = TOMOYO_TYPE_EXECUTE;
+ tomoyo_check_acl(r, tomoyo_check_path_acl);
+ r->ee->transition = r->matched_acl && r->matched_acl->cond ?
+ r->matched_acl->cond->transit : NULL;
+ if (r->mode != TOMOYO_CONFIG_DISABLED)
+ return tomoyo_audit_path_log(r);
+ return 0;
+}
+
+/**
* tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
index ae135fbbbe95..986a6a756868 100644
--- a/security/tomoyo/gc.c
+++ b/security/tomoyo/gc.c
@@ -8,36 +8,26 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+/**
+ * tomoyo_memory_free - Free memory for elements.
+ *
+ * @ptr: Pointer to allocated memory.
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_memory_free(void *ptr)
+{
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr);
+ kfree(ptr);
+}
+
/* The list for "struct tomoyo_io_buffer". */
static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
-/* Size of an element. */
-static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = {
- [TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group),
- [TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group),
- [TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group),
- [TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator),
- [TOMOYO_ID_TRANSITION_CONTROL] =
- sizeof(struct tomoyo_transition_control),
- [TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager),
- /* [TOMOYO_ID_CONDITION] = "struct tomoyo_condition"->size, */
- /* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */
- /* [TOMOYO_ID_ACL] =
- tomoyo_acl_size["struct tomoyo_acl_info"->type], */
- [TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info),
-};
-
-/* Size of a domain ACL element. */
-static const u8 tomoyo_acl_size[] = {
- [TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl),
- [TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl),
- [TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl),
- [TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl),
- [TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl),
-};
-
/**
* tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
*
@@ -55,15 +45,11 @@ static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
- if (mutex_lock_interruptible(&head->io_sem)) {
- in_use = true;
- goto out;
- }
+ mutex_lock(&head->io_sem);
if (head->r.domain == element || head->r.group == element ||
head->r.acl == element || &head->w.domain->list == element)
in_use = true;
mutex_unlock(&head->io_sem);
-out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
@@ -77,15 +63,14 @@ out:
* tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
*
* @string: String to check.
- * @size: Memory allocated for @string .
*
* Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
* false otherwise.
*/
-static bool tomoyo_name_used_by_io_buffer(const char *string,
- const size_t size)
+static bool tomoyo_name_used_by_io_buffer(const char *string)
{
struct tomoyo_io_buffer *head;
+ const size_t size = strlen(string) + 1;
bool in_use = false;
spin_lock(&tomoyo_io_buffer_list_lock);
@@ -93,10 +78,7 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
int i;
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
- if (mutex_lock_interruptible(&head->io_sem)) {
- in_use = true;
- goto out;
- }
+ mutex_lock(&head->io_sem);
for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
const char *w = head->r.w[i];
if (w < string || w > string + size)
@@ -105,7 +87,6 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
break;
}
mutex_unlock(&head->io_sem);
-out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
@@ -115,84 +96,6 @@ out:
return in_use;
}
-/* Structure for garbage collection. */
-struct tomoyo_gc {
- struct list_head list;
- enum tomoyo_policy_id type;
- size_t size;
- struct list_head *element;
-};
-/* List of entries to be deleted. */
-static LIST_HEAD(tomoyo_gc_list);
-/* Length of tomoyo_gc_list. */
-static int tomoyo_gc_list_len;
-
-/**
- * tomoyo_add_to_gc - Add an entry to to be deleted list.
- *
- * @type: One of values in "enum tomoyo_policy_id".
- * @element: Pointer to "struct list_head".
- *
- * Returns true on success, false otherwise.
- *
- * Caller holds tomoyo_policy_lock mutex.
- *
- * Adding an entry needs kmalloc(). Thus, if we try to add thousands of
- * entries at once, it will take too long time. Thus, do not add more than 128
- * entries per a scan. But to be able to handle worst case where all entries
- * are in-use, we accept one more entry per a scan.
- *
- * If we use singly linked list using "struct list_head"->prev (which is
- * LIST_POISON2), we can avoid kmalloc().
- */
-static bool tomoyo_add_to_gc(const int type, struct list_head *element)
-{
- struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return false;
- entry->type = type;
- if (type == TOMOYO_ID_ACL)
- entry->size = tomoyo_acl_size[
- container_of(element,
- typeof(struct tomoyo_acl_info),
- list)->type];
- else if (type == TOMOYO_ID_NAME)
- entry->size = strlen(container_of(element,
- typeof(struct tomoyo_name),
- head.list)->entry.name) + 1;
- else if (type == TOMOYO_ID_CONDITION)
- entry->size =
- container_of(element, typeof(struct tomoyo_condition),
- head.list)->size;
- else
- entry->size = tomoyo_element_size[type];
- entry->element = element;
- list_add(&entry->list, &tomoyo_gc_list);
- list_del_rcu(element);
- return tomoyo_gc_list_len++ < 128;
-}
-
-/**
- * tomoyo_element_linked_by_gc - Validate next element of an entry.
- *
- * @element: Pointer to an element.
- * @size: Size of @element in byte.
- *
- * Returns true if @element is linked by other elements in the garbage
- * collector's queue, false otherwise.
- */
-static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
-{
- struct tomoyo_gc *p;
- list_for_each_entry(p, &tomoyo_gc_list, list) {
- const u8 *ptr = (const u8 *) p->element->next;
- if (ptr < element || element + size < ptr)
- continue;
- return true;
- }
- return false;
-}
-
/**
* tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
*
@@ -200,7 +103,7 @@ static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
*
* Returns nothing.
*/
-static void tomoyo_del_transition_control(struct list_head *element)
+static inline void tomoyo_del_transition_control(struct list_head *element)
{
struct tomoyo_transition_control *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -215,7 +118,7 @@ static void tomoyo_del_transition_control(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_aggregator(struct list_head *element)
+static inline void tomoyo_del_aggregator(struct list_head *element)
{
struct tomoyo_aggregator *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -230,7 +133,7 @@ static void tomoyo_del_aggregator(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_manager(struct list_head *element)
+static inline void tomoyo_del_manager(struct list_head *element)
{
struct tomoyo_manager *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -293,6 +196,38 @@ static void tomoyo_del_acl(struct list_head *element)
tomoyo_put_number_union(&entry->flags);
}
break;
+ case TOMOYO_TYPE_ENV_ACL:
+ {
+ struct tomoyo_env_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name(entry->env);
+ }
+ break;
+ case TOMOYO_TYPE_INET_ACL:
+ {
+ struct tomoyo_inet_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_group(entry->address.group);
+ tomoyo_put_number_union(&entry->port);
+ }
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ {
+ struct tomoyo_unix_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name_union(&entry->name);
+ }
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ {
+ struct tomoyo_task_acl *entry =
+ container_of(acl, typeof(*entry), head);
+ tomoyo_put_name(entry->domainname);
+ }
+ break;
}
}
@@ -301,44 +236,26 @@ static void tomoyo_del_acl(struct list_head *element)
*
* @element: Pointer to "struct list_head".
*
- * Returns true if deleted, false otherwise.
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
-static bool tomoyo_del_domain(struct list_head *element)
+static inline void tomoyo_del_domain(struct list_head *element)
{
struct tomoyo_domain_info *domain =
container_of(element, typeof(*domain), list);
struct tomoyo_acl_info *acl;
struct tomoyo_acl_info *tmp;
/*
- * Since we don't protect whole execve() operation using SRCU,
- * we need to recheck domain->users at this point.
- *
- * (1) Reader starts SRCU section upon execve().
- * (2) Reader traverses tomoyo_domain_list and finds this domain.
- * (3) Writer marks this domain as deleted.
- * (4) Garbage collector removes this domain from tomoyo_domain_list
- * because this domain is marked as deleted and used by nobody.
- * (5) Reader saves reference to this domain into
- * "struct linux_binprm"->cred->security .
- * (6) Reader finishes SRCU section, although execve() operation has
- * not finished yet.
- * (7) Garbage collector waits for SRCU synchronization.
- * (8) Garbage collector kfree() this domain because this domain is
- * used by nobody.
- * (9) Reader finishes execve() operation and restores this domain from
- * "struct linux_binprm"->cred->security.
- *
- * By updating domain->users at (5), we can solve this race problem
- * by rechecking domain->users at (8).
+ * Since this domain is referenced from neither
+ * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
+ * elements without checking the is_deleted flag.
*/
- if (atomic_read(&domain->users))
- return false;
list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
tomoyo_del_acl(&acl->list);
tomoyo_memory_free(acl);
}
tomoyo_put_name(domain->domainname);
- return true;
}
/**
@@ -387,10 +304,9 @@ void tomoyo_del_condition(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_name(struct list_head *element)
+static inline void tomoyo_del_name(struct list_head *element)
{
- const struct tomoyo_name *ptr =
- container_of(element, typeof(*ptr), head.list);
+ /* Nothing to do. */
}
/**
@@ -400,7 +316,7 @@ static void tomoyo_del_name(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_path_group(struct list_head *element)
+static inline void tomoyo_del_path_group(struct list_head *element)
{
struct tomoyo_path_group *member =
container_of(element, typeof(*member), head.list);
@@ -414,7 +330,7 @@ static void tomoyo_del_path_group(struct list_head *element)
*
* Returns nothing.
*/
-static void tomoyo_del_group(struct list_head *element)
+static inline void tomoyo_del_group(struct list_head *element)
{
struct tomoyo_group *group =
container_of(element, typeof(*group), head.list);
@@ -422,16 +338,128 @@ static void tomoyo_del_group(struct list_head *element)
}
/**
+ * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_address_group(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
* tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
*
* @element: Pointer to "struct list_head".
*
* Returns nothing.
*/
-static void tomoyo_del_number_group(struct list_head *element)
+static inline void tomoyo_del_number_group(struct list_head *element)
{
- struct tomoyo_number_group *member =
- container_of(element, typeof(*member), head.list);
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_try_to_gc - Try to kfree() an entry.
+ *
+ * @type: One of values in "enum tomoyo_policy_id".
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
+ struct list_head *element)
+{
+ /*
+ * __list_del_entry() guarantees that the list element is no longer
+ * reachable from the list it was originally on (e.g.
+ * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the
+ * list element is no longer referenced by syscall users.
+ */
+ __list_del_entry(element);
+ mutex_unlock(&tomoyo_policy_lock);
+ synchronize_srcu(&tomoyo_ss);
+ /*
+ * However, there are two users which may still be using the list
+ * element. We need to defer until both users forget this element.
+ *
+ * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
+ * and "struct tomoyo_io_buffer"->w.domain forget this element.
+ */
+ if (tomoyo_struct_used_by_io_buffer(element))
+ goto reinject;
+ switch (type) {
+ case TOMOYO_ID_TRANSITION_CONTROL:
+ tomoyo_del_transition_control(element);
+ break;
+ case TOMOYO_ID_MANAGER:
+ tomoyo_del_manager(element);
+ break;
+ case TOMOYO_ID_AGGREGATOR:
+ tomoyo_del_aggregator(element);
+ break;
+ case TOMOYO_ID_GROUP:
+ tomoyo_del_group(element);
+ break;
+ case TOMOYO_ID_PATH_GROUP:
+ tomoyo_del_path_group(element);
+ break;
+ case TOMOYO_ID_ADDRESS_GROUP:
+ tomoyo_del_address_group(element);
+ break;
+ case TOMOYO_ID_NUMBER_GROUP:
+ tomoyo_del_number_group(element);
+ break;
+ case TOMOYO_ID_CONDITION:
+ tomoyo_del_condition(element);
+ break;
+ case TOMOYO_ID_NAME:
+ /*
+ * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
+ * forget this element.
+ */
+ if (tomoyo_name_used_by_io_buffer
+ (container_of(element, typeof(struct tomoyo_name),
+ head.list)->entry.name))
+ goto reinject;
+ tomoyo_del_name(element);
+ break;
+ case TOMOYO_ID_ACL:
+ tomoyo_del_acl(element);
+ break;
+ case TOMOYO_ID_DOMAIN:
+ /*
+ * Don't kfree() until all "struct cred"->security forget this
+ * element.
+ */
+ if (atomic_read(&container_of
+ (element, typeof(struct tomoyo_domain_info),
+ list)->users))
+ goto reinject;
+ break;
+ case TOMOYO_MAX_POLICY:
+ break;
+ }
+ mutex_lock(&tomoyo_policy_lock);
+ if (type == TOMOYO_ID_DOMAIN)
+ tomoyo_del_domain(element);
+ tomoyo_memory_free(element);
+ return;
+reinject:
+ /*
+ * We can safely reinject this element here because
+ * (1) appending and removing list elements are protected by the
+ * tomoyo_policy_lock mutex, and
+ * (2) only this function removes list elements, and it runs exclusively
+ * under the tomoyo_gc_mutex mutex.
+ */
+ mutex_lock(&tomoyo_policy_lock);
+ list_add_rcu(element, element->prev);
}
/**
@@ -440,19 +468,19 @@ static void tomoyo_del_number_group(struct list_head *element)
* @id: One of values in "enum tomoyo_policy_id".
* @member_list: Pointer to "struct list_head".
*
- * Returns true if some elements are deleted, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
+static void tomoyo_collect_member(const enum tomoyo_policy_id id,
struct list_head *member_list)
{
struct tomoyo_acl_head *member;
- list_for_each_entry(member, member_list, list) {
+ struct tomoyo_acl_head *tmp;
+ list_for_each_entry_safe(member, tmp, member_list, list) {
if (!member->is_deleted)
continue;
- if (!tomoyo_add_to_gc(id, &member->list))
- return false;
+ member->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(id, &member->list);
}
- return true;
}
/**
@@ -460,22 +488,22 @@ static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
*
* @list: Pointer to "struct list_head".
*
- * Returns true if some elements are deleted, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_collect_acl(struct list_head *list)
+static void tomoyo_collect_acl(struct list_head *list)
{
struct tomoyo_acl_info *acl;
- list_for_each_entry(acl, list, list) {
+ struct tomoyo_acl_info *tmp;
+ list_for_each_entry_safe(acl, tmp, list, list) {
if (!acl->is_deleted)
continue;
- if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list))
- return false;
+ acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
}
- return true;
}
/**
- * tomoyo_collect_entry - Scan lists for deleted elements.
+ * tomoyo_collect_entry - Try to kfree() deleted elements.
*
* Returns nothing.
*/
@@ -484,174 +512,82 @@ static void tomoyo_collect_entry(void)
int i;
enum tomoyo_policy_id id;
struct tomoyo_policy_namespace *ns;
- int idx;
- if (mutex_lock_interruptible(&tomoyo_policy_lock))
- return;
- idx = tomoyo_read_lock();
+ mutex_lock(&tomoyo_policy_lock);
{
struct tomoyo_domain_info *domain;
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
- if (!tomoyo_collect_acl(&domain->acl_info_list))
- goto unlock;
+ struct tomoyo_domain_info *tmp;
+ list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
+ list) {
+ tomoyo_collect_acl(&domain->acl_info_list);
if (!domain->is_deleted || atomic_read(&domain->users))
continue;
- /*
- * Nobody is referring this domain. But somebody may
- * refer this domain after successful execve().
- * We recheck domain->users after SRCU synchronization.
- */
- if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list))
- goto unlock;
+ tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
}
}
- list_for_each_entry_rcu(ns, &tomoyo_namespace_list, namespace_list) {
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
for (id = 0; id < TOMOYO_MAX_POLICY; id++)
- if (!tomoyo_collect_member(id, &ns->policy_list[id]))
- goto unlock;
+ tomoyo_collect_member(id, &ns->policy_list[id]);
for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
- if (!tomoyo_collect_acl(&ns->acl_group[i]))
- goto unlock;
+ tomoyo_collect_acl(&ns->acl_group[i]);
+ }
+ {
+ struct tomoyo_shared_acl_head *ptr;
+ struct tomoyo_shared_acl_head *tmp;
+ list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
+ list) {
+ if (atomic_read(&ptr->users) > 0)
+ continue;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
+ }
+ }
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
struct list_head *list = &ns->group_list[i];
struct tomoyo_group *group;
+ struct tomoyo_group *tmp;
switch (i) {
case 0:
id = TOMOYO_ID_PATH_GROUP;
break;
- default:
+ case 1:
id = TOMOYO_ID_NUMBER_GROUP;
break;
+ default:
+ id = TOMOYO_ID_ADDRESS_GROUP;
+ break;
}
- list_for_each_entry(group, list, head.list) {
- if (!tomoyo_collect_member
- (id, &group->member_list))
- goto unlock;
+ list_for_each_entry_safe(group, tmp, list, head.list) {
+ tomoyo_collect_member(id, &group->member_list);
if (!list_empty(&group->member_list) ||
- atomic_read(&group->head.users))
+ atomic_read(&group->head.users) > 0)
continue;
- if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP,
- &group->head.list))
- goto unlock;
+ atomic_set(&group->head.users,
+ TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_GROUP,
+ &group->head.list);
}
}
}
- id = TOMOYO_ID_CONDITION;
- for (i = 0; i < TOMOYO_MAX_HASH + 1; i++) {
- struct list_head *list = !i ?
- &tomoyo_condition_list : &tomoyo_name_list[i - 1];
+ for (i = 0; i < TOMOYO_MAX_HASH; i++) {
+ struct list_head *list = &tomoyo_name_list[i];
struct tomoyo_shared_acl_head *ptr;
- list_for_each_entry(ptr, list, list) {
- if (atomic_read(&ptr->users))
+ struct tomoyo_shared_acl_head *tmp;
+ list_for_each_entry_safe(ptr, tmp, list, list) {
+ if (atomic_read(&ptr->users) > 0)
continue;
- if (!tomoyo_add_to_gc(id, &ptr->list))
- goto unlock;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
}
- id = TOMOYO_ID_NAME;
}
-unlock:
- tomoyo_read_unlock(idx);
mutex_unlock(&tomoyo_policy_lock);
}
/**
- * tomoyo_kfree_entry - Delete entries in tomoyo_gc_list.
- *
- * Returns true if some entries were kfree()d, false otherwise.
- */
-static bool tomoyo_kfree_entry(void)
-{
- struct tomoyo_gc *p;
- struct tomoyo_gc *tmp;
- bool result = false;
-
- list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) {
- struct list_head *element = p->element;
-
- /*
- * list_del_rcu() in tomoyo_add_to_gc() guarantees that the
- * list element became no longer reachable from the list which
- * the element was originally on (e.g. tomoyo_domain_list).
- * Also, synchronize_srcu() in tomoyo_gc_thread() guarantees
- * that the list element became no longer referenced by syscall
- * users.
- *
- * However, there are three users which may still be using the
- * list element. We need to defer until all of these users
- * forget the list element.
- *
- * Firstly, defer until "struct tomoyo_io_buffer"->r.{domain,
- * group,acl} and "struct tomoyo_io_buffer"->w.domain forget
- * the list element.
- */
- if (tomoyo_struct_used_by_io_buffer(element))
- continue;
- /*
- * Secondly, defer until all other elements in the
- * tomoyo_gc_list list forget the list element.
- */
- if (tomoyo_element_linked_by_gc((const u8 *) element, p->size))
- continue;
- switch (p->type) {
- case TOMOYO_ID_TRANSITION_CONTROL:
- tomoyo_del_transition_control(element);
- break;
- case TOMOYO_ID_AGGREGATOR:
- tomoyo_del_aggregator(element);
- break;
- case TOMOYO_ID_MANAGER:
- tomoyo_del_manager(element);
- break;
- case TOMOYO_ID_CONDITION:
- tomoyo_del_condition(element);
- break;
- case TOMOYO_ID_NAME:
- /*
- * Thirdly, defer until all "struct tomoyo_io_buffer"
- * ->r.w[] forget the list element.
- */
- if (tomoyo_name_used_by_io_buffer(
- container_of(element, typeof(struct tomoyo_name),
- head.list)->entry.name, p->size))
- continue;
- tomoyo_del_name(element);
- break;
- case TOMOYO_ID_ACL:
- tomoyo_del_acl(element);
- break;
- case TOMOYO_ID_DOMAIN:
- if (!tomoyo_del_domain(element))
- continue;
- break;
- case TOMOYO_ID_PATH_GROUP:
- tomoyo_del_path_group(element);
- break;
- case TOMOYO_ID_GROUP:
- tomoyo_del_group(element);
- break;
- case TOMOYO_ID_NUMBER_GROUP:
- tomoyo_del_number_group(element);
- break;
- case TOMOYO_MAX_POLICY:
- break;
- }
- tomoyo_memory_free(element);
- list_del(&p->list);
- kfree(p);
- tomoyo_gc_list_len--;
- result = true;
- }
- return result;
-}
-
-/**
* tomoyo_gc_thread - Garbage collector thread function.
*
* @unused: Unused.
*
- * In case OOM-killer choose this thread for termination, we create this thread
- * as a short live thread whenever /sys/kernel/security/tomoyo/ interface was
- * close()d.
- *
* Returns 0.
*/
static int tomoyo_gc_thread(void *unused)
@@ -660,13 +596,7 @@ static int tomoyo_gc_thread(void *unused)
static DEFINE_MUTEX(tomoyo_gc_mutex);
if (!mutex_trylock(&tomoyo_gc_mutex))
goto out;
- daemonize("GC for TOMOYO");
- do {
- tomoyo_collect_entry();
- if (list_empty(&tomoyo_gc_list))
- break;
- synchronize_srcu(&tomoyo_ss);
- } while (tomoyo_kfree_entry());
+ tomoyo_collect_entry();
{
struct tomoyo_io_buffer *head;
struct tomoyo_io_buffer *tmp;
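
Note on the gc.c rewrite above: the deferred queue is replaced by a claim-then-free convention. tomoyo_collect_entry() marks an entry (its users counter or is_deleted flag set to TOMOYO_GC_IN_PROGRESS) while holding tomoyo_policy_lock, and lookup paths that could re-share an entry (tomoyo_get_name(), tomoyo_get_group(), tomoyo_commit_condition(), tomoyo_update_policy()/tomoyo_update_domain()) now skip claimed entries. A minimal userspace sketch of the reader side, with made-up types and a stand-in marker value:

	#include <stdbool.h>

	#define DEMO_GC_IN_PROGRESS (-1)	/* stand-in for TOMOYO_GC_IN_PROGRESS */

	struct demo_shared_head {
		int users;			/* stand-in for atomic_t head.users */
	};

	/*
	 * An entry may only be re-shared while the collector has not claimed
	 * it.  (Locking omitted; in the kernel this runs under
	 * tomoyo_policy_lock.)
	 */
	static bool demo_try_share(struct demo_shared_head *head)
	{
		if (head->users == DEMO_GC_IN_PROGRESS)
			return false;	/* collector owns it; allocate a new one */
		head->users++;
		return true;
	}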
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
index 5fb0e1298400..50092534ec54 100644
--- a/security/tomoyo/group.c
+++ b/security/tomoyo/group.c
@@ -42,7 +42,26 @@ static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a,
}
/**
- * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group" list.
+ * tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2),
+ head);
+
+ return tomoyo_same_ipaddr_union(&p1->address, &p2->address);
+}
+
+/**
+ * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @param: Pointer to "struct tomoyo_acl_param".
* @type: Type of this group.
@@ -77,6 +96,14 @@ int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
* tomoyo_put_number_union() is not needed because
* param->data[0] != '@'.
*/
+ } else {
+ struct tomoyo_address_group e = { };
+
+ if (param->data[0] == '@' ||
+ !tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_address_group);
}
out:
tomoyo_put_group(group);
@@ -137,3 +164,35 @@ bool tomoyo_number_matches_group(const unsigned long min,
}
return matched;
}
+
+/**
+ * tomoyo_address_matches_group - Check whether the given address matches members of the given address group.
+ *
+ * @is_ipv6: True if @address is an IPv6 address.
+ * @address: An IPv4 or IPv6 address.
+ * @group: Pointer to "struct tomoyo_address_group".
+ *
+ * Returns true if @address matches an entry in @group, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_address_group *member;
+ bool matched = false;
+ const u8 size = is_ipv6 ? 16 : 4;
+
+ list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ if (member->head.is_deleted)
+ continue;
+ if (member->address.is_ipv6 != is_ipv6)
+ continue;
+ if (memcmp(&member->address.ip[0], address, size) > 0 ||
+ memcmp(address, &member->address.ip[1], size) > 0)
+ continue;
+ matched = true;
+ break;
+ }
+ return matched;
+}
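
Note on tomoyo_address_matches_group() above: the range test relies on the addresses being stored in network byte order, so a byte-wise memcmp() orders them the same way numeric comparison would. A stand-alone userspace illustration (IPv4 only):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const unsigned char lo[4]   = { 10, 0, 0, 1 };	/* 10.0.0.1   */
		const unsigned char hi[4]   = { 10, 0, 0, 100 };	/* 10.0.0.100 */
		const unsigned char addr[4] = { 10, 0, 0, 42 };	/* 10.0.0.42  */
		/* Same shape as the kernel check: lo <= addr && addr <= hi. */
		const int in_range = memcmp(lo, addr, 4) <= 0 &&
				     memcmp(addr, hi, 4) <= 0;

		printf("in range: %d\n", in_range);	/* prints 1 */
		return 0;
	}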
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
index 7a56051146c2..0e995716cc25 100644
--- a/security/tomoyo/memory.c
+++ b/security/tomoyo/memory.c
@@ -27,8 +27,6 @@ void tomoyo_warn_oom(const char *function)
panic("MAC Initialization failed.\n");
}
-/* Lock for protecting tomoyo_memory_used. */
-static DEFINE_SPINLOCK(tomoyo_policy_memory_lock);
/* Memory currently used by policy/audit log/query. */
unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
/* Memory quota for "policy"/"audit log"/"query". */
@@ -42,22 +40,19 @@ unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
* Returns true on success, false otherwise.
*
* Returns true if @ptr is not NULL and quota not exceeded, false otherwise.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
bool tomoyo_memory_ok(void *ptr)
{
if (ptr) {
const size_t s = ksize(ptr);
- bool result;
- spin_lock(&tomoyo_policy_memory_lock);
tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s;
- result = !tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
- tomoyo_memory_quota[TOMOYO_MEMORY_POLICY];
- if (!result)
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
- spin_unlock(&tomoyo_policy_memory_lock);
- if (result)
+ if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
+ tomoyo_memory_quota[TOMOYO_MEMORY_POLICY])
return true;
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
}
tomoyo_warn_oom(__func__);
return false;
@@ -71,6 +66,8 @@ bool tomoyo_memory_ok(void *ptr)
*
* Returns pointer to allocated memory on success, NULL otherwise.
* @data is zero-cleared on success.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
*/
void *tomoyo_commit_ok(void *data, const unsigned int size)
{
@@ -85,20 +82,6 @@ void *tomoyo_commit_ok(void *data, const unsigned int size)
}
/**
- * tomoyo_memory_free - Free memory for elements.
- *
- * @ptr: Pointer to allocated memory.
- */
-void tomoyo_memory_free(void *ptr)
-{
- size_t s = ksize(ptr);
- spin_lock(&tomoyo_policy_memory_lock);
- tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
- spin_unlock(&tomoyo_policy_memory_lock);
- kfree(ptr);
-}
-
-/**
* tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group".
*
* @param: Pointer to "struct tomoyo_acl_param".
@@ -123,7 +106,8 @@ struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
goto out;
list = &param->ns->group_list[idx];
list_for_each_entry(group, list, head.list) {
- if (e.group_name != group->group_name)
+ if (e.group_name != group->group_name ||
+ atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&group->head.users);
found = true;
@@ -175,7 +159,8 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name)
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return NULL;
list_for_each_entry(ptr, head, head.list) {
- if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
+ if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&ptr->head.users);
goto out;
diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
new file mode 100644
index 000000000000..97527710a72a
--- /dev/null
+++ b/security/tomoyo/network.c
@@ -0,0 +1,771 @@
+/*
+ * security/tomoyo/network.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* Structure for holding inet domain socket's address. */
+struct tomoyo_inet_addr_info {
+ __be16 port; /* In network byte order. */
+ const __be32 *address; /* In network byte order. */
+ bool is_ipv6;
+};
+
+/* Structure for holding unix domain socket's address. */
+struct tomoyo_unix_addr_info {
+ u8 *addr; /* This may not be a '\0' terminated string. */
+ unsigned int addr_len;
+};
+
+/* Structure for holding socket address. */
+struct tomoyo_addr_info {
+ u8 protocol;
+ u8 operation;
+ struct tomoyo_inet_addr_info inet;
+ struct tomoyo_unix_addr_info unix0;
+};
+
+/* String table for socket's protocols. */
+const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = {
+ [SOCK_STREAM] = "stream",
+ [SOCK_DGRAM] = "dgram",
+ [SOCK_RAW] = "raw",
+ [SOCK_SEQPACKET] = "seqpacket",
+ [0] = " ", /* Dummy for avoiding NULL pointer dereference. */
+ [4] = " ", /* Dummy for avoiding NULL pointer dereference. */
+};
+
+/**
+ * tomoyo_parse_ipaddr_union - Parse an IP address.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr)
+{
+ u8 * const min = ptr->ip[0].in6_u.u6_addr8;
+ u8 * const max = ptr->ip[1].in6_u.u6_addr8;
+ char *address = tomoyo_read_token(param);
+ const char *end;
+
+ if (!strchr(address, ':') &&
+ in4_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = false;
+ if (!*end)
+ ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0];
+ else if (*end++ != '-' ||
+ in4_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ if (in6_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = true;
+ if (!*end)
+ memmove(max, min, sizeof(u16) * 8);
+ else if (*end++ != '-' ||
+ in6_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ return false;
+}
+
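
Note on tomoyo_parse_ipaddr_union() above: the address tokens it accepts, inferred from the in4_pton()/in6_pton() calls, are sketched below. A single address is stored as a degenerate range (ip[0] == ip[1]); an explicit range uses '-'.

	/* Illustrative only -- assumed address tokens accepted by the parser. */
	static const char * const demo_ipaddr_tokens[] = {
		"10.0.0.1",			/* IPv4, min == max */
		"10.0.0.0-10.0.0.255",		/* IPv4 range       */
		"::1",				/* IPv6, min == max */
		"2001:db8::1-2001:db8::ffff",	/* IPv6 range       */
	};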
+/**
+ * tomoyo_print_ipv4 - Print an IPv4 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to __be32.
+ * @max_ip: Pointer to __be32.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len,
+ const __be32 *min_ip, const __be32 *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip,
+ *min_ip == *max_ip ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ipv6 - Print an IPv6 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to "struct in6_addr".
+ * @max_ip: Pointer to "struct in6_addr".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len,
+ const struct in6_addr *min_ip,
+ const struct in6_addr *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip,
+ !memcmp(min_ip, max_ip, 16) ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ip - Print an IP address.
+ *
+ * @buf: Buffer to write to.
+ * @size: Size of @buf.
+ * @ptr: Pointer to "struct ipaddr_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr)
+{
+ if (ptr->is_ipv6)
+ tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]);
+ else
+ tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0],
+ &ptr->ip[1].s6_addr32[0]);
+}
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for inet domain socket.
+ */
+static const u8 tomoyo_inet2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ },
+ [SOCK_RAW] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ },
+};
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for unix domain socket.
+ */
+static const u8 tomoyo_unix2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ },
+ [SOCK_SEQPACKET] = {
+ [TOMOYO_NETWORK_BIND] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ },
+};
+
+/**
+ * tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_ipaddr_union(&p1->address, &p2->address) &&
+ tomoyo_same_number_union(&p1->port, &p2->port);
+}
+
+/**
+ * tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_name_union(&p1->name, &p2->name);
+}
+
+/**
+ * tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_inet_acl, head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
+}
+
+/**
+ * tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_unix_acl, head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
+}
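
The permission arithmetic shared by both merge helpers, as a worked example (operation bits come from "enum tomoyo_network_acl_index"):

    u8 perm = (1 << TOMOYO_NETWORK_BIND) | (1 << TOMOYO_NETWORK_LISTEN);

    perm &= ~(1 << TOMOYO_NETWORK_BIND);    /* delete "bind":   "listen" remains */
    perm &= ~(1 << TOMOYO_NETWORK_LISTEN);  /* delete "listen": perm becomes 0   */
    /* perm == 0 makes the merge helper return true, telling the caller the
     * entry no longer grants anything and may be removed. */
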
+
+/**
+ * tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL };
+ int error = -EINVAL;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (param->data[0] == '@') {
+ param->data++;
+ e.address.group =
+ tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP);
+ if (!e.address.group)
+ return -ENOMEM;
+ } else {
+ if (!tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ }
+ if (!tomoyo_parse_number_union(param, &e.port) ||
+ e.port.values[1] > 65535)
+ goto out;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_inet_acl,
+ tomoyo_merge_inet_acl);
+out:
+ tomoyo_put_group(e.address.group);
+ tomoyo_put_number_union(&e.port);
+ return error;
+}
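
For illustration, domain policy lines of the following shape should be accepted by this parser (the leading "network inet" part is consumed by the caller; the addresses, group name and port numbers are made-up examples):

    network inet stream bind 10.0.0.0-10.255.255.255 1024-65535
    network inet dgram send @trusted_resolvers 53

The "@" form references an address group defined in the exception policy (TOMOYO_ADDRESS_GROUP), and the final number may be a single port or a range, rejected if greater than 65535 by the check above.
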
+
+/**
+ * tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL };
+ int error;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (!tomoyo_parse_name_union(param, &e.name))
+ return -EINVAL;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_unix_acl,
+ tomoyo_merge_unix_acl);
+ tomoyo_put_name_union(&e.name);
+ return error;
+}
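
Similarly, illustrative lines this parser should accept (the socket paths are invented; since the address is a tomoyo_name_union, path patterns and "@" path-group references should work as well):

    network unix stream connect /var/run/dbus/system_bus_socket
    network unix seqpacket bind /run/example.sock
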
+
+/**
+ * tomoyo_audit_net_log - Audit network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @family: Name of socket family ("inet" or "unix").
+ * @protocol: Name of protocol in @family.
+ * @operation: Name of socket operation.
+ * @address: Name of address.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_net_log(struct tomoyo_request_info *r,
+ const char *family, const u8 protocol,
+ const u8 operation, const char *address)
+{
+ return tomoyo_supervisor(r, "network %s %s %s %s\n", family,
+ tomoyo_proto_keyword[protocol],
+ tomoyo_socket_keyword[operation], address);
+}
+
+/**
+ * tomoyo_audit_inet_log - Audit INET network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_inet_log(struct tomoyo_request_info *r)
+{
+ char buf[128];
+ int len;
+ const __be32 *address = r->param.inet_network.address;
+
+ if (r->param.inet_network.is_ipv6)
+ tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *)
+ address, (const struct in6_addr *) address);
+ else
+ tomoyo_print_ipv4(buf, sizeof(buf), address, address);
+ len = strlen(buf);
+ snprintf(buf + len, sizeof(buf) - len, " %u",
+ r->param.inet_network.port);
+ return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol,
+ r->param.inet_network.operation, buf);
+}
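
Combining the helpers above, an IPv4 connect request would reach tomoyo_supervisor() as a single line of roughly this shape (values invented):

    network inet stream connect 192.0.2.10 443

and the corresponding UNIX-domain case as:

    network unix stream connect /var/run/example.sock
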
+
+/**
+ * tomoyo_audit_unix_log - Audit UNIX network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_unix_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol,
+ r->param.unix_network.operation,
+ r->param.unix_network.address->name);
+}
+
+/**
+ * tomoyo_check_inet_acl - Check permission for inet domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_inet_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4;
+
+ if (!(acl->perm & (1 << r->param.inet_network.operation)) ||
+ !tomoyo_compare_number_union(r->param.inet_network.port,
+ &acl->port))
+ return false;
+ if (acl->address.group)
+ return tomoyo_address_matches_group
+ (r->param.inet_network.is_ipv6,
+ r->param.inet_network.address, acl->address.group);
+ return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 &&
+ memcmp(&acl->address.ip[0],
+ r->param.inet_network.address, size) <= 0 &&
+ memcmp(r->param.inet_network.address,
+ &acl->address.ip[1], size) <= 0;
+}
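
The range test relies on both the ACL bounds and the requested address being kept in network byte order, where a byte-wise comparison orders values numerically:

    /* For equal-sized big-endian (network order) byte arrays,
     *   memcmp(min, x, size) <= 0 && memcmp(x, max, size) <= 0
     * holds exactly when min <= x <= max numerically, so the same code
     * handles IPv4 (size 4) and IPv6 (size 16) ranges. */
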
+
+/**
+ * tomoyo_check_unix_acl - Check permission for unix domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_unix_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return (acl->perm & (1 << r->param.unix_network.operation)) &&
+ tomoyo_compare_name_union(r->param.unix_network.address,
+ &acl->name);
+}
+
+/**
+ * tomoyo_inet_entry - Check permission for INET network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inet_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_inet2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ r.param_type = TOMOYO_TYPE_INET_ACL;
+ r.param.inet_network.protocol = address->protocol;
+ r.param.inet_network.operation = address->operation;
+ r.param.inet_network.is_ipv6 = address->inet.is_ipv6;
+ r.param.inet_network.address = address->inet.address;
+ r.param.inet_network.port = ntohs(address->inet.port);
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_inet_acl);
+ error = tomoyo_audit_inet_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_inet_address - Check permission for inet domain socket's operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @port: Port number.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_inet_address(const struct sockaddr *addr,
+ const unsigned int addr_len,
+ const u16 port,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_inet_addr_info *i = &address->inet;
+
+ switch (addr->sa_family) {
+ case AF_INET6:
+ if (addr_len < SIN6_LEN_RFC2133)
+ goto skip;
+ i->is_ipv6 = true;
+ i->address = (__be32 *)
+ ((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr;
+ i->port = ((struct sockaddr_in6 *) addr)->sin6_port;
+ break;
+ case AF_INET:
+ if (addr_len < sizeof(struct sockaddr_in))
+ goto skip;
+ i->is_ipv6 = false;
+ i->address = (__be32 *)
+ &((struct sockaddr_in *) addr)->sin_addr;
+ i->port = ((struct sockaddr_in *) addr)->sin_port;
+ break;
+ default:
+ goto skip;
+ }
+ if (address->protocol == SOCK_RAW)
+ i->port = htons(port);
+ return tomoyo_inet_entry(address);
+skip:
+ return 0;
+}
+
+/**
+ * tomoyo_unix_entry - Check permission for UNIX network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_unix_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_unix2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ char *buf = address->unix0.addr;
+ int len = address->unix0.addr_len - sizeof(sa_family_t);
+
+ if (len <= 0) {
+ buf = "anonymous";
+ len = 9;
+ } else if (buf[0]) {
+ len = strnlen(buf, len);
+ }
+ buf = tomoyo_encode2(buf, len);
+ if (buf) {
+ struct tomoyo_path_info addr;
+
+ addr.name = buf;
+ tomoyo_fill_path_info(&addr);
+ r.param_type = TOMOYO_TYPE_UNIX_ACL;
+ r.param.unix_network.protocol = address->protocol;
+ r.param.unix_network.operation = address->operation;
+ r.param.unix_network.address = &addr;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_unix_acl);
+ error = tomoyo_audit_unix_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ kfree(buf);
+ } else
+ error = -ENOMEM;
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
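
Some examples of how the code above renders a sun_path before matching (illustrative; the octal escaping is what tomoyo_encode2(), added in realpath.c below, is expected to produce for unprintable bytes):

    unnamed socket (addr_len == sizeof(sa_family_t))   ->  "anonymous"
    pathname socket "/run/foo.sock"                    ->  "/run/foo.sock"
    abstract socket "\0bar" (4 address bytes)          ->  "\000bar"
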
+
+/**
+ * tomoyo_check_unix_address - Check permission for unix domain socket's operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_unix_address(struct sockaddr *addr,
+ const unsigned int addr_len,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_unix_addr_info *u = &address->unix0;
+
+ if (addr->sa_family != AF_UNIX)
+ return 0;
+ u->addr = ((struct sockaddr_un *) addr)->sun_path;
+ u->addr_len = addr_len;
+ return tomoyo_unix_entry(address);
+}
+
+/**
+ * tomoyo_kernel_service - Check whether I'm kernel service or not.
+ *
+ * Returns true if I'm kernel service, false otherwise.
+ */
+static bool tomoyo_kernel_service(void)
+{
+ /* Nothing to do if I am a kernel service. */
+ return segment_eq(get_fs(), KERNEL_DS);
+}
+
+/**
+ * tomoyo_sock_family - Get socket's family.
+ *
+ * @sk: Pointer to "struct sock".
+ *
+ * Returns one of PF_INET, PF_INET6, PF_UNIX or 0.
+ */
+static u8 tomoyo_sock_family(struct sock *sk)
+{
+ u8 family;
+
+ if (tomoyo_kernel_service())
+ return 0;
+ family = sk->sk_family;
+ switch (family) {
+ case PF_INET:
+ case PF_INET6:
+ case PF_UNIX:
+ return family;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * tomoyo_socket_listen_permission - Check permission for listening a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_listen_permission(struct socket *sock)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+ struct sockaddr_storage addr;
+ int addr_len;
+
+ if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET))
+ return 0;
+ {
+ const int error = sock->ops->getname(sock, (struct sockaddr *)
+ &addr, &addr_len, 0);
+
+ if (error)
+ return error;
+ }
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_LISTEN;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *) &addr,
+ addr_len, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len,
+ 0, &address);
+}
+
+/**
+ * tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ address.protocol = type;
+ switch (type) {
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ address.operation = TOMOYO_NETWORK_SEND;
+ break;
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ address.operation = TOMOYO_NETWORK_CONNECT;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_bind_permission - Check permission for setting the local address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ switch (type) {
+ case SOCK_STREAM:
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ case SOCK_SEQPACKET:
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_BIND;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_sendmsg_permission - Check permission for sending a datagram.
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Unused.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!msg->msg_name || !family ||
+ (type != SOCK_DGRAM && type != SOCK_RAW))
+ return 0;
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_SEND;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *)
+ msg->msg_name,
+ msg->msg_namelen, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name,
+ msg->msg_namelen,
+ sock->sk->sk_protocol, &address);
+}
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 6c601bd300f3..738bbdf8d4c7 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -15,17 +15,19 @@
#include "../../fs/internal.h"
/**
- * tomoyo_encode: Convert binary string to ascii string.
+ * tomoyo_encode2 - Encode binary string to ascii string.
*
- * @str: String in binary format.
+ * @str: String in binary format.
+ * @str_len: Size of @str in bytes.
*
* Returns pointer to @str in ascii format on success, NULL otherwise.
*
* This function uses kzalloc(), so caller must kfree() if this function
* didn't return NULL.
*/
-char *tomoyo_encode(const char *str)
+char *tomoyo_encode2(const char *str, int str_len)
{
+ int i;
int len = 0;
const char *p = str;
char *cp;
@@ -33,8 +35,9 @@ char *tomoyo_encode(const char *str)
if (!p)
return NULL;
- while (*p) {
- const unsigned char c = *p++;
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
+
if (c == '\\')
len += 2;
else if (c > ' ' && c < 127)
@@ -49,8 +52,8 @@ char *tomoyo_encode(const char *str)
return NULL;
cp0 = cp;
p = str;
- while (*p) {
- const unsigned char c = *p++;
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
if (c == '\\') {
*cp++ = '\\';
@@ -68,6 +71,21 @@ char *tomoyo_encode(const char *str)
}
/**
+ * tomoyo_encode - Encode binary string to ascii string.
+ *
+ * @str: String in binary format.
+ *
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_encode(const char *str)
+{
+ return str ? tomoyo_encode2(str, strlen(str)) : NULL;
+}
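
Hedged examples of the expected encoding, following the length accounting above (one byte for printable characters other than '\\', two for '\\', four for anything else, presumably '\\' plus three octal digits):

    "/tmp/a b"            ->  "/tmp/a\040b"    (space, 0x20, becomes octal \040)
    a literal backslash   ->  "\\"             (doubled on output)
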
+
+/**
* tomoyo_get_absolute_path - Get the path of a dentry but ignores chroot'ed root.
*
* @path: Pointer to "struct path".
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index a49c3bfd4dd5..2672ac4f3beb 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -8,6 +8,124 @@
#include "common.h"
/**
+ * tomoyo_check_task_acl - Check permission for task operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_task_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl),
+ head);
+ return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
+}
+
+/**
+ * tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname to transit to.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ *
+ * If domain transition was permitted but the domain transition failed, this
+ * function returns error rather than terminating current thread with SIGKILL.
+ */
+static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ int error;
+ if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10)
+ return -ENOMEM;
+ data = kzalloc(count + 1, GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
+ if (copy_from_user(data, buf, count)) {
+ error = -EFAULT;
+ goto out;
+ }
+ tomoyo_normalize_line(data);
+ if (tomoyo_correct_domain(data)) {
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_path_info name;
+ struct tomoyo_request_info r;
+ name.name = data;
+ tomoyo_fill_path_info(&name);
+ /* Check "task manual_domain_transition" permission. */
+ tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+ r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL;
+ r.param.task.domainname = &name;
+ tomoyo_check_acl(&r, tomoyo_check_task_acl);
+ if (!r.granted)
+ error = -EPERM;
+ else {
+ struct tomoyo_domain_info *new_domain =
+ tomoyo_assign_domain(data, true);
+ if (!new_domain) {
+ error = -ENOENT;
+ } else {
+ struct cred *cred = prepare_creds();
+ if (!cred) {
+ error = -ENOMEM;
+ } else {
+ struct tomoyo_domain_info *old_domain =
+ cred->security;
+ cred->security = new_domain;
+ atomic_inc(&new_domain->users);
+ atomic_dec(&old_domain->users);
+ commit_creds(cred);
+ error = 0;
+ }
+ }
+ }
+ tomoyo_read_unlock(idx);
+ } else
+ error = -EINVAL;
+out:
+ kfree(data);
+ return error ? error : count;
+}
+
+/**
+ * tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname which current thread belongs to.
+ * @count: Size of @buf.
+ * @ppos: Bytes read by now.
+ *
+ * Returns read size on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read_self(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ const char *domain = tomoyo_domain()->domainname->name;
+ loff_t len = strlen(domain);
+ loff_t pos = *ppos;
+ if (pos >= len || !count)
+ return 0;
+ len -= pos;
+ if (count < len)
+ len = count;
+ if (copy_to_user(buf, domain + pos, len))
+ return -EFAULT;
+ *ppos += len;
+ return len;
+}
+
+/* Operations for /sys/kernel/security/tomoyo/self_domain interface. */
+static const struct file_operations tomoyo_self_operations = {
+ .write = tomoyo_write_self,
+ .read = tomoyo_read_self,
+};
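
A minimal userspace sketch of the new interface (illustrative only: the target domainname is hypothetical, and the write succeeds only when the "task manual_domain_transition" check above grants it):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            char current[4096];
            const char *target = "<kernel> /usr/sbin/sshd";  /* hypothetical domain */
            int fd = open("/sys/kernel/security/tomoyo/self_domain", O_RDWR);
            ssize_t len;

            if (fd < 0)
                    return 1;
            len = read(fd, current, sizeof(current) - 1);    /* current domainname */
            if (len > 0) {
                    current[len] = '\0';
                    printf("current domain: %s\n", current);
            }
            /* Request a manual transition; fails (e.g. EPERM) unless permitted. */
            if (write(fd, target, strlen(target)) < 0)
                    perror("self_domain");
            close(fd);
            return 0;
    }
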
+
+/**
* tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
*
* @inode: Pointer to "struct inode".
@@ -135,8 +253,6 @@ static int __init tomoyo_initerface_init(void)
TOMOYO_EXCEPTIONPOLICY);
tomoyo_create_entry("audit", 0400, tomoyo_dir,
TOMOYO_AUDIT);
- tomoyo_create_entry("self_domain", 0400, tomoyo_dir,
- TOMOYO_SELFDOMAIN);
tomoyo_create_entry(".process_status", 0600, tomoyo_dir,
TOMOYO_PROCESS_STATUS);
tomoyo_create_entry("stat", 0644, tomoyo_dir,
@@ -147,6 +263,9 @@ static int __init tomoyo_initerface_init(void)
TOMOYO_MANAGER);
tomoyo_create_entry("version", 0400, tomoyo_dir,
TOMOYO_VERSION);
+ securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL,
+ &tomoyo_self_operations);
+ tomoyo_load_builtin_policy();
return 0;
}
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index f776400a8f31..4b327b691745 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -442,6 +442,64 @@ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
}
+/**
+ * tomoyo_socket_listen - Check permission for listen().
+ *
+ * @sock: Pointer to "struct socket".
+ * @backlog: Backlog parameter.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_listen(struct socket *sock, int backlog)
+{
+ return tomoyo_socket_listen_permission(sock);
+}
+
+/**
+ * tomoyo_socket_connect - Check permission for connect().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_connect_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_bind - Check permission for bind().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_bind_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_sendmsg - Check permission for sendmsg().
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Size of message.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ return tomoyo_socket_sendmsg_permission(sock, msg, size);
+}
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
@@ -472,6 +530,10 @@ static struct security_operations tomoyo_security_ops = {
.sb_mount = tomoyo_sb_mount,
.sb_umount = tomoyo_sb_umount,
.sb_pivotroot = tomoyo_sb_pivotroot,
+ .socket_bind = tomoyo_socket_bind,
+ .socket_connect = tomoyo_socket_connect,
+ .socket_listen = tomoyo_socket_listen,
+ .socket_sendmsg = tomoyo_socket_sendmsg,
};
/* Lock for GC. */
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index c36bd1107fc8..4a9b4b2eb755 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -42,6 +42,39 @@ const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
[TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE,
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = TOMOYO_MAC_CATEGORY_MISC,
};
/**
@@ -126,6 +159,31 @@ char *tomoyo_read_token(struct tomoyo_acl_param *param)
}
/**
+ * tomoyo_get_domainname - Read a domainname from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a domainname on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param)
+{
+ char *start = param->data;
+ char *pos = start;
+ while (*pos) {
+ if (*pos++ != ' ' || *pos++ == '/')
+ continue;
+ pos -= 2;
+ *pos++ = '\0';
+ break;
+ }
+ param->data = pos;
+ if (tomoyo_correct_domain(start))
+ return tomoyo_get_name(start);
+ return NULL;
+}
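
Worked example: the scan ends the domainname at the first space that is not followed by '/', so for a remaining parameter string like the one below (names invented) the split comes out as:

    before:  param->data == "<kernel> /usr/sbin/sshd some_remaining_token"
    after:   returned domainname == "<kernel> /usr/sbin/sshd"
             param->data == "some_remaining_token"
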
+
+/**
* tomoyo_parse_ulong - Parse an "unsigned long" value.
*
* @result: Pointer to "unsigned long".
@@ -920,14 +978,17 @@ int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
const u8 index)
{
u8 mode;
- const u8 category = TOMOYO_MAC_CATEGORY_FILE;
+ struct tomoyo_profile *p;
+
if (!tomoyo_policy_loaded)
return TOMOYO_CONFIG_DISABLED;
- mode = tomoyo_profile(ns, profile)->config[index];
+ p = tomoyo_profile(ns, profile);
+ mode = p->config[index];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
- mode = tomoyo_profile(ns, profile)->config[category];
+ mode = p->config[tomoyo_index2category[index]
+ + TOMOYO_MAX_MAC_INDEX];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
- mode = tomoyo_profile(ns, profile)->default_config;
+ mode = p->default_config;
return mode & 3;
}
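
Concretely, the new fallback chain for a network index resolves as follows (each later line applies only while the previous one yielded TOMOYO_CONFIG_USE_DEFAULT):

    mode = p->config[TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT];               /* per-index setting              */
    mode = p->config[TOMOYO_MAC_CATEGORY_NETWORK + TOMOYO_MAX_MAC_INDEX];   /* per-category (CONFIG::network) */
    mode = p->default_config;                                               /* profile-wide default           */
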
@@ -996,6 +1057,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
perm = container_of(ptr, struct tomoyo_mkdev_acl,
head)->perm;
break;
+ case TOMOYO_TYPE_INET_ACL:
+ perm = container_of(ptr, struct tomoyo_inet_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ perm = container_of(ptr, struct tomoyo_unix_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ perm = 0;
+ break;
default:
perm = 1;
}