Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig | 2
-rw-r--r--  security/Kconfig.hardening | 29
-rw-r--r--  security/apparmor/include/policy.h | 11
-rw-r--r--  security/apparmor/label.c | 8
-rw-r--r--  security/apparmor/policy_unpack.c | 49
-rw-r--r--  security/commoncap.c | 6
-rw-r--r--  security/device_cgroup.c | 2
-rw-r--r--  security/inode.c | 5
-rw-r--r--  security/integrity/digsig.c | 5
-rw-r--r--  security/integrity/digsig_asymmetric.c | 4
-rw-r--r--  security/integrity/evm/evm_main.c | 8
-rw-r--r--  security/integrity/ima/Kconfig | 3
-rw-r--r--  security/integrity/ima/ima.h | 21
-rw-r--r--  security/integrity/ima/ima_api.c | 38
-rw-r--r--  security/integrity/ima/ima_appraise.c | 9
-rw-r--r--  security/integrity/ima/ima_init.c | 6
-rw-r--r--  security/integrity/ima/ima_main.c | 123
-rw-r--r--  security/integrity/ima/ima_policy.c | 163
-rw-r--r--  security/integrity/ima/ima_template.c | 23
-rw-r--r--  security/integrity/ima/ima_template_lib.c | 21
-rw-r--r--  security/integrity/ima/ima_template_lib.h | 4
-rw-r--r--  security/integrity/integrity.h | 6
-rw-r--r--  security/keys/Kconfig | 18
-rw-r--r--  security/keys/compat.c | 6
-rw-r--r--  security/keys/gc.c | 2
-rw-r--r--  security/keys/internal.h | 23
-rw-r--r--  security/keys/key.c | 36
-rw-r--r--  security/keys/keyctl.c | 96
-rw-r--r--  security/keys/keyring.c | 557
-rw-r--r--  security/keys/persistent.c | 10
-rw-r--r--  security/keys/proc.c | 7
-rw-r--r--  security/keys/process_keys.c | 327
-rw-r--r--  security/keys/request_key.c | 206
-rw-r--r--  security/keys/request_key_auth.c | 67
-rw-r--r--  security/loadpin/loadpin.c | 48
-rw-r--r--  security/lsm_audit.c | 5
-rw-r--r--  security/safesetid/lsm.c | 276
-rw-r--r--  security/safesetid/lsm.h | 34
-rw-r--r--  security/safesetid/securityfs.c | 307
-rw-r--r--  security/security.c | 23
-rw-r--r--  security/selinux/avc.c | 5
-rw-r--r--  security/selinux/hooks.c | 18
-rw-r--r--  security/selinux/include/audit.h | 5
-rw-r--r--  security/selinux/include/netif.h | 5
-rw-r--r--  security/selinux/include/objsec.h | 5
-rw-r--r--  security/selinux/netif.c | 5
-rw-r--r--  security/selinux/netlink.c | 5
-rw-r--r--  security/selinux/nlmsgtab.c | 10
-rw-r--r--  security/selinux/selinuxfs.c | 2
-rw-r--r--  security/selinux/ss/ebitmap.c | 10
-rw-r--r--  security/selinux/ss/services.c | 33
-rw-r--r--  security/selinux/ss/status.c | 5
-rw-r--r--  security/selinux/xfrm.c | 5
-rw-r--r--  security/smack/smack_lsm.c | 5
-rw-r--r--  security/smack/smack_netfilter.c | 5
-rw-r--r--  security/yama/yama_lsm.c | 6
56 files changed, 1798 insertions, 925 deletions
diff --git a/security/Kconfig b/security/Kconfig
index 466cc1f8ffed..06a30851511a 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -63,7 +63,7 @@ config PAGE_TABLE_ISOLATION
ensuring that the majority of kernel addresses are not mapped
into userspace.
- See Documentation/x86/pti.txt for more details.
+ See Documentation/x86/pti.rst for more details.
config SECURITY_INFINIBAND
bool "Infiniband Security Hooks"
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index c6cb2d9b2905..a1ffe2eb4d5f 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -160,6 +160,35 @@ config STACKLEAK_RUNTIME_DISABLE
runtime to control kernel stack erasing for kernels built with
CONFIG_GCC_PLUGIN_STACKLEAK.
+config INIT_ON_ALLOC_DEFAULT_ON
+ bool "Enable heap memory zeroing on allocation by default"
+ help
+ This has the effect of setting "init_on_alloc=1" on the kernel
+ command line. This can be disabled with "init_on_alloc=0".
+ When "init_on_alloc" is enabled, all page allocator and slab
+ allocator memory will be zeroed when allocated, eliminating
+ many kinds of "uninitialized heap memory" flaws, especially
+ heap content exposures. The performance impact varies by
+ workload, but most cases see <1% impact. Some synthetic
+ workloads have measured as high as 7%.
+
+config INIT_ON_FREE_DEFAULT_ON
+ bool "Enable heap memory zeroing on free by default"
+ help
+ This has the effect of setting "init_on_free=1" on the kernel
+ command line. This can be disabled with "init_on_free=0".
+ Similar to "init_on_alloc", when "init_on_free" is enabled,
+ all page allocator and slab allocator memory will be zeroed
+ when freed, eliminating many kinds of "uninitialized heap memory"
+ flaws, especially heap content exposures. The primary difference
+ with "init_on_free" is that data lifetime in memory is reduced,
+ as anything freed is wiped immediately, making live forensics or
+ cold boot memory attacks unable to recover freed memory contents.
+ The performance impact varies by workload, but is more expensive
+ than "init_on_alloc" due to the negative cache effects of
+ touching "cold" memory areas. Most cases see 3-5% impact. Some
+ synthetic workloads have measured as high as 8%.
+
endmenu
endmenu
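The two new Kconfig.hardening options above describe the kernel's page and slab allocators, but the class of bug they close ("uninitialized heap memory" content exposure) can be illustrated with a userspace analogy. The sketch below is only that: malloc()/calloc() stand in for the kernel allocators and are not themselves affected by init_on_alloc/init_on_free.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *p = malloc(64);	/* contents are indeterminate: stale heap data */
	char *q = calloc(1, 64);	/* zeroed on allocation, like init_on_alloc=1 */

	if (!p || !q)
		return 1;

	/* Forgetting this memset() is the kind of flaw being mitigated:
	 * whatever previously occupied 'p' could leak if it were copied
	 * to a log, the network, or back to a caller. */
	memset(p, 0, 64);

	printf("p[0]=%d q[0]=%d\n", p[0], q[0]);
	free(p);
	free(q);
	return 0;
}

With init_on_alloc=1 every kernel allocation behaves like the calloc() line; init_on_free=1 additionally wipes contents at free time, which is what shortens data lifetime against live forensics or cold boot attacks as described in the help text.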
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 1ce4e9bdac48..b5b4b8190e65 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -213,7 +213,16 @@ static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
return labels_profile(aa_get_newest_label(&p->label));
}
-#define PROFILE_MEDIATES(P, T) ((P)->policy.start[(unsigned char) (T)])
+static inline unsigned int PROFILE_MEDIATES(struct aa_profile *profile,
+ unsigned char class)
+{
+ if (class <= AA_CLASS_LAST)
+ return profile->policy.start[class];
+ else
+ return aa_dfa_match_len(profile->policy.dfa,
+ profile->policy.start[0], &class, 1);
+}
+
static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
u16 AF) {
unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 068e93c5d29c..59f1cc2557a7 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -76,7 +76,7 @@ void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
AA_BUG(!orig);
AA_BUG(!new);
- lockdep_assert_held_exclusive(&labels_set(orig)->lock);
+ lockdep_assert_held_write(&labels_set(orig)->lock);
tmp = rcu_dereference_protected(orig->proxy->label,
&labels_ns(orig)->lock);
@@ -566,7 +566,7 @@ static bool __label_remove(struct aa_label *label, struct aa_label *new)
AA_BUG(!ls);
AA_BUG(!label);
- lockdep_assert_held_exclusive(&ls->lock);
+ lockdep_assert_held_write(&ls->lock);
if (new)
__aa_proxy_redirect(label, new);
@@ -603,7 +603,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new)
AA_BUG(!ls);
AA_BUG(!old);
AA_BUG(!new);
- lockdep_assert_held_exclusive(&ls->lock);
+ lockdep_assert_held_write(&ls->lock);
AA_BUG(new->flags & FLAG_IN_TREE);
if (!label_is_stale(old))
@@ -640,7 +640,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls,
AA_BUG(!ls);
AA_BUG(!label);
AA_BUG(labels_set(label) != ls);
- lockdep_assert_held_exclusive(&ls->lock);
+ lockdep_assert_held_write(&ls->lock);
AA_BUG(label->flags & FLAG_IN_TREE);
/* Figure out where to put new node */
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 01957ce9252b..8cfc9493eefc 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -219,16 +219,21 @@ static void *kvmemdup(const void *src, size_t len)
static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
size_t size = 0;
+ void *pos = e->pos;
if (!inbounds(e, sizeof(u16)))
- return 0;
+ goto fail;
size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
e->pos += sizeof(__le16);
if (!inbounds(e, size))
- return 0;
+ goto fail;
*chunk = e->pos;
e->pos += size;
return size;
+
+fail:
+ e->pos = pos;
+ return 0;
}
/* unpack control byte */
@@ -272,7 +277,7 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
char *tag = NULL;
size_t size = unpack_u16_chunk(e, &tag);
/* if a name is specified it must match. otherwise skip tag */
- if (name && (!size || strcmp(name, tag)))
+ if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
goto fail;
} else if (name) {
/* if a name is specified and there is no name tag fail */
@@ -290,62 +295,84 @@ fail:
static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
+ void *pos = e->pos;
+
if (unpack_nameX(e, AA_U8, name)) {
if (!inbounds(e, sizeof(u8)))
- return 0;
+ goto fail;
if (data)
*data = get_unaligned((u8 *)e->pos);
e->pos += sizeof(u8);
return 1;
}
+
+fail:
+ e->pos = pos;
return 0;
}
static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
+ void *pos = e->pos;
+
if (unpack_nameX(e, AA_U32, name)) {
if (!inbounds(e, sizeof(u32)))
- return 0;
+ goto fail;
if (data)
*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
e->pos += sizeof(u32);
return 1;
}
+
+fail:
+ e->pos = pos;
return 0;
}
static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
+ void *pos = e->pos;
+
if (unpack_nameX(e, AA_U64, name)) {
if (!inbounds(e, sizeof(u64)))
- return 0;
+ goto fail;
if (data)
*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
e->pos += sizeof(u64);
return 1;
}
+
+fail:
+ e->pos = pos;
return 0;
}
static size_t unpack_array(struct aa_ext *e, const char *name)
{
+ void *pos = e->pos;
+
if (unpack_nameX(e, AA_ARRAY, name)) {
int size;
if (!inbounds(e, sizeof(u16)))
- return 0;
+ goto fail;
size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
e->pos += sizeof(u16);
return size;
}
+
+fail:
+ e->pos = pos;
return 0;
}
static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
+ void *pos = e->pos;
+
if (unpack_nameX(e, AA_BLOB, name)) {
u32 size;
if (!inbounds(e, sizeof(u32)))
- return 0;
+ goto fail;
size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
e->pos += sizeof(u32);
if (inbounds(e, (size_t) size)) {
@@ -354,6 +381,9 @@ static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
return size;
}
}
+
+fail:
+ e->pos = pos;
return 0;
}
@@ -370,9 +400,10 @@ static int unpack_str(struct aa_ext *e, const char **string, const char *name)
if (src_str[size - 1] != 0)
goto fail;
*string = src_str;
+
+ return size;
}
}
- return size;
fail:
e->pos = pos;
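The recurring change in policy_unpack.c above is that each unpack helper now snapshots e->pos on entry and rewinds to it on any failure path, so a record that is only partially consumed (for example, a length field read before the bounds check on its payload fails) never leaves the unpack cursor stranded mid-element. A minimal standalone sketch of that pattern follows; struct ext and unpack_chunk() are hypothetical names used for illustration, not the AppArmor API.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct ext {
	const uint8_t *pos;	/* unpack cursor */
	const uint8_t *end;
};

static bool inbounds(struct ext *e, size_t size)
{
	return (size_t)(e->end - e->pos) >= size;
}

/* Unpack a little-endian u16 length followed by that many payload bytes. */
static bool unpack_chunk(struct ext *e, const uint8_t **chunk, size_t *len)
{
	const uint8_t *saved = e->pos;	/* snapshot the cursor */
	size_t size;

	if (!inbounds(e, sizeof(uint16_t)))
		goto fail;
	size = e->pos[0] | (e->pos[1] << 8);
	e->pos += sizeof(uint16_t);	/* cursor has moved, so a failure... */

	if (!inbounds(e, size))		/* ...below must rewind, not just return */
		goto fail;
	*chunk = e->pos;
	*len = size;
	e->pos += size;
	return true;

fail:
	e->pos = saved;
	return false;
}

int main(void)
{
	/* one valid 3-byte chunk, then a length (16) with no payload behind it */
	uint8_t buf[] = { 0x03, 0x00, 'a', 'b', 'c', 0x10, 0x00 };
	struct ext e = { buf, buf + sizeof(buf) };
	const uint8_t *chunk;
	size_t len;

	while (unpack_chunk(&e, &chunk, &len))
		printf("chunk of %zu bytes\n", len);

	/* the failed unpack rewound e.pos to the start of the bad record */
	printf("stopped at offset %zu\n", (size_t)(e.pos - buf));
	return 0;
}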
diff --git a/security/commoncap.c b/security/commoncap.c
index c0b9664ee49e..f4ee0ae106b2 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -915,7 +915,7 @@ int cap_inode_setxattr(struct dentry *dentry, const char *name,
/* Ignore non-security xattrs */
if (strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1) != 0)
+ XATTR_SECURITY_PREFIX_LEN) != 0)
return 0;
/*
@@ -947,7 +947,7 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name)
/* Ignore non-security xattrs */
if (strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1) != 0)
+ XATTR_SECURITY_PREFIX_LEN) != 0)
return 0;
if (strcmp(name, XATTR_NAME_CAPS) == 0) {
@@ -1339,7 +1339,7 @@ int cap_mmap_file(struct file *file, unsigned long reqprot,
#ifdef CONFIG_SECURITY
-struct security_hook_list capability_hooks[] __lsm_ro_after_init = {
+static struct security_hook_list capability_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(capable, cap_capable),
LSM_HOOK_INIT(settime, cap_settime),
LSM_HOOK_INIT(ptrace_access_check, cap_ptrace_access_check),
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index dc28914fa72e..c07196502577 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -509,7 +509,7 @@ static inline int may_allow_all(struct dev_cgroup *parent)
* This is one of the three key functions for hierarchy implementation.
* This function is responsible for re-evaluating all the cgroup's active
* exceptions due to a parent's exception change.
- * Refer to Documentation/cgroup-v1/devices.txt for more details.
+ * Refer to Documentation/cgroup-v1/devices.rst for more details.
*/
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
diff --git a/security/inode.c b/security/inode.c
index aacc4dabba7d..fcff7f08bb1c 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* inode.c - securityfs
*
* Copyright (C) 2005 Greg Kroah-Hartman <gregkh@suse.de>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* Based on fs/debugfs/inode.c which had the following copyright notice:
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Inc.
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 4582bc26770a..868ade3e8970 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -69,8 +69,9 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
return -EOPNOTSUPP;
}
-static int __integrity_init_keyring(const unsigned int id, key_perm_t perm,
- struct key_restriction *restriction)
+static int __init __integrity_init_keyring(const unsigned int id,
+ key_perm_t perm,
+ struct key_restriction *restriction)
{
const struct cred *cred = current_cred();
int err = 0;
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index ad4b323ecea1..55aec161d0e1 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -35,7 +35,7 @@ static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
key_ref_t kref;
kref = keyring_search(make_key_ref(key, 1),
- &key_type_asymmetric, name);
+ &key_type_asymmetric, name, true);
if (!IS_ERR(kref)) {
pr_err("Key '%s' is in ima_blacklist_keyring\n", name);
return ERR_PTR(-EKEYREJECTED);
@@ -47,7 +47,7 @@ static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
key_ref_t kref;
kref = keyring_search(make_key_ref(keyring, 1),
- &key_type_asymmetric, name);
+ &key_type_asymmetric, name, true);
if (IS_ERR(kref))
key = ERR_CAST(kref);
else
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 5bbd8b4dc29a..f9a81b187fae 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -166,7 +166,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
/* check value type */
switch (xattr_data->type) {
case EVM_XATTR_HMAC:
- if (xattr_len != sizeof(struct evm_ima_xattr_data)) {
+ if (xattr_len != sizeof(struct evm_xattr)) {
evm_status = INTEGRITY_FAIL;
goto out;
}
@@ -176,7 +176,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
xattr_value_len, &digest);
if (rc)
break;
- rc = crypto_memneq(xattr_data->digest, digest.digest,
+ rc = crypto_memneq(xattr_data->data, digest.digest,
SHA1_DIGEST_SIZE);
if (rc)
rc = -EINVAL;
@@ -520,7 +520,7 @@ int evm_inode_init_security(struct inode *inode,
const struct xattr *lsm_xattr,
struct xattr *evm_xattr)
{
- struct evm_ima_xattr_data *xattr_data;
+ struct evm_xattr *xattr_data;
int rc;
if (!evm_key_loaded() || !evm_protected_xattr(lsm_xattr->name))
@@ -530,7 +530,7 @@ int evm_inode_init_security(struct inode *inode,
if (!xattr_data)
return -ENOMEM;
- xattr_data->type = EVM_XATTR_HMAC;
+ xattr_data->data.type = EVM_XATTR_HMAC;
rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
if (rc < 0)
goto out;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 2692c7358c2c..2ced99dde694 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -160,7 +160,8 @@ config IMA_APPRAISE
config IMA_ARCH_POLICY
bool "Enable loading an IMA architecture specific policy"
- depends on KEXEC_VERIFY_SIG || IMA_APPRAISE && INTEGRITY_ASYMMETRIC_KEYS
+ depends on (KEXEC_VERIFY_SIG && IMA) || IMA_APPRAISE \
+ && INTEGRITY_ASYMMETRIC_KEYS
default n
help
This option enables loading an IMA architecture specific policy
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index ca10917b5f89..011b91c79351 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -61,6 +61,8 @@ struct ima_event_data {
struct evm_ima_xattr_data *xattr_value;
int xattr_len;
const char *violation;
+ const void *buf;
+ int buf_len;
};
/* IMA template field data definition */
@@ -142,7 +144,11 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
int ima_init_crypto(void);
void ima_putc(struct seq_file *m, void *data, int datalen);
void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
+int template_desc_init_fields(const char *template_fmt,
+ const struct ima_template_field ***fields,
+ int *num_fields);
struct ima_template_desc *ima_template_desc_current(void);
+struct ima_template_desc *lookup_template_desc(const char *name);
int ima_restore_measurement_entry(struct ima_template_entry *entry);
int ima_restore_measurement_list(loff_t bufsize, void *buf);
int ima_measurements_show(struct seq_file *m, void *v);
@@ -150,6 +156,8 @@ unsigned long ima_get_binary_runtime_size(void);
int ima_init_template(void);
void ima_init_template_list(void);
int __init ima_init_digests(void);
+int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data);
/*
* used to protect h_table and sha_table
@@ -180,6 +188,7 @@ static inline unsigned long ima_hash_key(u8 *digest)
hook(KEXEC_KERNEL_CHECK) \
hook(KEXEC_INITRAMFS_CHECK) \
hook(POLICY_CHECK) \
+ hook(KEXEC_CMDLINE) \
hook(MAX_CHECK)
#define __ima_hook_enumify(ENUM) ENUM,
@@ -189,7 +198,8 @@ enum ima_hooks {
/* LIM API function definitions */
int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
- int mask, enum ima_hooks func, int *pcr);
+ int mask, enum ima_hooks func, int *pcr,
+ struct ima_template_desc **template_desc);
int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func);
int ima_collect_measurement(struct integrity_iint_cache *iint,
struct file *file, void *buf, loff_t size,
@@ -197,11 +207,13 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int pcr);
+ int xattr_len, int pcr,
+ struct ima_template_desc *template_desc);
void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename);
int ima_alloc_init_template(struct ima_event_data *event_data,
- struct ima_template_entry **entry);
+ struct ima_template_entry **entry,
+ struct ima_template_desc *template_desc);
int ima_store_template(struct ima_template_entry *entry, int violation,
struct inode *inode,
const unsigned char *filename, int pcr);
@@ -210,7 +222,8 @@ const char *ima_d_path(const struct path *path, char **pathbuf, char *filename);
/* IMA policy related functions */
int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
- enum ima_hooks func, int mask, int flags, int *pcr);
+ enum ima_hooks func, int mask, int flags, int *pcr,
+ struct ima_template_desc **template_desc);
void ima_init_policy(void);
void ima_update_policy(void);
void ima_update_policy_flag(void);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 35c129cbb7e9..f614e22bf39f 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -34,11 +34,17 @@ void ima_free_template_entry(struct ima_template_entry *entry)
* ima_alloc_init_template - create and initialize a new template entry
*/
int ima_alloc_init_template(struct ima_event_data *event_data,
- struct ima_template_entry **entry)
+ struct ima_template_entry **entry,
+ struct ima_template_desc *desc)
{
- struct ima_template_desc *template_desc = ima_template_desc_current();
+ struct ima_template_desc *template_desc;
int i, result = 0;
+ if (desc)
+ template_desc = desc;
+ else
+ template_desc = ima_template_desc_current();
+
*entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
sizeof(struct ima_field_data), GFP_NOFS);
if (!*entry)
@@ -129,15 +135,17 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
{
struct ima_template_entry *entry;
struct inode *inode = file_inode(file);
- struct ima_event_data event_data = {iint, file, filename, NULL, 0,
- cause};
+ struct ima_event_data event_data = { .iint = iint,
+ .file = file,
+ .filename = filename,
+ .violation = cause };
int violation = 1;
int result;
/* can overflow, only indicator */
atomic_long_inc(&ima_htable.violations);
- result = ima_alloc_init_template(&event_data, &entry);
+ result = ima_alloc_init_template(&event_data, &entry, NULL);
if (result < 0) {
result = -ENOMEM;
goto err_out;
@@ -160,11 +168,13 @@ err_out:
* MAY_APPEND)
* @func: caller identifier
* @pcr: pointer filled in if matched measure policy sets pcr=
+ * @template_desc: pointer filled in if matched measure policy sets template=
*
* The policy is defined in terms of keypairs:
* subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
* func: FILE_CHECK | BPRM_CHECK | CREDS_CHECK | MMAP_CHECK | MODULE_CHECK
+ * | KEXEC_CMDLINE
* mask: contains the permission mask
* fsmagic: hex value
*
@@ -172,13 +182,15 @@ err_out:
*
*/
int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
- int mask, enum ima_hooks func, int *pcr)
+ int mask, enum ima_hooks func, int *pcr,
+ struct ima_template_desc **template_desc)
{
int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH;
flags &= ima_policy_flag;
- return ima_match_policy(inode, cred, secid, func, mask, flags, pcr);
+ return ima_match_policy(inode, cred, secid, func, mask, flags, pcr,
+ template_desc);
}
/*
@@ -273,21 +285,25 @@ out:
void ima_store_measurement(struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int pcr)
+ int xattr_len, int pcr,
+ struct ima_template_desc *template_desc)
{
static const char op[] = "add_template_measure";
static const char audit_cause[] = "ENOMEM";
int result = -ENOMEM;
struct inode *inode = file_inode(file);
struct ima_template_entry *entry;
- struct ima_event_data event_data = {iint, file, filename, xattr_value,
- xattr_len, NULL};
+ struct ima_event_data event_data = { .iint = iint,
+ .file = file,
+ .filename = filename,
+ .xattr_value = xattr_value,
+ .xattr_len = xattr_len };
int violation = 0;
if (iint->measured_pcrs & (0x1 << pcr))
return;
- result = ima_alloc_init_template(&event_data, &entry);
+ result = ima_alloc_init_template(&event_data, &entry, template_desc);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, 0);
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index f0cd67cab6aa..89b83194d1dc 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -54,7 +54,7 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
security_task_getsecid(current, &secid);
return ima_match_policy(inode, current_cred(), secid, func, mask,
- IMA_APPRAISE | IMA_HASH, NULL);
+ IMA_APPRAISE | IMA_HASH, NULL, NULL);
}
static int ima_fix_xattr(struct dentry *dentry,
@@ -165,7 +165,8 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
return sig->hash_algo;
break;
case IMA_XATTR_DIGEST_NG:
- ret = xattr_value->digest[0];
+ /* first byte contains algorithm id */
+ ret = xattr_value->data[0];
if (ret < HASH_ALGO__LAST)
return ret;
break;
@@ -173,7 +174,7 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
/* this is for backward compatibility */
if (xattr_len == 21) {
unsigned int zero = 0;
- if (!memcmp(&xattr_value->digest[16], &zero, 4))
+ if (!memcmp(&xattr_value->data[16], &zero, 4))
return HASH_ALGO_MD5;
else
return HASH_ALGO_SHA1;
@@ -272,7 +273,7 @@ int ima_appraise_measurement(enum ima_hooks func,
/* xattr length may be longer. md5 hash in previous
version occupied 20 bytes in xattr, instead of 16
*/
- rc = memcmp(&xattr_value->digest[hash_start],
+ rc = memcmp(&xattr_value->data[hash_start],
iint->ima_hash->digest,
iint->ima_hash->length);
else
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 1e47c1026471..5d55ade5f3b9 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -45,8 +45,8 @@ static int __init ima_add_boot_aggregate(void)
const char *audit_cause = "ENOMEM";
struct ima_template_entry *entry;
struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
- struct ima_event_data event_data = {iint, NULL, boot_aggregate_name,
- NULL, 0, NULL};
+ struct ima_event_data event_data = { .iint = iint,
+ .filename = boot_aggregate_name };
int result = -ENOMEM;
int violation = 0;
struct {
@@ -68,7 +68,7 @@ static int __init ima_add_boot_aggregate(void)
}
}
- result = ima_alloc_init_template(&event_data, &entry);
+ result = ima_alloc_init_template(&event_data, &entry, NULL);
if (result < 0) {
audit_cause = "alloc_entry";
goto err_out;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index f556e6c18f9b..584019728660 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -39,6 +39,10 @@ int ima_appraise;
int ima_hash_algo = HASH_ALGO_SHA1;
static int hash_setup_done;
+static struct notifier_block ima_lsm_policy_notifier = {
+ .notifier_call = ima_lsm_policy_change,
+};
+
static int __init hash_setup(char *str)
{
struct ima_template_desc *template_desc = ima_template_desc_current();
@@ -68,6 +72,27 @@ out:
}
__setup("ima_hash=", hash_setup);
+/* Prevent mmap'ing a file execute that is already mmap'ed write */
+static int mmap_violation_check(enum ima_hooks func, struct file *file,
+ char **pathbuf, const char **pathname,
+ char *filename)
+{
+ struct inode *inode;
+ int rc = 0;
+
+ if ((func == MMAP_CHECK) && mapping_writably_mapped(file->f_mapping)) {
+ rc = -ETXTBSY;
+ inode = file_inode(file);
+
+ if (!*pathbuf) /* ima_rdwr_violation possibly pre-fetched */
+ *pathname = ima_d_path(&file->f_path, pathbuf,
+ filename);
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, *pathname,
+ "mmap_file", "mmapped_writers", rc, 0);
+ }
+ return rc;
+}
+
/*
* ima_rdwr_violation_check
*
@@ -170,7 +195,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
{
struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint = NULL;
- struct ima_template_desc *template_desc;
+ struct ima_template_desc *template_desc = NULL;
char *pathbuf = NULL;
char filename[NAME_MAX];
const char *pathname = NULL;
@@ -188,7 +213,8 @@ static int process_measurement(struct file *file, const struct cred *cred,
* bitmask based on the appraise/audit/measurement policy.
* Included is the appraise submask.
*/
- action = ima_get_action(inode, cred, secid, mask, func, &pcr);
+ action = ima_get_action(inode, cred, secid, mask, func, &pcr,
+ &template_desc);
violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) &&
(ima_policy_flag & IMA_MEASURE));
if (!action && !violation_check)
@@ -266,12 +292,15 @@ static int process_measurement(struct file *file, const struct cred *cred,
/* Nothing to do, just return existing appraised status */
if (!action) {
- if (must_appraise)
- rc = ima_get_cache_status(iint, func);
+ if (must_appraise) {
+ rc = mmap_violation_check(func, file, &pathbuf,
+ &pathname, filename);
+ if (!rc)
+ rc = ima_get_cache_status(iint, func);
+ }
goto out_locked;
}
- template_desc = ima_template_desc_current();
if ((action & IMA_APPRAISE_SUBMASK) ||
strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0)
/* read 'security.ima' */
@@ -288,12 +317,16 @@ static int process_measurement(struct file *file, const struct cred *cred,
if (action & IMA_MEASURE)
ima_store_measurement(iint, file, pathname,
- xattr_value, xattr_len, pcr);
+ xattr_value, xattr_len, pcr,
+ template_desc);
if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
inode_lock(inode);
rc = ima_appraise_measurement(func, iint, file, pathname,
xattr_value, xattr_len);
inode_unlock(inode);
+ if (!rc)
+ rc = mmap_violation_check(func, file, &pathbuf,
+ &pathname, filename);
}
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
@@ -572,6 +605,80 @@ int ima_load_data(enum kernel_load_data_id id)
return 0;
}
+/*
+ * process_buffer_measurement - Measure the buffer to ima log.
+ * @buf: pointer to the buffer that needs to be added to the log.
+ * @size: size of buffer(in bytes).
+ * @eventname: event name to be used for the buffer entry.
+ * @cred: a pointer to a credentials structure for user validation.
+ * @secid: the secid of the task to be validated.
+ *
+ * Based on policy, the buffer is measured into the ima log.
+ */
+static void process_buffer_measurement(const void *buf, int size,
+ const char *eventname,
+ const struct cred *cred, u32 secid)
+{
+ int ret = 0;
+ struct ima_template_entry *entry = NULL;
+ struct integrity_iint_cache iint = {};
+ struct ima_event_data event_data = {.iint = &iint,
+ .filename = eventname,
+ .buf = buf,
+ .buf_len = size};
+ struct ima_template_desc *template_desc = NULL;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash = {};
+ int violation = 0;
+ int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ int action = 0;
+
+ action = ima_get_action(NULL, cred, secid, 0, KEXEC_CMDLINE, &pcr,
+ &template_desc);
+ if (!(action & IMA_MEASURE))
+ return;
+
+ iint.ima_hash = &hash.hdr;
+ iint.ima_hash->algo = ima_hash_algo;
+ iint.ima_hash->length = hash_digest_size[ima_hash_algo];
+
+ ret = ima_calc_buffer_hash(buf, size, iint.ima_hash);
+ if (ret < 0)
+ goto out;
+
+ ret = ima_alloc_init_template(&event_data, &entry, template_desc);
+ if (ret < 0)
+ goto out;
+
+ ret = ima_store_template(entry, violation, NULL, buf, pcr);
+
+ if (ret < 0)
+ ima_free_template_entry(entry);
+
+out:
+ return;
+}
+
+/**
+ * ima_kexec_cmdline - measure kexec cmdline boot args
+ * @buf: pointer to buffer
+ * @size: size of buffer
+ *
+ * Buffers can only be measured, not appraised.
+ */
+void ima_kexec_cmdline(const void *buf, int size)
+{
+ u32 secid;
+
+ if (buf && size != 0) {
+ security_task_getsecid(current, &secid);
+ process_buffer_measurement(buf, size, "kexec-cmdline",
+ current_cred(), secid);
+ }
+}
+
static int __init init_ima(void)
{
int error;
@@ -589,6 +696,10 @@ static int __init init_ima(void)
error = ima_init();
}
+ error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier);
+ if (error)
+ pr_warn("Couldn't register LSM notifier, error %d\n", error);
+
if (!error)
ima_update_policy_flag();
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 7b53f2ca58e2..6df7f641ff66 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -76,6 +76,7 @@ struct ima_rule_entry {
int type; /* audit type */
} lsm[MAX_LSM_RULES];
char *fsname;
+ struct ima_template_desc *template;
};
/*
@@ -195,7 +196,7 @@ static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
};
/* An array of architecture specific rules */
-struct ima_rule_entry *arch_policy_entry __ro_after_init;
+static struct ima_rule_entry *arch_policy_entry __ro_after_init;
static LIST_HEAD(ima_default_rules);
static LIST_HEAD(ima_policy_rules);
@@ -245,31 +246,113 @@ static int __init default_appraise_policy_setup(char *str)
}
__setup("ima_appraise_tcb", default_appraise_policy_setup);
+static void ima_lsm_free_rule(struct ima_rule_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ kfree(entry->lsm[i].rule);
+ kfree(entry->lsm[i].args_p);
+ }
+ kfree(entry);
+}
+
+static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+{
+ struct ima_rule_entry *nentry;
+ int i, result;
+
+ nentry = kmalloc(sizeof(*nentry), GFP_KERNEL);
+ if (!nentry)
+ return NULL;
+
+ /*
+ * Immutable elements are copied over as pointers and data; only
+ * lsm rules can change
+ */
+ memcpy(nentry, entry, sizeof(*nentry));
+ memset(nentry->lsm, 0, FIELD_SIZEOF(struct ima_rule_entry, lsm));
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (!entry->lsm[i].rule)
+ continue;
+
+ nentry->lsm[i].type = entry->lsm[i].type;
+ nentry->lsm[i].args_p = kstrdup(entry->lsm[i].args_p,
+ GFP_KERNEL);
+ if (!nentry->lsm[i].args_p)
+ goto out_err;
+
+ result = security_filter_rule_init(nentry->lsm[i].type,
+ Audit_equal,
+ nentry->lsm[i].args_p,
+ &nentry->lsm[i].rule);
+ if (result == -EINVAL)
+ pr_warn("ima: rule for LSM \'%d\' is undefined\n",
+ entry->lsm[i].type);
+ }
+ return nentry;
+
+out_err:
+ ima_lsm_free_rule(nentry);
+ return NULL;
+}
+
+static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+{
+ struct ima_rule_entry *nentry;
+
+ nentry = ima_lsm_copy_rule(entry);
+ if (!nentry)
+ return -ENOMEM;
+
+ list_replace_rcu(&entry->list, &nentry->list);
+ synchronize_rcu();
+ ima_lsm_free_rule(entry);
+
+ return 0;
+}
+
/*
* The LSM policy can be reloaded, leaving the IMA LSM based rules referring
* to the old, stale LSM policy. Update the IMA LSM based rules to reflect
- * the reloaded LSM policy. We assume the rules still exist; and BUG_ON() if
- * they don't.
+ * the reloaded LSM policy.
*/
static void ima_lsm_update_rules(void)
{
- struct ima_rule_entry *entry;
- int result;
- int i;
+ struct ima_rule_entry *entry, *e;
+ int i, result, needs_update;
- list_for_each_entry(entry, &ima_policy_rules, list) {
+ list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
+ needs_update = 0;
for (i = 0; i < MAX_LSM_RULES; i++) {
- if (!entry->lsm[i].rule)
- continue;
- result = security_filter_rule_init(entry->lsm[i].type,
- Audit_equal,
- entry->lsm[i].args_p,
- &entry->lsm[i].rule);
- BUG_ON(!entry->lsm[i].rule);
+ if (entry->lsm[i].rule) {
+ needs_update = 1;
+ break;
+ }
+ }
+ if (!needs_update)
+ continue;
+
+ result = ima_lsm_update_rule(entry);
+ if (result) {
+ pr_err("ima: lsm rule update error %d\n",
+ result);
+ return;
}
}
}
+int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data)
+{
+ if (event != LSM_POLICY_CHANGE)
+ return NOTIFY_DONE;
+
+ ima_lsm_update_rules();
+ return NOTIFY_OK;
+}
+
/**
* ima_match_rules - determine whether an inode matches the measure rule.
* @rule: a pointer to a rule
@@ -287,6 +370,11 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
{
int i;
+ if (func == KEXEC_CMDLINE) {
+ if ((rule->flags & IMA_FUNC) && (rule->func == func))
+ return true;
+ return false;
+ }
if ((rule->flags & IMA_FUNC) &&
(rule->func != func && func != POST_SETATTR))
return false;
@@ -323,11 +411,10 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
for (i = 0; i < MAX_LSM_RULES; i++) {
int rc = 0;
u32 osid;
- int retried = 0;
if (!rule->lsm[i].rule)
continue;
-retry:
+
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
@@ -348,11 +435,6 @@ retry:
default:
break;
}
- if ((rc < 0) && (!retried)) {
- retried = 1;
- ima_lsm_update_rules();
- goto retry;
- }
if (!rc)
return false;
}
@@ -393,6 +475,7 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
* @func: IMA hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
* @pcr: set the pcr to extend
+ * @template_desc: the template that should be used for this rule
*
* Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
* conditions.
@@ -402,7 +485,8 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
* than writes so ima_match_policy() is classical RCU candidate.
*/
int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
- enum ima_hooks func, int mask, int flags, int *pcr)
+ enum ima_hooks func, int mask, int flags, int *pcr,
+ struct ima_template_desc **template_desc)
{
struct ima_rule_entry *entry;
int action = 0, actmask = flags | (flags << 1);
@@ -434,6 +518,11 @@ int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
if ((pcr) && (entry->flags & IMA_PCR))
*pcr = entry->pcr;
+ if (template_desc && entry->template)
+ *template_desc = entry->template;
+ else if (template_desc)
+ *template_desc = ima_template_desc_current();
+
if (!actmask)
break;
}
@@ -672,7 +761,7 @@ enum {
Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
Opt_appraise_type, Opt_permit_directio,
- Opt_pcr, Opt_err
+ Opt_pcr, Opt_template, Opt_err
};
static const match_table_t policy_tokens = {
@@ -706,6 +795,7 @@ static const match_table_t policy_tokens = {
{Opt_appraise_type, "appraise_type=%s"},
{Opt_permit_directio, "permit_directio"},
{Opt_pcr, "pcr=%s"},
+ {Opt_template, "template=%s"},
{Opt_err, NULL}
};
@@ -759,6 +849,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
char *from;
char *p;
bool uid_token;
+ struct ima_template_desc *template_desc;
int result = 0;
ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
@@ -866,6 +957,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->func = KEXEC_INITRAMFS_CHECK;
else if (strcmp(args[0].from, "POLICY_CHECK") == 0)
entry->func = POLICY_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0)
+ entry->func = KEXEC_CMDLINE;
else
result = -EINVAL;
if (!result)
@@ -1055,6 +1148,28 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->flags |= IMA_PCR;
break;
+ case Opt_template:
+ ima_log_string(ab, "template", args[0].from);
+ if (entry->action != MEASURE) {
+ result = -EINVAL;
+ break;
+ }
+ template_desc = lookup_template_desc(args[0].from);
+ if (!template_desc || entry->template) {
+ result = -EINVAL;
+ break;
+ }
+
+ /*
+ * template_desc_init_fields() does nothing if
+ * the template is already initialised, so
+ * it's safe to do this unconditionally
+ */
+ template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ entry->template = template_desc;
+ break;
case Opt_err:
ima_log_string(ab, "UNKNOWN", p);
result = -EINVAL;
@@ -1330,6 +1445,8 @@ int ima_policy_show(struct seq_file *m, void *v)
}
}
}
+ if (entry->template)
+ seq_printf(m, "template=%s ", entry->template->name);
if (entry->flags & IMA_DIGSIG_REQUIRED)
seq_puts(m, "appraise_type=imasig ");
if (entry->flags & IMA_PERMIT_DIRECTIO)
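With the ima_policy.c changes above, a rule can select the new KEXEC_CMDLINE hook and a per-rule template. As a sketch of the userspace side (not part of this patch), the program below writes such a rule through the IMA securityfs interface; it assumes securityfs is mounted at /sys/kernel/security, that the caller is privileged, and that the running policy still permits updates.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char rule[] = "measure func=KEXEC_CMDLINE template=ima-buf\n";
	int fd = open("/sys/kernel/security/ima/policy", O_WRONLY);

	if (fd < 0) {
		perror("open ima/policy");
		return 1;
	}
	if (write(fd, rule, strlen(rule)) < 0)
		perror("write rule");
	close(fd);	/* the updated rule set takes effect when the file is closed */
	return 0;
}

Measured kexec command lines then appear in the measurement list as "kexec-cmdline" entries (the event name used by ima_kexec_cmdline() in ima_main.c), recorded with the new ima-buf template.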
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index f4354c267396..cb349d7b2601 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -22,6 +22,7 @@ static struct ima_template_desc builtin_templates[] = {
{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
{.name = "ima-ng", .fmt = "d-ng|n-ng"},
{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+ {.name = "ima-buf", .fmt = "d-ng|n-ng|buf"},
{.name = "", .fmt = ""}, /* placeholder for a custom format */
};
@@ -39,14 +40,18 @@ static const struct ima_template_field supported_fields[] = {
.field_show = ima_show_template_string},
{.field_id = "sig", .field_init = ima_eventsig_init,
.field_show = ima_show_template_sig},
+ {.field_id = "buf", .field_init = ima_eventbuf_init,
+ .field_show = ima_show_template_buf},
};
-#define MAX_TEMPLATE_NAME_LEN 15
+
+/*
+ * Used when restoring measurements carried over from a kexec. 'd' and 'n' don't
+ * need to be accounted for since they shouldn't be defined in the same template
+ * description as 'd-ng' and 'n-ng' respectively.
+ */
+#define MAX_TEMPLATE_NAME_LEN sizeof("d-ng|n-ng|sig|buf")
static struct ima_template_desc *ima_template;
-static struct ima_template_desc *lookup_template_desc(const char *name);
-static int template_desc_init_fields(const char *template_fmt,
- const struct ima_template_field ***fields,
- int *num_fields);
static int __init ima_template_setup(char *str)
{
@@ -104,7 +109,7 @@ static int __init ima_template_fmt_setup(char *str)
}
__setup("ima_template_fmt=", ima_template_fmt_setup);
-static struct ima_template_desc *lookup_template_desc(const char *name)
+struct ima_template_desc *lookup_template_desc(const char *name)
{
struct ima_template_desc *template_desc;
int found = 0;
@@ -149,9 +154,9 @@ static int template_fmt_size(const char *template_fmt)
return j + 1;
}
-static int template_desc_init_fields(const char *template_fmt,
- const struct ima_template_field ***fields,
- int *num_fields)
+int template_desc_init_fields(const char *template_fmt,
+ const struct ima_template_field ***fields,
+ int *num_fields)
{
const char *template_fmt_ptr;
const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 9fe0ef7f91e2..2fb9a10bc6b7 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -158,6 +158,12 @@ void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
}
+void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
/**
* ima_parse_buf() - Parses lengths and data from an input buffer
* @bufstartp: Buffer start address.
@@ -385,3 +391,18 @@ int ima_eventsig_init(struct ima_event_data *event_data,
return ima_write_template_field_data(xattr_value, event_data->xattr_len,
DATA_FMT_HEX, field_data);
}
+
+/*
+ * ima_eventbuf_init - include the buffer (kexec-cmdline) as part of the

+ * template data.
+ */
+int ima_eventbuf_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ if ((!event_data->buf) || (event_data->buf_len == 0))
+ return 0;
+
+ return ima_write_template_field_data(event_data->buf,
+ event_data->buf_len, DATA_FMT_HEX,
+ field_data);
+}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
index e515955456a3..652aa5de81ef 100644
--- a/security/integrity/ima/ima_template_lib.h
+++ b/security/integrity/ima/ima_template_lib.h
@@ -25,6 +25,8 @@ void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
struct ima_field_data *field_data);
void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
struct ima_field_data *field_data);
+void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
int maxfields, struct ima_field_data *fields, int *curfields,
unsigned long *len_mask, int enforce_mask, char *bufname);
@@ -38,4 +40,6 @@ int ima_eventname_ng_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
int ima_eventsig_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
+int ima_eventbuf_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 65377848fbc5..ed12d8e13d04 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -74,6 +74,12 @@ enum evm_ima_xattr_type {
struct evm_ima_xattr_data {
u8 type;
+ u8 data[];
+} __packed;
+
+/* Only used in the EVM HMAC code. */
+struct evm_xattr {
+ struct evm_ima_xattr_data data;
u8 digest[SHA1_DIGEST_SIZE];
} __packed;
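This integrity.h hunk is what drives the earlier evm_main.c change of comparing xattr_len against sizeof(struct evm_xattr): once evm_ima_xattr_data ends in a flexible array member, its sizeof() no longer covers the SHA1 digest. The standalone sketch below reproduces the two layouts (SHA1_DIGEST_SIZE assumed to be 20, as in the kernel) to show the sizes involved; the old fixed-size evm_ima_xattr_data was also 21 bytes, so the value being checked is unchanged, only the type describing it moved. Embedding a flexible-array struct inside another struct, as the kernel does here, relies on a GCC extension.

#include <stdio.h>
#include <stdint.h>

#define SHA1_DIGEST_SIZE 20	/* value used by the kernel's SHA1 definitions */

struct evm_ima_xattr_data {
	uint8_t type;
	uint8_t data[];			/* flexible array: contributes 0 to sizeof() */
} __attribute__((packed));

struct evm_xattr {			/* EVM HMAC layout: type byte + SHA1 digest */
	struct evm_ima_xattr_data data;
	uint8_t digest[SHA1_DIGEST_SIZE];
} __attribute__((packed));

int main(void)
{
	printf("sizeof(struct evm_ima_xattr_data) = %zu\n",
	       sizeof(struct evm_ima_xattr_data));	/* 1 */
	printf("sizeof(struct evm_xattr)          = %zu\n",
	       sizeof(struct evm_xattr));		/* 21 */
	return 0;
}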
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index ee502e4d390b..dd313438fecf 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -25,6 +25,24 @@ config KEYS_COMPAT
def_bool y
depends on COMPAT && KEYS
+config KEYS_REQUEST_CACHE
+ bool "Enable temporary caching of the last request_key() result"
+ depends on KEYS
+ help
+ This option causes the result of the last successful request_key()
+ call that didn't upcall to the kernel to be cached temporarily in the
+ task_struct. The cache is cleared by exit and just prior to the
+ resumption of userspace.
+
+ This allows multi-step processes, where each step wants to request
+ a key that is likely the same as the one requested by the previous
+ step, to save on the searching.
+
+ An example of such a process is a pathwalk through a network
+ filesystem in which each method needs to request an authentication
+ key. Pathwalk will call multiple methods for each dentry traversed
+ (permission, d_revalidate, lookup, getxattr, getacl, ...).
+
config PERSISTENT_KEYRINGS
bool "Enable register of persistent per-UID keyrings"
depends on KEYS
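The KEYS_REQUEST_CACHE help text above describes a one-slot cache hung off task_struct that is dropped on exit and before returning to userspace. The toy below is a conceptual sketch of that shape only (hypothetical names, ordinary userspace C, none of the kernel's keyring types, locking or refcounting), showing why repeated identical requests during something like a pathwalk become cheap.

#include <stdio.h>
#include <string.h>

struct key {
	const char *desc;
};

/* a couple of keys standing in for a full keyring search */
static struct key keys[] = { { "afs:example.com" }, { "afs:other.org" } };

struct task {
	struct key *cached_key;		/* the one-slot cache added to task_struct */
};

static struct key *slow_search(const char *desc)
{
	for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		if (strcmp(keys[i].desc, desc) == 0)
			return &keys[i];
	return NULL;
}

static struct key *request_key_cached(struct task *tsk, const char *desc)
{
	if (tsk->cached_key && strcmp(tsk->cached_key->desc, desc) == 0)
		return tsk->cached_key;	/* hit: the keyring walk is skipped */
	tsk->cached_key = slow_search(desc);
	return tsk->cached_key;
}

int main(void)
{
	struct task tsk = { NULL };

	/* each pathwalk method asks for the same authentication key */
	for (int step = 0; step < 3; step++)
		printf("step %d -> %s\n", step,
		       request_key_cached(&tsk, "afs:example.com")->desc);

	tsk.cached_key = NULL;	/* cleared, as the kernel does before resuming userspace */
	return 0;
}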
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 35ce47ce2285..9bcc404131aa 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -155,6 +155,12 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
return keyctl_pkey_verify(compat_ptr(arg2), compat_ptr(arg3),
compat_ptr(arg4), compat_ptr(arg5));
+ case KEYCTL_MOVE:
+ return keyctl_keyring_move(arg2, arg3, arg4, arg5);
+
+ case KEYCTL_CAPABILITIES:
+ return keyctl_capabilities(compat_ptr(arg2), arg3);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 44e58a3e5663..671dd730ecfc 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -150,7 +150,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
-
+ key_put_tag(key->domain_tag);
kfree(key->description);
memzero_explicit(key, sizeof(*key));
diff --git a/security/keys/internal.h b/security/keys/internal.h
index d59bc25a9249..c039373488bd 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -85,10 +85,14 @@ extern spinlock_t key_serial_lock;
extern struct mutex key_construction_mutex;
extern wait_queue_head_t request_key_conswq;
-
+extern void key_set_index_key(struct keyring_index_key *index_key);
extern struct key_type *key_type_lookup(const char *type);
extern void key_type_put(struct key_type *ktype);
+extern int __key_link_lock(struct key *keyring,
+ const struct keyring_index_key *index_key);
+extern int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
+ const struct keyring_index_key *index_key);
extern int __key_link_begin(struct key *keyring,
const struct keyring_index_key *index_key,
struct assoc_array_edit **_edit);
@@ -119,6 +123,7 @@ struct keyring_search_context {
#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */
#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */
#define KEYRING_SEARCH_SKIP_EXPIRED 0x0020 /* Ignore expired keys (intention to replace) */
+#define KEYRING_SEARCH_RECURSE 0x0040 /* Search child keyrings also */
int (*iterator)(const void *object, void *iterator_data);
@@ -131,21 +136,23 @@ struct keyring_search_context {
extern bool key_default_cmp(const struct key *key,
const struct key_match_data *match_data);
-extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+extern key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
struct keyring_search_context *ctx);
-extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
-extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
+extern key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx);
extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
-extern int install_user_keyrings(void);
+extern int look_up_user_keyrings(struct key **, struct key **);
+extern struct key *get_user_session_keyring_rcu(const struct cred *);
extern int install_thread_keyring_to_cred(struct cred *);
extern int install_process_keyring_to_cred(struct cred *);
extern int install_session_keyring_to_cred(struct cred *, struct key *);
extern struct key *request_key_and_link(struct key_type *type,
const char *description,
+ struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux,
@@ -199,7 +206,8 @@ static inline bool key_is_dead(const struct key *key, time64_t limit)
return
key->flags & ((1 << KEY_FLAG_DEAD) |
(1 << KEY_FLAG_INVALIDATED)) ||
- (key->expiry > 0 && key->expiry <= limit);
+ (key->expiry > 0 && key->expiry <= limit) ||
+ key->domain_tag->removed;
}
/*
@@ -211,6 +219,7 @@ extern long keyctl_update_key(key_serial_t, const void __user *, size_t);
extern long keyctl_revoke_key(key_serial_t);
extern long keyctl_keyring_clear(key_serial_t);
extern long keyctl_keyring_link(key_serial_t, key_serial_t);
+extern long keyctl_keyring_move(key_serial_t, key_serial_t, key_serial_t, unsigned int);
extern long keyctl_keyring_unlink(key_serial_t, key_serial_t);
extern long keyctl_describe_key(key_serial_t, char __user *, size_t);
extern long keyctl_keyring_search(key_serial_t, const char __user *,
@@ -320,6 +329,8 @@ static inline long keyctl_pkey_e_d_s(int op,
}
#endif
+extern long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen);
+
/*
* Debugging key validation
*/
diff --git a/security/keys/key.c b/security/keys/key.c
index 9a6108aefae9..764f4c57913e 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -281,11 +281,12 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
if (!key->index_key.description)
goto no_memory_3;
+ key->index_key.type = type;
+ key_set_index_key(&key->index_key);
refcount_set(&key->usage, 1);
init_rwsem(&key->sem);
lockdep_set_class(&key->sem, &type->lock_class);
- key->index_key.type = type;
key->user = user;
key->quotalen = quotalen;
key->datalen = type->def_datalen;
@@ -312,6 +313,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
goto security_error;
/* publish the key by giving it a serial number */
+ refcount_inc(&key->domain_tag->usage);
atomic_inc(&user->nkeys);
key_alloc_serial(key);
@@ -455,7 +457,7 @@ static int __key_instantiate_and_link(struct key *key,
/* disable the authorisation key */
if (authkey)
- key_revoke(authkey);
+ key_invalidate(authkey);
if (prep->expiry != TIME64_MAX) {
key->expiry = prep->expiry;
@@ -496,7 +498,7 @@ int key_instantiate_and_link(struct key *key,
struct key *authkey)
{
struct key_preparsed_payload prep;
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
int ret;
memset(&prep, 0, sizeof(prep));
@@ -511,10 +513,14 @@ int key_instantiate_and_link(struct key *key,
}
if (keyring) {
- ret = __key_link_begin(keyring, &key->index_key, &edit);
+ ret = __key_link_lock(keyring, &key->index_key);
if (ret < 0)
goto error;
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto error_link_end;
+
if (keyring->restrict_link && keyring->restrict_link->check) {
struct key_restriction *keyres = keyring->restrict_link;
@@ -566,7 +572,7 @@ int key_reject_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
int ret, awaken, link_ret = 0;
key_check(key);
@@ -579,7 +585,12 @@ int key_reject_and_link(struct key *key,
if (keyring->restrict_link)
return -EPERM;
- link_ret = __key_link_begin(keyring, &key->index_key, &edit);
+ link_ret = __key_link_lock(keyring, &key->index_key);
+ if (link_ret == 0) {
+ link_ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (link_ret < 0)
+ __key_link_end(keyring, &key->index_key, edit);
+ }
}
mutex_lock(&key_construction_mutex);
@@ -603,7 +614,7 @@ int key_reject_and_link(struct key *key,
/* disable the authorisation key */
if (authkey)
- key_revoke(authkey);
+ key_invalidate(authkey);
}
mutex_unlock(&key_construction_mutex);
@@ -806,7 +817,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
.description = description,
};
struct key_preparsed_payload prep;
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
const struct cred *cred = current_cred();
struct key *keyring, *key = NULL;
key_ref_t key_ref;
@@ -855,13 +866,20 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
goto error_free_prep;
}
index_key.desc_len = strlen(index_key.description);
+ key_set_index_key(&index_key);
- ret = __key_link_begin(keyring, &index_key, &edit);
+ ret = __key_link_lock(keyring, &index_key);
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error_free_prep;
}
+ ret = __key_link_begin(keyring, &index_key, &edit);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+
if (restrict_link && restrict_link->check) {
ret = restrict_link->check(keyring, index_key.type,
&prep.payload, restrict_link->key);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 5aa605ef8d9d..9b898c969558 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -26,6 +26,20 @@
#define KEY_MAX_DESC_SIZE 4096
+static const unsigned char keyrings_capabilities[2] = {
+ [0] = (KEYCTL_CAPS0_CAPABILITIES |
+ (IS_ENABLED(CONFIG_PERSISTENT_KEYRINGS) ? KEYCTL_CAPS0_PERSISTENT_KEYRINGS : 0) |
+ (IS_ENABLED(CONFIG_KEY_DH_OPERATIONS) ? KEYCTL_CAPS0_DIFFIE_HELLMAN : 0) |
+ (IS_ENABLED(CONFIG_ASYMMETRIC_KEY_TYPE) ? KEYCTL_CAPS0_PUBLIC_KEY : 0) |
+ (IS_ENABLED(CONFIG_BIG_KEYS) ? KEYCTL_CAPS0_BIG_KEY : 0) |
+ KEYCTL_CAPS0_INVALIDATE |
+ KEYCTL_CAPS0_RESTRICT_KEYRING |
+ KEYCTL_CAPS0_MOVE
+ ),
+ [1] = (KEYCTL_CAPS1_NS_KEYRING_NAME |
+ KEYCTL_CAPS1_NS_KEY_TAG),
+};
+
static int key_get_type_from_user(char *type,
const char __user *_type,
unsigned len)
@@ -206,7 +220,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
}
/* do the search */
- key = request_key_and_link(ktype, description, callout_info,
+ key = request_key_and_link(ktype, description, NULL, callout_info,
callout_len, NULL, key_ref_to_ptr(dest_ref),
KEY_ALLOC_IN_QUOTA);
if (IS_ERR(key)) {
@@ -569,6 +583,52 @@ error:
}
/*
+ * Move a link to a key from one keyring to another, displacing any matching
+ * key from the destination keyring.
+ *
+ * The key must grant the caller Link permission and both keyrings must grant
+ * the caller Write permission. There must also be a link in the from keyring
+ * to the key. If both keyrings are the same, nothing is done.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_move(key_serial_t id, key_serial_t from_ringid,
+ key_serial_t to_ringid, unsigned int flags)
+{
+ key_ref_t key_ref, from_ref, to_ref;
+ long ret;
+
+ if (flags & ~KEYCTL_MOVE_EXCL)
+ return -EINVAL;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+
+ from_ref = lookup_user_key(from_ringid, 0, KEY_NEED_WRITE);
+ if (IS_ERR(from_ref)) {
+ ret = PTR_ERR(from_ref);
+ goto error2;
+ }
+
+ to_ref = lookup_user_key(to_ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(to_ref)) {
+ ret = PTR_ERR(to_ref);
+ goto error3;
+ }
+
+ ret = key_move(key_ref_to_ptr(key_ref), key_ref_to_ptr(from_ref),
+ key_ref_to_ptr(to_ref), flags);
+
+ key_ref_put(to_ref);
+error3:
+ key_ref_put(from_ref);
+error2:
+ key_ref_put(key_ref);
+ return ret;
+}
+
+/*
* Return a description of a key to userspace.
*
* The key must grant the caller View permission for this to work.
@@ -700,7 +760,7 @@ long keyctl_keyring_search(key_serial_t ringid,
}
/* do the search */
- key_ref = keyring_search(keyring_ref, ktype, description);
+ key_ref = keyring_search(keyring_ref, ktype, description, true);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
@@ -1520,7 +1580,8 @@ long keyctl_session_to_parent(void)
ret = -EPERM;
oldwork = NULL;
- parent = me->real_parent;
+ parent = rcu_dereference_protected(me->real_parent,
+ lockdep_is_held(&tasklist_lock));
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
@@ -1628,6 +1689,26 @@ error:
}
/*
+ * Get keyrings subsystem capabilities.
+ */
+long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen)
+{
+ size_t size = buflen;
+
+ if (size > 0) {
+ if (size > sizeof(keyrings_capabilities))
+ size = sizeof(keyrings_capabilities);
+ if (copy_to_user(_buffer, keyrings_capabilities, size) != 0)
+ return -EFAULT;
+ if (size < buflen &&
+ clear_user(_buffer + size, buflen - size) != 0)
+ return -EFAULT;
+ }
+
+ return sizeof(keyrings_capabilities);
+}
+
+/*
* The key control system call
*/
SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -1767,6 +1848,15 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
(const void __user *)arg4,
(const void __user *)arg5);
+ case KEYCTL_MOVE:
+ return keyctl_keyring_move((key_serial_t)arg2,
+ (key_serial_t)arg3,
+ (key_serial_t)arg4,
+ (unsigned int)arg5);
+
+ case KEYCTL_CAPABILITIES:
+ return keyctl_capabilities((unsigned char __user *)arg2, (size_t)arg3);
+
default:
return -EOPNOTSUPP;
}
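KEYCTL_MOVE and KEYCTL_CAPABILITIES, wired up above, are userspace-visible commands. The sketch below drives them through the raw keyctl syscall; it assumes uapi headers new enough to define these constants (v5.3+), and the key serial passed to KEYCTL_MOVE is only a placeholder for illustration.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>	/* KEYCTL_MOVE, KEYCTL_CAPABILITIES: v5.3+ headers */

int main(void)
{
	unsigned char caps[2] = { 0, 0 };
	long n;

	/* Query the capability bytes assembled in keyctl.c above. */
	n = syscall(SYS_keyctl, KEYCTL_CAPABILITIES, caps, sizeof(caps));
	if (n < 0) {
		perror("KEYCTL_CAPABILITIES");
		return 1;
	}
	printf("caps[0]=%#x caps[1]=%#x (kernel has %ld capability bytes)\n",
	       caps[0], caps[1], n);

	if (caps[0] & KEYCTL_CAPS0_MOVE) {
		int key = 0x12345678;	/* placeholder serial number */

		/* Move the key from the thread keyring to the session keyring,
		 * failing (KEYCTL_MOVE_EXCL) if a matching key already exists
		 * in the destination. */
		if (syscall(SYS_keyctl, KEYCTL_MOVE, key,
			    KEY_SPEC_THREAD_KEYRING, KEY_SPEC_SESSION_KEYRING,
			    KEYCTL_MOVE_EXCL) < 0)
			perror("KEYCTL_MOVE");
	}
	return 0;
}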
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e311cc5df358..febf36c6ddc5 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -12,10 +12,13 @@
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/err.h>
+#include <linux/user_namespace.h>
+#include <linux/nsproxy.h>
#include <keys/keyring-type.h>
#include <keys/user-type.h>
#include <linux/assoc_array_priv.h>
#include <linux/uaccess.h>
+#include <net/net_namespace.h>
#include "internal.h"
/*
@@ -25,11 +28,6 @@
#define KEYRING_SEARCH_MAX_DEPTH 6
/*
- * We keep all named keyrings in a hash to speed looking them up.
- */
-#define KEYRING_NAME_HASH_SIZE (1 << 5)
-
-/*
* We mark pointers we pass to the associative array with bit 1 set if
* they're keyrings and clear otherwise.
*/
@@ -51,17 +49,21 @@ static inline void *keyring_key_to_ptr(struct key *key)
return key;
}
-static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
static DEFINE_RWLOCK(keyring_name_lock);
-static inline unsigned keyring_hash(const char *desc)
+/*
+ * Clean up the bits of user_namespace that belong to us.
+ */
+void key_free_user_ns(struct user_namespace *ns)
{
- unsigned bucket = 0;
-
- for (; *desc; desc++)
- bucket += (unsigned char)*desc;
-
- return bucket & (KEYRING_NAME_HASH_SIZE - 1);
+ write_lock(&keyring_name_lock);
+ list_del_init(&ns->keyring_name_list);
+ write_unlock(&keyring_name_lock);
+
+ key_put(ns->user_keyring_register);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ key_put(ns->persistent_keyring_register);
+#endif
}
/*
@@ -96,27 +98,21 @@ EXPORT_SYMBOL(key_type_keyring);
* Semaphore to serialise link/link calls to prevent two link calls in parallel
* introducing a cycle.
*/
-static DECLARE_RWSEM(keyring_serialise_link_sem);
+static DEFINE_MUTEX(keyring_serialise_link_lock);
/*
* Publish the name of a keyring so that it can be found by name (if it has
- * one).
+ * one and it doesn't begin with a dot).
*/
static void keyring_publish_name(struct key *keyring)
{
- int bucket;
-
- if (keyring->description) {
- bucket = keyring_hash(keyring->description);
+ struct user_namespace *ns = current_user_ns();
+ if (keyring->description &&
+ keyring->description[0] &&
+ keyring->description[0] != '.') {
write_lock(&keyring_name_lock);
-
- if (!keyring_name_hash[bucket].next)
- INIT_LIST_HEAD(&keyring_name_hash[bucket]);
-
- list_add_tail(&keyring->name_link,
- &keyring_name_hash[bucket]);
-
+ list_add_tail(&keyring->name_link, &ns->keyring_name_list);
write_unlock(&keyring_name_lock);
}
}
@@ -164,7 +160,7 @@ static u64 mult_64x32_and_fold(u64 x, u32 y)
/*
* Hash a key type and description.
*/
-static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
+static void hash_key_type_and_desc(struct keyring_index_key *index_key)
{
const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
@@ -175,9 +171,12 @@ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *inde
int n, desc_len = index_key->desc_len;
type = (unsigned long)index_key->type;
-
acc = mult_64x32_and_fold(type, desc_len + 13);
acc = mult_64x32_and_fold(acc, 9207);
+ piece = (unsigned long)index_key->domain_tag;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+
for (;;) {
n = desc_len;
if (n <= 0)
@@ -202,24 +201,67 @@ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *inde
* zero for keyrings and non-zero otherwise.
*/
if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
- return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
- if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
- return (hash + (hash << level_shift)) & ~fan_mask;
- return hash;
+ hash |= (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+ else if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+ hash = (hash + (hash << level_shift)) & ~fan_mask;
+ index_key->hash = hash;
}
/*
- * Build the next index key chunk.
- *
- * On 32-bit systems the index key is laid out as:
- *
- * 0 4 5 9...
- * hash desclen typeptr desc[]
+ * Finalise an index key: copy the leading part of the description into the
+ * index key itself, set the domain tag and calculate the hash.
+ */
+void key_set_index_key(struct keyring_index_key *index_key)
+{
+ static struct key_tag default_domain_tag = { .usage = REFCOUNT_INIT(1), };
+ size_t n = min_t(size_t, index_key->desc_len, sizeof(index_key->desc));
+
+ memcpy(index_key->desc, index_key->description, n);
+
+ if (!index_key->domain_tag) {
+ if (index_key->type->flags & KEY_TYPE_NET_DOMAIN)
+ index_key->domain_tag = current->nsproxy->net_ns->key_domain;
+ else
+ index_key->domain_tag = &default_domain_tag;
+ }
+
+ hash_key_type_and_desc(index_key);
+}
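As a point of reference (the helper name below is invented for illustration and assumes code living inside the keys subsystem where internal.h is visible), a caller that builds a struct keyring_index_key by hand now zeroes it, fills in type, description and desc_len, and calls key_set_index_key() before doing a lookup; the persistent.c hunk later in this diff follows exactly this pattern.

  /* Sketch of the expected calling convention for the new index key setup. */
  static key_ref_t example_find_named_keyring(key_ref_t keyring_ref,
                                              const char *name)
  {
          struct keyring_index_key index_key;

          memset(&index_key, 0, sizeof(index_key));
          index_key.type = &key_type_keyring;
          index_key.description = name;
          index_key.desc_len = strlen(name);
          key_set_index_key(&index_key);  /* fills desc[], domain_tag, hash */

          return find_key_to_update(keyring_ref, &index_key);
  }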
+
+/**
+ * key_put_tag - Release a ref on a tag.
+ * @tag: The tag to release.
*
- * On 64-bit systems:
+ * This releases a reference on the given tag and returns true if that ref was
+ * the last one.
+ */
+bool key_put_tag(struct key_tag *tag)
+{
+ if (refcount_dec_and_test(&tag->usage)) {
+ kfree_rcu(tag, rcu);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * key_remove_domain - Kill off a key domain and gc its keys
+ * @domain_tag: The domain tag to release.
*
- * 0 8 9 17...
- * hash desclen typeptr desc[]
+ * This marks a domain tag as being dead and releases a ref on it. If that
+ * wasn't the last reference, the garbage collector is poked to try and delete
+ * all keys that were in the domain.
+ */
+void key_remove_domain(struct key_tag *domain_tag)
+{
+ domain_tag->removed = true;
+ if (!key_put_tag(domain_tag))
+ key_schedule_gc_links();
+}
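Purely as an illustration of the intended lifecycle (the allocation side is an assumption here, not something this hunk defines): whoever owns a domain tag holds one reference on it and calls key_remove_domain() on teardown so that keys tagged with that domain become eligible for garbage collection.

  /* Illustrative lifecycle of a domain tag owner (e.g. a namespace). */
  static struct key_tag *example_alloc_domain(void)
  {
          struct key_tag *tag = kzalloc(sizeof(*tag), GFP_KERNEL);

          if (tag)
                  refcount_set(&tag->usage, 1);  /* the owner's reference */
          return tag;
  }

  static void example_free_domain(struct key_tag *tag)
  {
          /* Marks the domain dead, drops the owner's ref and, if other refs
           * remain, pokes the key GC to unlink the domain's keys. */
          key_remove_domain(tag);
  }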
+
+/*
+ * Build the next index key chunk.
*
* We return it one word-sized chunk at a time.
*/
@@ -227,41 +269,33 @@ static unsigned long keyring_get_key_chunk(const void *data, int level)
{
const struct keyring_index_key *index_key = data;
unsigned long chunk = 0;
- long offset = 0;
+ const u8 *d;
int desc_len = index_key->desc_len, n = sizeof(chunk);
level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
switch (level) {
case 0:
- return hash_key_type_and_desc(index_key);
+ return index_key->hash;
case 1:
- return ((unsigned long)index_key->type << 8) | desc_len;
+ return index_key->x;
case 2:
- if (desc_len == 0)
- return (u8)((unsigned long)index_key->type >>
- (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
- n--;
- offset = 1;
- /* fall through */
+ return (unsigned long)index_key->type;
+ case 3:
+ return (unsigned long)index_key->domain_tag;
default:
- offset += sizeof(chunk) - 1;
- offset += (level - 3) * sizeof(chunk);
- if (offset >= desc_len)
+ level -= 4;
+ if (desc_len <= sizeof(index_key->desc))
return 0;
- desc_len -= offset;
+
+ d = index_key->description + sizeof(index_key->desc);
+ d += level * sizeof(long);
+ desc_len -= sizeof(index_key->desc);
if (desc_len > n)
desc_len = n;
- offset += desc_len;
do {
chunk <<= 8;
- chunk |= ((u8*)index_key->description)[--offset];
+ chunk |= *d++;
} while (--desc_len > 0);
-
- if (level == 2) {
- chunk <<= 8;
- chunk |= (u8)((unsigned long)index_key->type >>
- (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
- }
return chunk;
}
}
@@ -278,6 +312,7 @@ static bool keyring_compare_object(const void *object, const void *data)
const struct key *key = keyring_ptr_to_key(object);
return key->index_key.type == index_key->type &&
+ key->index_key.domain_tag == index_key->domain_tag &&
key->index_key.desc_len == index_key->desc_len &&
memcmp(key->index_key.description, index_key->description,
index_key->desc_len) == 0;
@@ -296,43 +331,38 @@ static int keyring_diff_objects(const void *object, const void *data)
int level, i;
level = 0;
- seg_a = hash_key_type_and_desc(a);
- seg_b = hash_key_type_and_desc(b);
+ seg_a = a->hash;
+ seg_b = b->hash;
if ((seg_a ^ seg_b) != 0)
goto differ;
+ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
/* The number of bits contributed by the hash is controlled by a
* constant in the assoc_array headers. Everything else thereafter we
* can deal with as being machine word-size dependent.
*/
- level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
- seg_a = a->desc_len;
- seg_b = b->desc_len;
+ seg_a = a->x;
+ seg_b = b->x;
if ((seg_a ^ seg_b) != 0)
goto differ;
+ level += sizeof(unsigned long);
/* The next bit may not work on big endian */
- level++;
seg_a = (unsigned long)a->type;
seg_b = (unsigned long)b->type;
if ((seg_a ^ seg_b) != 0)
goto differ;
+ level += sizeof(unsigned long);
+ seg_a = (unsigned long)a->domain_tag;
+ seg_b = (unsigned long)b->domain_tag;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
level += sizeof(unsigned long);
- if (a->desc_len == 0)
- goto same;
- i = 0;
- if (((unsigned long)a->description | (unsigned long)b->description) &
- (sizeof(unsigned long) - 1)) {
- do {
- seg_a = *(unsigned long *)(a->description + i);
- seg_b = *(unsigned long *)(b->description + i);
- if ((seg_a ^ seg_b) != 0)
- goto differ_plus_i;
- i += sizeof(unsigned long);
- } while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
- }
+ i = sizeof(a->desc);
+ if (a->desc_len <= i)
+ goto same;
for (; i < a->desc_len; i++) {
seg_a = *(unsigned char *)(a->description + i);
@@ -516,7 +546,7 @@ EXPORT_SYMBOL(keyring_alloc);
* @keyring: The keyring being added to.
* @type: The type of key being added.
* @payload: The payload of the key intended to be added.
- * @data: Additional data for evaluating restriction.
+ * @restriction_key: Keys providing additional data for evaluating restriction.
*
* Reject the addition of any links to a keyring. It can be overridden by
* passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
@@ -658,6 +688,9 @@ static bool search_nested_keyrings(struct key *keyring,
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+ if (ctx->index_key.description)
+ key_set_index_key(&ctx->index_key);
+
/* Check to see if this top-level keyring is what we are looking for
* and whether it is valid or not.
*/
@@ -697,6 +730,9 @@ descend_to_keyring:
* Non-keyrings avoid the leftmost branch of the root entirely (root
* slots 1-15).
*/
+ if (!(ctx->flags & KEYRING_SEARCH_RECURSE))
+ goto not_this_keyring;
+
ptr = READ_ONCE(keyring->keys.root);
if (!ptr)
goto not_this_keyring;
@@ -831,7 +867,7 @@ found:
}
/**
- * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * keyring_search_rcu - Search a keyring tree for a matching key under RCU
* @keyring_ref: A pointer to the keyring with possession indicator.
* @ctx: The keyring search context.
*
@@ -843,7 +879,9 @@ found:
* addition, the LSM gets to forbid keyring searches and key matches.
*
* The search is performed as a breadth-then-depth search up to the prescribed
- * limit (KEYRING_SEARCH_MAX_DEPTH).
+ * limit (KEYRING_SEARCH_MAX_DEPTH). The caller must hold the RCU read lock to
+ * prevent keyrings from being destroyed or rearranged whilst they are being
+ * searched.
*
* Keys are matched to the type provided and are then filtered by the match
* function, which is given the description to use in any way it sees fit. The
@@ -862,7 +900,7 @@ found:
* In the case of a successful return, the possession attribute from
* @keyring_ref is propagated to the returned key reference.
*/
-key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
struct keyring_search_context *ctx)
{
struct key *keyring;
@@ -884,11 +922,9 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
return ERR_PTR(err);
}
- rcu_read_lock();
ctx->now = ktime_get_real_seconds();
if (search_nested_keyrings(keyring, ctx))
__key_get(key_ref_to_ptr(ctx->result));
- rcu_read_unlock();
return ctx->result;
}
@@ -897,13 +933,15 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
* @keyring: The root of the keyring tree to be searched.
* @type: The type of keyring we want to find.
* @description: The name of the keyring we want to find.
+ * @recurse: True to search the children of @keyring also
*
- * As keyring_search_aux() above, but using the current task's credentials and
+ * As keyring_search_rcu() above, but using the current task's credentials and
* type's default matching function and preferred search method.
*/
key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
- const char *description)
+ const char *description,
+ bool recurse)
{
struct keyring_search_context ctx = {
.index_key.type = type,
@@ -918,13 +956,17 @@ key_ref_t keyring_search(key_ref_t keyring,
key_ref_t key;
int ret;
+ if (recurse)
+ ctx.flags |= KEYRING_SEARCH_RECURSE;
if (type->match_preparse) {
ret = type->match_preparse(&ctx.match_data);
if (ret < 0)
return ERR_PTR(ret);
}
- key = keyring_search_aux(keyring, &ctx);
+ rcu_read_lock();
+ key = keyring_search_rcu(keyring, &ctx);
+ rcu_read_unlock();
if (type->match_free)
type->match_free(&ctx.match_data);
@@ -972,9 +1014,13 @@ static bool keyring_detect_restriction_cycle(const struct key *dest_keyring,
/**
* keyring_restrict - Look up and apply a restriction to a keyring
- *
- * @keyring: The keyring to be restricted
+ * @keyring_ref: The keyring to be restricted
+ * @type: The key type that will provide the restriction checker.
* @restriction: The restriction options to apply to the keyring
+ *
+ * Look up a keyring and apply a restriction to it. The restriction is managed
+ * by the specific key type, but can be configured by the options specified in
+ * the restriction string.
*/
int keyring_restrict(key_ref_t keyring_ref, const char *type,
const char *restriction)
@@ -1096,50 +1142,44 @@ found:
*/
struct key *find_keyring_by_name(const char *name, bool uid_keyring)
{
+ struct user_namespace *ns = current_user_ns();
struct key *keyring;
- int bucket;
if (!name)
return ERR_PTR(-EINVAL);
- bucket = keyring_hash(name);
-
read_lock(&keyring_name_lock);
- if (keyring_name_hash[bucket].next) {
- /* search this hash bucket for a keyring with a matching name
- * that's readable and that hasn't been revoked */
- list_for_each_entry(keyring,
- &keyring_name_hash[bucket],
- name_link
- ) {
- if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
- continue;
-
- if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- continue;
+ /* Search the namespace's list of keyrings for one with a matching name
+ * that grants Search permission and that hasn't been revoked
+ */
+ list_for_each_entry(keyring, &ns->keyring_name_list, name_link) {
+ if (!kuid_has_mapping(ns, keyring->user->uid))
+ continue;
- if (strcmp(keyring->description, name) != 0)
- continue;
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ continue;
- if (uid_keyring) {
- if (!test_bit(KEY_FLAG_UID_KEYRING,
- &keyring->flags))
- continue;
- } else {
- if (key_permission(make_key_ref(keyring, 0),
- KEY_NEED_SEARCH) < 0)
- continue;
- }
+ if (strcmp(keyring->description, name) != 0)
+ continue;
- /* we've got a match but we might end up racing with
- * key_cleanup() if the keyring is currently 'dead'
- * (ie. it has a zero usage count) */
- if (!refcount_inc_not_zero(&keyring->usage))
+ if (uid_keyring) {
+ if (!test_bit(KEY_FLAG_UID_KEYRING,
+ &keyring->flags))
+ continue;
+ } else {
+ if (key_permission(make_key_ref(keyring, 0),
+ KEY_NEED_SEARCH) < 0)
continue;
- keyring->last_used_at = ktime_get_real_seconds();
- goto out;
}
+
+ /* we've got a match but we might end up racing with
+ * key_cleanup() if the keyring is currently 'dead'
+ * (ie. it has a zero usage count) */
+ if (!refcount_inc_not_zero(&keyring->usage))
+ continue;
+ keyring->last_used_at = ktime_get_real_seconds();
+ goto out;
}
keyring = ERR_PTR(-ENOKEY);
@@ -1182,7 +1222,8 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
.flags = (KEYRING_SEARCH_NO_STATE_CHECK |
KEYRING_SEARCH_NO_UPDATE_TIME |
KEYRING_SEARCH_NO_CHECK_PERM |
- KEYRING_SEARCH_DETECT_TOO_DEEP),
+ KEYRING_SEARCH_DETECT_TOO_DEEP |
+ KEYRING_SEARCH_RECURSE),
};
rcu_read_lock();
@@ -1192,13 +1233,67 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
}
/*
+ * Lock keyring for link.
+ */
+int __key_link_lock(struct key *keyring,
+ const struct keyring_index_key *index_key)
+ __acquires(&keyring->sem)
+ __acquires(&keyring_serialise_link_lock)
+{
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+
+ /* Serialise link/link calls to prevent parallel calls causing a cycle
+ * when linking two keyrings in opposite orders.
+ */
+ if (index_key->type == &key_type_keyring)
+ mutex_lock(&keyring_serialise_link_lock);
+
+ return 0;
+}
+
+/*
+ * Lock keyrings for move (link/unlink combination).
+ */
+int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
+ const struct keyring_index_key *index_key)
+ __acquires(&l_keyring->sem)
+ __acquires(&u_keyring->sem)
+ __acquires(&keyring_serialise_link_lock)
+{
+ if (l_keyring->type != &key_type_keyring ||
+ u_keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ /* We have to be very careful here to take the keyring locks in the
+ * right order, lest we open ourselves to deadlocking against another
+ * move operation.
+ */
+ if (l_keyring < u_keyring) {
+ down_write(&l_keyring->sem);
+ down_write_nested(&u_keyring->sem, 1);
+ } else {
+ down_write(&u_keyring->sem);
+ down_write_nested(&l_keyring->sem, 1);
+ }
+
+ /* Serialise link/link calls to prevent parallel calls causing a cycle
+ * when linking two keyrings in opposite orders.
+ */
+ if (index_key->type == &key_type_keyring)
+ mutex_lock(&keyring_serialise_link_lock);
+
+ return 0;
+}
+
+/*
* Preallocate memory so that a key can be linked into a keyring.
*/
int __key_link_begin(struct key *keyring,
const struct keyring_index_key *index_key,
struct assoc_array_edit **_edit)
- __acquires(&keyring->sem)
- __acquires(&keyring_serialise_link_sem)
{
struct assoc_array_edit *edit;
int ret;
@@ -1207,20 +1302,13 @@ int __key_link_begin(struct key *keyring,
keyring->serial, index_key->type->name, index_key->description);
BUG_ON(index_key->desc_len == 0);
+ BUG_ON(*_edit != NULL);
- if (keyring->type != &key_type_keyring)
- return -ENOTDIR;
-
- down_write(&keyring->sem);
+ *_edit = NULL;
ret = -EKEYREVOKED;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- goto error_krsem;
-
- /* serialise link/link calls to prevent parallel calls causing a cycle
- * when linking two keyring in opposite orders */
- if (index_key->type == &key_type_keyring)
- down_write(&keyring_serialise_link_sem);
+ goto error;
/* Create an edit script that will insert/replace the key in the
* keyring tree.
@@ -1231,7 +1319,7 @@ int __key_link_begin(struct key *keyring,
NULL);
if (IS_ERR(edit)) {
ret = PTR_ERR(edit);
- goto error_sem;
+ goto error;
}
/* If we're not replacing a link in-place then we're going to need some
@@ -1250,11 +1338,7 @@ int __key_link_begin(struct key *keyring,
error_cancel:
assoc_array_cancel_edit(edit);
-error_sem:
- if (index_key->type == &key_type_keyring)
- up_write(&keyring_serialise_link_sem);
-error_krsem:
- up_write(&keyring->sem);
+error:
kleave(" = %d", ret);
return ret;
}
@@ -1299,14 +1383,11 @@ void __key_link_end(struct key *keyring,
const struct keyring_index_key *index_key,
struct assoc_array_edit *edit)
__releases(&keyring->sem)
- __releases(&keyring_serialise_link_sem)
+ __releases(&keyring_serialise_link_lock)
{
BUG_ON(index_key->type == NULL);
kenter("%d,%s,", keyring->serial, index_key->type->name);
- if (index_key->type == &key_type_keyring)
- up_write(&keyring_serialise_link_sem);
-
if (edit) {
if (!edit->dead_leaf) {
key_payload_reserve(keyring,
@@ -1315,6 +1396,9 @@ void __key_link_end(struct key *keyring,
assoc_array_cancel_edit(edit);
}
up_write(&keyring->sem);
+
+ if (index_key->type == &key_type_keyring)
+ mutex_unlock(&keyring_serialise_link_lock);
}
/*
@@ -1350,7 +1434,7 @@ static int __key_link_check_restriction(struct key *keyring, struct key *key)
*/
int key_link(struct key *keyring, struct key *key)
{
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
int ret;
kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage));
@@ -1358,22 +1442,88 @@ int key_link(struct key *keyring, struct key *key)
key_check(keyring);
key_check(key);
+ ret = __key_link_lock(keyring, &key->index_key);
+ if (ret < 0)
+ goto error;
+
ret = __key_link_begin(keyring, &key->index_key, &edit);
- if (ret == 0) {
- kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
- ret = __key_link_check_restriction(keyring, key);
- if (ret == 0)
- ret = __key_link_check_live_key(keyring, key);
- if (ret == 0)
- __key_link(key, &edit);
- __key_link_end(keyring, &key->index_key, edit);
- }
+ if (ret < 0)
+ goto error_end;
+
+ kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
+ ret = __key_link_check_restriction(keyring, key);
+ if (ret == 0)
+ ret = __key_link_check_live_key(keyring, key);
+ if (ret == 0)
+ __key_link(key, &edit);
+error_end:
+ __key_link_end(keyring, &key->index_key, edit);
+error:
kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage));
return ret;
}
EXPORT_SYMBOL(key_link);
+/*
+ * Lock a keyring for unlink.
+ */
+static int __key_unlink_lock(struct key *keyring)
+ __acquires(&keyring->sem)
+{
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+ return 0;
+}
+
+/*
+ * Begin the process of unlinking a key from a keyring.
+ */
+static int __key_unlink_begin(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ struct assoc_array_edit *edit;
+
+ BUG_ON(*_edit != NULL);
+
+ edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+ &key->index_key);
+ if (IS_ERR(edit))
+ return PTR_ERR(edit);
+
+ if (!edit)
+ return -ENOENT;
+
+ *_edit = edit;
+ return 0;
+}
+
+/*
+ * Apply an unlink change.
+ */
+static void __key_unlink(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ assoc_array_apply_edit(*_edit);
+ *_edit = NULL;
+ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
+}
+
+/*
+ * Finish unlinking a key from a keyring.
+ */
+static void __key_unlink_end(struct key *keyring,
+ struct key *key,
+ struct assoc_array_edit *edit)
+ __releases(&keyring->sem)
+{
+ if (edit)
+ assoc_array_cancel_edit(edit);
+ up_write(&keyring->sem);
+}
+
/**
* key_unlink - Unlink the first link to a key from a keyring.
* @keyring: The keyring to remove the link from.
@@ -1393,36 +1543,97 @@ EXPORT_SYMBOL(key_link);
*/
int key_unlink(struct key *keyring, struct key *key)
{
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
int ret;
key_check(keyring);
key_check(key);
- if (keyring->type != &key_type_keyring)
- return -ENOTDIR;
+ ret = __key_unlink_lock(keyring);
+ if (ret < 0)
+ return ret;
- down_write(&keyring->sem);
+ ret = __key_unlink_begin(keyring, key, &edit);
+ if (ret == 0)
+ __key_unlink(keyring, key, &edit);
+ __key_unlink_end(keyring, key, edit);
+ return ret;
+}
+EXPORT_SYMBOL(key_unlink);
- edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
- &key->index_key);
- if (IS_ERR(edit)) {
- ret = PTR_ERR(edit);
+/**
+ * key_move - Move a key from one keyring to another
+ * @key: The key to move
+ * @from_keyring: The keyring to remove the link from.
+ * @to_keyring: The keyring to make the link in.
+ * @flags: Qualifying flags, such as KEYCTL_MOVE_EXCL.
+ *
+ * Make a link in @to_keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring
+ * whilst simultaneously removing a link to the key from @from_keyring.
+ *
+ * This function will write-lock both keyrings' semaphores and will consume
+ * some of the user's key data quota to hold the link on @to_keyring.
+ *
+ * Returns 0 if successful, -ENOTDIR if either keyring isn't a keyring,
+ * -EKEYREVOKED if either keyring has been revoked, -ENFILE if the second
+ * keyring is full, -EDQUOT if there is insufficient key data quota remaining
+ * to add another link or -ENOMEM if there's insufficient memory. If
+ * KEYCTL_MOVE_EXCL is set, then -EEXIST will be returned if there's already a
+ * matching key in @to_keyring.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
+ */
+int key_move(struct key *key,
+ struct key *from_keyring,
+ struct key *to_keyring,
+ unsigned int flags)
+{
+ struct assoc_array_edit *from_edit = NULL, *to_edit = NULL;
+ int ret;
+
+ kenter("%d,%d,%d", key->serial, from_keyring->serial, to_keyring->serial);
+
+ if (from_keyring == to_keyring)
+ return 0;
+
+ key_check(key);
+ key_check(from_keyring);
+ key_check(to_keyring);
+
+ ret = __key_move_lock(from_keyring, to_keyring, &key->index_key);
+ if (ret < 0)
+ goto out;
+ ret = __key_unlink_begin(from_keyring, key, &from_edit);
+ if (ret < 0)
goto error;
- }
- ret = -ENOENT;
- if (edit == NULL)
+ ret = __key_link_begin(to_keyring, &key->index_key, &to_edit);
+ if (ret < 0)
goto error;
- assoc_array_apply_edit(edit);
- key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
- ret = 0;
+ ret = -EEXIST;
+ if (to_edit->dead_leaf && (flags & KEYCTL_MOVE_EXCL))
+ goto error;
+ ret = __key_link_check_restriction(to_keyring, key);
+ if (ret < 0)
+ goto error;
+ ret = __key_link_check_live_key(to_keyring, key);
+ if (ret < 0)
+ goto error;
+
+ __key_unlink(from_keyring, key, &from_edit);
+ __key_link(key, &to_edit);
error:
- up_write(&keyring->sem);
+ __key_link_end(to_keyring, &key->index_key, to_edit);
+ __key_unlink_end(from_keyring, key, from_edit);
+out:
+ kleave(" = %d", ret);
return ret;
}
-EXPORT_SYMBOL(key_unlink);
+EXPORT_SYMBOL(key_move);
/**
* keyring_clear - Clear a keyring
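For in-kernel callers (everything below is a sketch; the helper and keyring names are invented for illustration), the new key_move() pairs naturally with KEYCTL_MOVE_EXCL when promoting a key from a scratch keyring into a longer-lived one:

  /* Sketch: move a freshly built key out of a temporary keyring, failing
   * rather than displacing an existing match in the destination. */
  static int example_promote_key(struct key *key, struct key *scratch_ring,
                                 struct key *dest_ring)
  {
          int ret = key_move(key, scratch_ring, dest_ring, KEYCTL_MOVE_EXCL);

          if (ret == -EEXIST)
                  pr_debug("destination already has a matching key\n");
          return ret;
  }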
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
index da9a0f42b795..97af230aa4b2 100644
--- a/security/keys/persistent.c
+++ b/security/keys/persistent.c
@@ -80,15 +80,17 @@ static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
long ret;
/* Look in the register if it exists */
+ memset(&index_key, 0, sizeof(index_key));
index_key.type = &key_type_keyring;
index_key.description = buf;
index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+ key_set_index_key(&index_key);
if (ns->persistent_keyring_register) {
reg_ref = make_key_ref(ns->persistent_keyring_register, true);
- down_read(&ns->persistent_keyring_register_sem);
+ down_read(&ns->keyring_sem);
persistent_ref = find_key_to_update(reg_ref, &index_key);
- up_read(&ns->persistent_keyring_register_sem);
+ up_read(&ns->keyring_sem);
if (persistent_ref)
goto found;
@@ -97,9 +99,9 @@ static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
/* It wasn't in the register, so we'll need to create it. We might
* also need to create the register.
*/
- down_write(&ns->persistent_keyring_register_sem);
+ down_write(&ns->keyring_sem);
persistent_ref = key_create_persistent(ns, uid, &index_key);
- up_write(&ns->persistent_keyring_register_sem);
+ up_write(&ns->keyring_sem);
if (!IS_ERR(persistent_ref))
goto found;
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 4e3266a2529e..415f3f1c2da0 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -166,7 +166,8 @@ static int proc_keys_show(struct seq_file *m, void *v)
.match_data.cmp = lookup_user_key_possessed,
.match_data.raw_data = key,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
- .flags = KEYRING_SEARCH_NO_STATE_CHECK,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
};
key_ref = make_key_ref(key, 0);
@@ -175,7 +176,9 @@ static int proc_keys_show(struct seq_file *m, void *v)
* skip if the key does not indicate the possessor can view it
*/
if (key->perm & KEY_POS_VIEW) {
- skey_ref = search_my_process_keyrings(&ctx);
+ rcu_read_lock();
+ skey_ref = search_cred_keyrings_rcu(&ctx);
+ rcu_read_unlock();
if (!IS_ERR(skey_ref)) {
key_ref_put(skey_ref);
key_ref = make_key_ref(key, 1);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 0b9406bf60e5..09541de31f2f 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -15,15 +15,13 @@
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <linux/uaccess.h>
+#include <linux/init_task.h>
#include <keys/request_key_auth-type.h>
#include "internal.h"
/* Session keyring create vs join semaphore */
static DEFINE_MUTEX(key_session_mutex);
-/* User keyring creation semaphore */
-static DEFINE_MUTEX(key_user_keyring_mutex);
-
/* The root user's tracking struct */
struct key_user root_key_user = {
.usage = REFCOUNT_INIT(3),
@@ -35,99 +33,186 @@ struct key_user root_key_user = {
};
/*
- * Install the user and user session keyrings for the current process's UID.
+ * Get or create a user register keyring.
+ */
+static struct key *get_user_register(struct user_namespace *user_ns)
+{
+ struct key *reg_keyring = READ_ONCE(user_ns->user_keyring_register);
+
+ if (reg_keyring)
+ return reg_keyring;
+
+ down_write(&user_ns->keyring_sem);
+
+ /* Make sure there's a register keyring. It gets owned by the
+ * user_namespace's owner.
+ */
+ reg_keyring = user_ns->user_keyring_register;
+ if (!reg_keyring) {
+ reg_keyring = keyring_alloc(".user_reg",
+ user_ns->owner, INVALID_GID,
+ &init_cred,
+ KEY_POS_WRITE | KEY_POS_SEARCH |
+ KEY_USR_VIEW | KEY_USR_READ,
+ 0,
+ NULL, NULL);
+ if (!IS_ERR(reg_keyring))
+ smp_store_release(&user_ns->user_keyring_register,
+ reg_keyring);
+ }
+
+ up_write(&user_ns->keyring_sem);
+
+ /* We don't return a ref since the keyring is pinned by the user_ns */
+ return reg_keyring;
+}
+
+/*
+ * Look up the user and user session keyrings for the current process's UID,
+ * creating them if they don't exist.
*/
-int install_user_keyrings(void)
+int look_up_user_keyrings(struct key **_user_keyring,
+ struct key **_user_session_keyring)
{
- struct user_struct *user;
- const struct cred *cred;
- struct key *uid_keyring, *session_keyring;
+ const struct cred *cred = current_cred();
+ struct user_namespace *user_ns = current_user_ns();
+ struct key *reg_keyring, *uid_keyring, *session_keyring;
key_perm_t user_keyring_perm;
+ key_ref_t uid_keyring_r, session_keyring_r;
+ uid_t uid = from_kuid(user_ns, cred->user->uid);
char buf[20];
int ret;
- uid_t uid;
user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
- cred = current_cred();
- user = cred->user;
- uid = from_kuid(cred->user_ns, user->uid);
- kenter("%p{%u}", user, uid);
+ kenter("%u", uid);
- if (READ_ONCE(user->uid_keyring) && READ_ONCE(user->session_keyring)) {
- kleave(" = 0 [exist]");
- return 0;
- }
+ reg_keyring = get_user_register(user_ns);
+ if (IS_ERR(reg_keyring))
+ return PTR_ERR(reg_keyring);
- mutex_lock(&key_user_keyring_mutex);
+ down_write(&user_ns->keyring_sem);
ret = 0;
- if (!user->uid_keyring) {
- /* get the UID-specific keyring
- * - there may be one in existence already as it may have been
- * pinned by a session, but the user_struct pointing to it
- * may have been destroyed by setuid */
- sprintf(buf, "_uid.%u", uid);
-
- uid_keyring = find_keyring_by_name(buf, true);
+ /* Get the user keyring. Note that there may be one in existence
+ * already as it may have been pinned by a session, but the user_struct
+ * pointing to it may have been destroyed by setuid.
+ */
+ snprintf(buf, sizeof(buf), "_uid.%u", uid);
+ uid_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
+ &key_type_keyring, buf, false);
+ kdebug("_uid %p", uid_keyring_r);
+ if (uid_keyring_r == ERR_PTR(-EAGAIN)) {
+ uid_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL, reg_keyring);
if (IS_ERR(uid_keyring)) {
- uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
- cred, user_keyring_perm,
- KEY_ALLOC_UID_KEYRING |
- KEY_ALLOC_IN_QUOTA,
- NULL, NULL);
- if (IS_ERR(uid_keyring)) {
- ret = PTR_ERR(uid_keyring);
- goto error;
- }
+ ret = PTR_ERR(uid_keyring);
+ goto error;
}
+ } else if (IS_ERR(uid_keyring_r)) {
+ ret = PTR_ERR(uid_keyring_r);
+ goto error;
+ } else {
+ uid_keyring = key_ref_to_ptr(uid_keyring_r);
+ }
- /* get a default session keyring (which might also exist
- * already) */
- sprintf(buf, "_uid_ses.%u", uid);
-
- session_keyring = find_keyring_by_name(buf, true);
+ /* Get a default session keyring (which might also exist already) */
+ snprintf(buf, sizeof(buf), "_uid_ses.%u", uid);
+ session_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
+ &key_type_keyring, buf, false);
+ kdebug("_uid_ses %p", session_keyring_r);
+ if (session_keyring_r == ERR_PTR(-EAGAIN)) {
+ session_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL, NULL);
if (IS_ERR(session_keyring)) {
- session_keyring =
- keyring_alloc(buf, user->uid, INVALID_GID,
- cred, user_keyring_perm,
- KEY_ALLOC_UID_KEYRING |
- KEY_ALLOC_IN_QUOTA,
- NULL, NULL);
- if (IS_ERR(session_keyring)) {
- ret = PTR_ERR(session_keyring);
- goto error_release;
- }
-
- /* we install a link from the user session keyring to
- * the user keyring */
- ret = key_link(session_keyring, uid_keyring);
- if (ret < 0)
- goto error_release_both;
+ ret = PTR_ERR(session_keyring);
+ goto error_release;
}
- /* install the keyrings */
- /* paired with READ_ONCE() */
- smp_store_release(&user->uid_keyring, uid_keyring);
- /* paired with READ_ONCE() */
- smp_store_release(&user->session_keyring, session_keyring);
+ /* We install a link from the user session keyring to
+ * the user keyring.
+ */
+ ret = key_link(session_keyring, uid_keyring);
+ if (ret < 0)
+ goto error_release_session;
+
+ /* And only then link the user-session keyring to the
+ * register.
+ */
+ ret = key_link(reg_keyring, session_keyring);
+ if (ret < 0)
+ goto error_release_session;
+ } else if (IS_ERR(session_keyring_r)) {
+ ret = PTR_ERR(session_keyring_r);
+ goto error_release;
+ } else {
+ session_keyring = key_ref_to_ptr(session_keyring_r);
}
- mutex_unlock(&key_user_keyring_mutex);
+ up_write(&user_ns->keyring_sem);
+
+ if (_user_session_keyring)
+ *_user_session_keyring = session_keyring;
+ else
+ key_put(session_keyring);
+ if (_user_keyring)
+ *_user_keyring = uid_keyring;
+ else
+ key_put(uid_keyring);
kleave(" = 0");
return 0;
-error_release_both:
+error_release_session:
key_put(session_keyring);
error_release:
key_put(uid_keyring);
error:
- mutex_unlock(&key_user_keyring_mutex);
+ up_write(&user_ns->keyring_sem);
kleave(" = %d", ret);
return ret;
}
/*
+ * Get the user session keyring if it exists, but don't create it if it
+ * doesn't.
+ */
+struct key *get_user_session_keyring_rcu(const struct cred *cred)
+{
+ struct key *reg_keyring = READ_ONCE(cred->user_ns->user_keyring_register);
+ key_ref_t session_keyring_r;
+ char buf[20];
+
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_keyring,
+ .index_key.description = buf,
+ .cred = cred,
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = buf,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
+ };
+
+ if (!reg_keyring)
+ return NULL;
+
+ ctx.index_key.desc_len = snprintf(buf, sizeof(buf), "_uid_ses.%u",
+ from_kuid(cred->user_ns,
+ cred->user->uid));
+
+ session_keyring_r = keyring_search_rcu(make_key_ref(reg_keyring, true),
+ &ctx);
+ if (IS_ERR(session_keyring_r))
+ return NULL;
+ return key_ref_to_ptr(session_keyring_r);
+}
+
+/*
* Install a thread keyring to the given credentials struct if it didn't have
* one already. This is allowed to overrun the quota.
*
@@ -289,34 +374,33 @@ static int install_session_keyring(struct key *keyring)
/*
* Handle the fsuid changing.
*/
-void key_fsuid_changed(struct task_struct *tsk)
+void key_fsuid_changed(struct cred *new_cred)
{
/* update the ownership of the thread keyring */
- BUG_ON(!tsk->cred);
- if (tsk->cred->thread_keyring) {
- down_write(&tsk->cred->thread_keyring->sem);
- tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
- up_write(&tsk->cred->thread_keyring->sem);
+ if (new_cred->thread_keyring) {
+ down_write(&new_cred->thread_keyring->sem);
+ new_cred->thread_keyring->uid = new_cred->fsuid;
+ up_write(&new_cred->thread_keyring->sem);
}
}
/*
* Handle the fsgid changing.
*/
-void key_fsgid_changed(struct task_struct *tsk)
+void key_fsgid_changed(struct cred *new_cred)
{
/* update the ownership of the thread keyring */
- BUG_ON(!tsk->cred);
- if (tsk->cred->thread_keyring) {
- down_write(&tsk->cred->thread_keyring->sem);
- tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
- up_write(&tsk->cred->thread_keyring->sem);
+ if (new_cred->thread_keyring) {
+ down_write(&new_cred->thread_keyring->sem);
+ new_cred->thread_keyring->gid = new_cred->fsgid;
+ up_write(&new_cred->thread_keyring->sem);
}
}
/*
* Search the process keyrings attached to the supplied cred for the first
- * matching key.
+ * matching key under RCU conditions (the caller must be holding the RCU read
+ * lock).
*
* The search criteria are the type and the match function. The description is
* given to the match function as a parameter, but doesn't otherwise influence
@@ -335,8 +419,9 @@ void key_fsgid_changed(struct task_struct *tsk)
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
-key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
+key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx)
{
+ struct key *user_session;
key_ref_t key_ref, ret, err;
const struct cred *cred = ctx->cred;
@@ -353,7 +438,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
/* search the thread keyring first */
if (cred->thread_keyring) {
- key_ref = keyring_search_aux(
+ key_ref = keyring_search_rcu(
make_key_ref(cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -371,7 +456,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
/* search the process keyring second */
if (cred->process_keyring) {
- key_ref = keyring_search_aux(
+ key_ref = keyring_search_rcu(
make_key_ref(cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -392,7 +477,7 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
/* search the session keyring */
if (cred->session_keyring) {
- key_ref = keyring_search_aux(
+ key_ref = keyring_search_rcu(
make_key_ref(cred->session_keyring, 1), ctx);
if (!IS_ERR(key_ref))
@@ -412,10 +497,11 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
}
}
/* or search the user-session keyring */
- else if (READ_ONCE(cred->user->session_keyring)) {
- key_ref = keyring_search_aux(
- make_key_ref(READ_ONCE(cred->user->session_keyring), 1),
- ctx);
+ else if ((user_session = get_user_session_keyring_rcu(cred))) {
+ key_ref = keyring_search_rcu(make_key_ref(user_session, 1),
+ ctx);
+ key_put(user_session);
+
if (!IS_ERR(key_ref))
goto found;
@@ -446,16 +532,16 @@ found:
* the keys attached to the assumed authorisation key using its credentials if
* one is available.
*
- * Return same as search_my_process_keyrings().
+ * The caller must be holding the RCU read lock.
+ *
+ * Return same as search_cred_keyrings_rcu().
*/
-key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
+key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
- might_sleep();
-
- key_ref = search_my_process_keyrings(ctx);
+ key_ref = search_cred_keyrings_rcu(ctx);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
@@ -470,24 +556,17 @@ key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
) {
const struct cred *cred = ctx->cred;
- /* defend against the auth key being revoked */
- down_read(&cred->request_key_auth->sem);
-
- if (key_validate(ctx->cred->request_key_auth) == 0) {
+ if (key_validate(cred->request_key_auth) == 0) {
rka = ctx->cred->request_key_auth->payload.data[0];
+ /* was search_process_keyrings() [i.e. recursive] */
ctx->cred = rka->cred;
- key_ref = search_process_keyrings(ctx);
+ key_ref = search_cred_keyrings_rcu(ctx);
ctx->cred = cred;
- up_read(&cred->request_key_auth->sem);
-
if (!IS_ERR(key_ref))
goto found;
-
ret = key_ref;
- } else {
- up_read(&cred->request_key_auth->sem);
}
}
@@ -502,7 +581,6 @@ key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
found:
return key_ref;
}
-
/*
* See if the key we're looking at is the target key.
*/
@@ -536,10 +614,11 @@ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
struct keyring_search_context ctx = {
.match_data.cmp = lookup_user_key_possessed,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
- .flags = KEYRING_SEARCH_NO_STATE_CHECK,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
};
struct request_key_auth *rka;
- struct key *key;
+ struct key *key, *user_session;
key_ref_t key_ref, skey_ref;
int ret;
@@ -588,20 +667,20 @@ try_again:
if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
- ret = install_user_keyrings();
+ ret = look_up_user_keyrings(NULL, &user_session);
if (ret < 0)
goto error;
if (lflags & KEY_LOOKUP_CREATE)
ret = join_session_keyring(NULL);
else
- ret = install_session_keyring(
- ctx.cred->user->session_keyring);
+ ret = install_session_keyring(user_session);
+ key_put(user_session);
if (ret < 0)
goto error;
goto reget_creds;
- } else if (ctx.cred->session_keyring ==
- READ_ONCE(ctx.cred->user->session_keyring) &&
+ } else if (test_bit(KEY_FLAG_UID_KEYRING,
+ &ctx.cred->session_keyring->flags) &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
@@ -615,26 +694,16 @@ try_again:
break;
case KEY_SPEC_USER_KEYRING:
- if (!READ_ONCE(ctx.cred->user->uid_keyring)) {
- ret = install_user_keyrings();
- if (ret < 0)
- goto error;
- }
-
- key = ctx.cred->user->uid_keyring;
- __key_get(key);
+ ret = look_up_user_keyrings(&key, NULL);
+ if (ret < 0)
+ goto error;
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
- if (!READ_ONCE(ctx.cred->user->session_keyring)) {
- ret = install_user_keyrings();
- if (ret < 0)
- goto error;
- }
-
- key = ctx.cred->user->session_keyring;
- __key_get(key);
+ ret = look_up_user_keyrings(NULL, &key);
+ if (ret < 0)
+ goto error;
key_ref = make_key_ref(key, 1);
break;
@@ -686,12 +755,12 @@ try_again:
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
- ctx.index_key.type = key->type;
- ctx.index_key.description = key->description;
- ctx.index_key.desc_len = strlen(key->description);
+ ctx.index_key = key->index_key;
ctx.match_data.raw_data = key;
kdebug("check possessed");
- skey_ref = search_process_keyrings(&ctx);
+ rcu_read_lock();
+ skey_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
@@ -883,7 +952,7 @@ void key_change_session_keyring(struct callback_head *twork)
*/
static int __init init_root_keyring(void)
{
- return install_user_keyrings();
+ return look_up_user_keyrings(NULL, NULL);
}
late_initcall(init_root_keyring);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 8ae3b7b18801..7325f382dbf4 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -13,14 +13,40 @@
#include <linux/err.h>
#include <linux/keyctl.h>
#include <linux/slab.h>
+#include <net/net_namespace.h>
#include "internal.h"
#include <keys/request_key_auth-type.h>
#define key_negative_timeout 60 /* default timeout on a negative key's existence */
+static struct key *check_cached_key(struct keyring_search_context *ctx)
+{
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ struct key *key = current->cached_requested_key;
+
+ if (key &&
+ ctx->match_data.cmp(key, &ctx->match_data) &&
+ !(key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))))
+ return key_get(key);
+#endif
+ return NULL;
+}
+
+static void cache_requested_key(struct key *key)
+{
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ struct task_struct *t = current;
+
+ key_put(t->cached_requested_key);
+ t->cached_requested_key = key_get(key);
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+#endif
+}
+
/**
* complete_request_key - Complete the construction of a key.
- * @auth_key: The authorisation key.
+ * @authkey: The authorisation key.
* @error: The success or failure of the construction.
*
* Complete the attempt to construct a key. The key will be negated
@@ -92,7 +118,7 @@ static int call_sbin_request_key(struct key *authkey, void *aux)
struct request_key_auth *rka = get_request_key_auth(authkey);
const struct cred *cred = current_cred();
key_serial_t prkey, sskey;
- struct key *key = rka->target_key, *keyring, *session;
+ struct key *key = rka->target_key, *keyring, *session, *user_session;
char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
char desc[20];
@@ -100,9 +126,9 @@ static int call_sbin_request_key(struct key *authkey, void *aux)
kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
- ret = install_user_keyrings();
+ ret = look_up_user_keyrings(NULL, &user_session);
if (ret < 0)
- goto error_alloc;
+ goto error_us;
/* allocate a new session keyring */
sprintf(desc, "_req.%u", key->serial);
@@ -140,7 +166,7 @@ static int call_sbin_request_key(struct key *authkey, void *aux)
session = cred->session_keyring;
if (!session)
- session = cred->user->session_keyring;
+ session = user_session;
sskey = session->serial;
sprintf(keyring_str[2], "%d", sskey);
@@ -182,6 +208,8 @@ error_link:
key_put(keyring);
error_alloc:
+ key_put(user_session);
+error_us:
complete_request_key(authkey, ret);
kleave(" = %d", ret);
return ret;
@@ -218,7 +246,7 @@ static int construct_key(struct key *key, const void *callout_info,
/* check that the actor called complete_request_key() prior to
* returning an error */
WARN_ON(ret < 0 &&
- !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
+ !test_bit(KEY_FLAG_INVALIDATED, &authkey->flags));
key_put(authkey);
kleave(" = %d", ret);
@@ -288,13 +316,15 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
/* fall through */
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
- dest_keyring =
- key_get(READ_ONCE(cred->user->session_keyring));
+ ret = look_up_user_keyrings(NULL, &dest_keyring);
+ if (ret < 0)
+ return ret;
break;
case KEY_REQKEY_DEFL_USER_KEYRING:
- dest_keyring =
- key_get(READ_ONCE(cred->user->uid_keyring));
+ ret = look_up_user_keyrings(&dest_keyring, NULL);
+ if (ret < 0)
+ return ret;
break;
case KEY_REQKEY_DEFL_GROUP_KEYRING:
@@ -339,7 +369,7 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
struct key_user *user,
struct key **_key)
{
- struct assoc_array_edit *edit;
+ struct assoc_array_edit *edit = NULL;
struct key *key;
key_perm_t perm;
key_ref_t key_ref;
@@ -368,6 +398,9 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
if (dest_keyring) {
+ ret = __key_link_lock(dest_keyring, &ctx->index_key);
+ if (ret < 0)
+ goto link_lock_failed;
ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
if (ret < 0)
goto link_prealloc_failed;
@@ -378,7 +411,9 @@ static int construct_alloc_key(struct keyring_search_context *ctx,
* waited for locks */
mutex_lock(&key_construction_mutex);
- key_ref = search_process_keyrings(ctx);
+ rcu_read_lock();
+ key_ref = search_process_keyrings_rcu(ctx);
+ rcu_read_unlock();
if (!IS_ERR(key_ref))
goto key_already_present;
@@ -419,6 +454,8 @@ link_check_failed:
return ret;
link_prealloc_failed:
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
+link_lock_failed:
mutex_unlock(&user->cons_lock);
key_put(key);
kleave(" = %d [prelink]", ret);
@@ -493,16 +530,18 @@ error:
* request_key_and_link - Request a key and cache it in a keyring.
* @type: The type of key we want.
* @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
* @callout_len: The length of callout_info.
* @aux: Auxiliary data for the upcall.
* @dest_keyring: Where to cache the key.
* @flags: Flags to key_alloc().
*
- * A key matching the specified criteria is searched for in the process's
- * keyrings and returned with its usage count incremented if found. Otherwise,
- * if callout_info is not NULL, a key will be allocated and some service
- * (probably in userspace) will be asked to instantiate it.
+ * A key matching the specified criteria (type, description, domain_tag) is
+ * searched for in the process's keyrings and returned with its usage count
+ * incremented if found. Otherwise, if callout_info is not NULL, a key will be
+ * allocated and some service (probably in userspace) will be asked to
+ * instantiate it.
*
* If successfully found or created, the key will be linked to the destination
* keyring if one is provided.
@@ -518,6 +557,7 @@ error:
*/
struct key *request_key_and_link(struct key_type *type,
const char *description,
+ struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux,
@@ -526,6 +566,7 @@ struct key *request_key_and_link(struct key_type *type,
{
struct keyring_search_context ctx = {
.index_key.type = type,
+ .index_key.domain_tag = domain_tag,
.index_key.description = description,
.index_key.desc_len = strlen(description),
.cred = current_cred(),
@@ -533,7 +574,8 @@ struct key *request_key_and_link(struct key_type *type,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = (KEYRING_SEARCH_DO_STATE_CHECK |
- KEYRING_SEARCH_SKIP_EXPIRED),
+ KEYRING_SEARCH_SKIP_EXPIRED |
+ KEYRING_SEARCH_RECURSE),
};
struct key *key;
key_ref_t key_ref;
@@ -551,10 +593,26 @@ struct key *request_key_and_link(struct key_type *type,
}
}
+ key = check_cached_key(&ctx);
+ if (key)
+ return key;
+
/* search all the process keyrings for a key */
- key_ref = search_process_keyrings(&ctx);
+ rcu_read_lock();
+ key_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
if (!IS_ERR(key_ref)) {
+ if (dest_keyring) {
+ ret = key_task_permission(key_ref, current_cred(),
+ KEY_NEED_LINK);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key = ERR_PTR(ret);
+ goto error_free;
+ }
+ }
+
key = key_ref_to_ptr(key_ref);
if (dest_keyring) {
ret = key_link(dest_keyring, key);
@@ -564,6 +622,9 @@ struct key *request_key_and_link(struct key_type *type,
goto error_free;
}
}
+
+ /* Only cache the key on immediate success */
+ cache_requested_key(key);
} else if (PTR_ERR(key_ref) != -EAGAIN) {
key = ERR_CAST(key_ref);
} else {
@@ -612,9 +673,10 @@ int wait_for_key_construction(struct key *key, bool intr)
EXPORT_SYMBOL(wait_for_key_construction);
/**
- * request_key - Request a key and wait for construction
+ * request_key_tag - Request a key and wait for construction
* @type: Type of key.
* @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
*
* As for request_key_and_link() except that it does not add the returned key
@@ -625,9 +687,10 @@ EXPORT_SYMBOL(wait_for_key_construction);
* Furthermore, it then works as wait_for_key_construction() to wait for the
* completion of keys undergoing construction with a non-interruptible wait.
*/
-struct key *request_key(struct key_type *type,
- const char *description,
- const char *callout_info)
+struct key *request_key_tag(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const char *callout_info)
{
struct key *key;
size_t callout_len = 0;
@@ -635,7 +698,8 @@ struct key *request_key(struct key_type *type,
if (callout_info)
callout_len = strlen(callout_info);
- key = request_key_and_link(type, description, callout_info, callout_len,
+ key = request_key_and_link(type, description, domain_tag,
+ callout_info, callout_len,
NULL, NULL, KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key)) {
ret = wait_for_key_construction(key, false);
@@ -646,12 +710,13 @@ struct key *request_key(struct key_type *type,
}
return key;
}
-EXPORT_SYMBOL(request_key);
+EXPORT_SYMBOL(request_key_tag);
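To show what the extra parameter buys (the key type and field use below are assumptions for illustration, not mandated by this hunk), a networked caller that scopes its keys to the owning network namespace would pass that namespace's domain tag so that identical descriptions in different namespaces no longer collide:

  /* Sketch: request a key scoped to a particular network namespace. */
  static struct key *example_request_net_key(struct net *net,
                                             const char *description)
  {
          return request_key_tag(&key_type_user, description,
                                 net->key_domain,  /* per-netns tag */
                                 NULL);            /* no callout data */
  }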
/**
* request_key_with_auxdata - Request a key with auxiliary data for the upcaller
* @type: The type of key we want.
* @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
* @callout_len: The length of callout_info.
* @aux: Auxiliary data for the upcall.
@@ -664,6 +729,7 @@ EXPORT_SYMBOL(request_key);
*/
struct key *request_key_with_auxdata(struct key_type *type,
const char *description,
+ struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux)
@@ -671,7 +737,8 @@ struct key *request_key_with_auxdata(struct key_type *type,
struct key *key;
int ret;
- key = request_key_and_link(type, description, callout_info, callout_len,
+ key = request_key_and_link(type, description, domain_tag,
+ callout_info, callout_len,
aux, NULL, KEY_ALLOC_IN_QUOTA);
if (!IS_ERR(key)) {
ret = wait_for_key_construction(key, false);
@@ -684,52 +751,55 @@ struct key *request_key_with_auxdata(struct key_type *type,
}
EXPORT_SYMBOL(request_key_with_auxdata);
-/*
- * request_key_async - Request a key (allow async construction)
- * @type: Type of key.
- * @description: The searchable description of the key.
- * @callout_info: The data to pass to the instantiation upcall (or NULL).
- * @callout_len: The length of callout_info.
+/**
+ * request_key_rcu - Request key from RCU-read-locked context
+ * @type: The type of key we want.
+ * @description: The name of the key we want.
+ * @domain_tag: The domain in which the key operates.
*
- * As for request_key_and_link() except that it does not add the returned key
- * to a keyring if found, new keys are always allocated in the user's quota and
- * no auxiliary data can be passed.
+ * Request a key from a context that we may not sleep in (such as RCU-mode
+ * pathwalk). Keys under construction are ignored.
*
- * The caller should call wait_for_key_construction() to wait for the
- * completion of the returned key if it is still undergoing construction.
+ * Return a pointer to the found key if successful, -ENOKEY if we couldn't find
+ * a key or some other error if the key found was unsuitable or inaccessible.
*/
-struct key *request_key_async(struct key_type *type,
- const char *description,
- const void *callout_info,
- size_t callout_len)
+struct key *request_key_rcu(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag)
{
- return request_key_and_link(type, description, callout_info,
- callout_len, NULL, NULL,
- KEY_ALLOC_IN_QUOTA);
-}
-EXPORT_SYMBOL(request_key_async);
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.domain_tag = domain_tag,
+ .index_key.description = description,
+ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_SKIP_EXPIRED),
+ };
+ struct key *key;
+ key_ref_t key_ref;
-/*
- * request a key with auxiliary data for the upcaller (allow async construction)
- * @type: Type of key.
- * @description: The searchable description of the key.
- * @callout_info: The data to pass to the instantiation upcall (or NULL).
- * @callout_len: The length of callout_info.
- * @aux: Auxiliary data for the upcall.
- *
- * As for request_key_and_link() except that it does not add the returned key
- * to a keyring if found and new keys are always allocated in the user's quota.
- *
- * The caller should call wait_for_key_construction() to wait for the
- * completion of the returned key if it is still undergoing construction.
- */
-struct key *request_key_async_with_auxdata(struct key_type *type,
- const char *description,
- const void *callout_info,
- size_t callout_len,
- void *aux)
-{
- return request_key_and_link(type, description, callout_info,
- callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA);
+ kenter("%s,%s", type->name, description);
+
+ key = check_cached_key(&ctx);
+ if (key)
+ return key;
+
+ /* search all the process keyrings for a key */
+ key_ref = search_process_keyrings_rcu(&ctx);
+ if (IS_ERR(key_ref)) {
+ key = ERR_CAST(key_ref);
+ if (PTR_ERR(key_ref) == -EAGAIN)
+ key = ERR_PTR(-ENOKEY);
+ } else {
+ key = key_ref_to_ptr(key_ref);
+ cache_requested_key(key);
+ }
+
+ kleave(" = %p", key);
+ return key;
}
-EXPORT_SYMBOL(request_key_async_with_auxdata);
+EXPORT_SYMBOL(request_key_rcu);
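A sketch of the intended calling pattern (the wrapper is hypothetical): the caller sits inside an RCU read-side critical section, gets back a key with an elevated usage count if one is usable, and must settle for -ENOKEY rather than triggering an upcall:

  /* Sketch: non-sleeping key lookup, e.g. from RCU-mode pathwalk. */
  static struct key *example_peek_key(struct key_tag *domain_tag,
                                      const char *description)
  {
          struct key *key;

          rcu_read_lock();
          key = request_key_rcu(&key_type_user, description, domain_tag);
          rcu_read_unlock();

          /* On success the search took a reference, so the key remains valid
           * after dropping the RCU lock; otherwise this is an ERR_PTR such
           * as -ENOKEY. */
          return key;
  }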
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index e45b5cf3b97f..e73ec040e250 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -54,7 +54,7 @@ static void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
static int request_key_auth_instantiate(struct key *key,
struct key_preparsed_payload *prep)
{
- key->payload.data[0] = (struct request_key_auth *)prep->data;
+ rcu_assign_keypointer(key, (struct request_key_auth *)prep->data);
return 0;
}
@@ -64,7 +64,7 @@ static int request_key_auth_instantiate(struct key *key,
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
{
- struct request_key_auth *rka = get_request_key_auth(key);
+ struct request_key_auth *rka = dereference_key_rcu(key);
seq_puts(m, "key:");
seq_puts(m, key->description);
@@ -79,7 +79,7 @@ static void request_key_auth_describe(const struct key *key,
static long request_key_auth_read(const struct key *key,
char __user *buffer, size_t buflen)
{
- struct request_key_auth *rka = get_request_key_auth(key);
+ struct request_key_auth *rka = dereference_key_locked(key);
size_t datalen;
long ret;
@@ -98,23 +98,6 @@ static long request_key_auth_read(const struct key *key,
return ret;
}
-/*
- * Handle revocation of an authorisation token key.
- *
- * Called with the key sem write-locked.
- */
-static void request_key_auth_revoke(struct key *key)
-{
- struct request_key_auth *rka = get_request_key_auth(key);
-
- kenter("{%d}", key->serial);
-
- if (rka->cred) {
- put_cred(rka->cred);
- rka->cred = NULL;
- }
-}
-
static void free_request_key_auth(struct request_key_auth *rka)
{
if (!rka)
@@ -128,15 +111,42 @@ static void free_request_key_auth(struct request_key_auth *rka)
}
/*
+ * Dispose of the request_key_auth record under RCU conditions
+ */
+static void request_key_auth_rcu_disposal(struct rcu_head *rcu)
+{
+ struct request_key_auth *rka =
+ container_of(rcu, struct request_key_auth, rcu);
+
+ free_request_key_auth(rka);
+}
+
+/*
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
+ */
+static void request_key_auth_revoke(struct key *key)
+{
+ struct request_key_auth *rka = dereference_key_locked(key);
+
+ kenter("{%d}", key->serial);
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&rka->rcu, request_key_auth_rcu_disposal);
+}
+
+/*
* Destroy an instantiation authorisation token key.
*/
static void request_key_auth_destroy(struct key *key)
{
- struct request_key_auth *rka = get_request_key_auth(key);
+ struct request_key_auth *rka = rcu_access_pointer(key->payload.rcu_data0);
kenter("{%d}", key->serial);
-
- free_request_key_auth(rka);
+ if (rka) {
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&rka->rcu, request_key_auth_rcu_disposal);
+ }
}
/*
@@ -148,7 +158,7 @@ struct key *request_key_auth_new(struct key *target, const char *op,
struct key *dest_keyring)
{
struct request_key_auth *rka, *irka;
- const struct cred *cred = current->cred;
+ const struct cred *cred = current_cred();
struct key *authkey = NULL;
char desc[20];
int ret = -ENOMEM;
@@ -200,7 +210,7 @@ struct key *request_key_auth_new(struct key *target, const char *op,
authkey = key_alloc(&key_type_request_key_auth, desc,
cred->fsuid, cred->fsgid, cred,
- KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
+ KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_POS_LINK |
KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
if (IS_ERR(authkey)) {
ret = PTR_ERR(authkey);
@@ -238,14 +248,17 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
- .flags = KEYRING_SEARCH_DO_STATE_CHECK,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
};
struct key *authkey;
key_ref_t authkey_ref;
ctx.index_key.desc_len = sprintf(description, "%x", target_id);
- authkey_ref = search_process_keyrings(&ctx);
+ rcu_read_lock();
+ authkey_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
if (IS_ERR(authkey_ref)) {
authkey = ERR_CAST(authkey_ref);
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 79131efa9634..81519c804888 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -37,6 +37,8 @@ static void report_load(const char *origin, struct file *file, char *operation)
}
static int enforce = IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE);
+static char *exclude_read_files[READING_MAX_ID];
+static int ignore_read_file_id[READING_MAX_ID] __ro_after_init;
static struct super_block *pinned_root;
static DEFINE_SPINLOCK(pinned_root_spinlock);
@@ -121,6 +123,13 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
struct super_block *load_root;
const char *origin = kernel_read_file_id_str(id);
+ /* If the file id is excluded, ignore the pinning. */
+ if ((unsigned int)id < ARRAY_SIZE(ignore_read_file_id) &&
+ ignore_read_file_id[id]) {
+ report_load(origin, file, "pinning-excluded");
+ return 0;
+ }
+
/* This handles the older init_module API that has a NULL file. */
if (!file) {
if (!enforce) {
@@ -179,10 +188,47 @@ static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(kernel_load_data, loadpin_load_data),
};
+static void __init parse_exclude(void)
+{
+ int i, j;
+ char *cur;
+
+ /*
+ * Make sure all the arrays stay within expected sizes. This
+ * is slightly weird because kernel_read_file_str[] includes
+ * READING_MAX_ID, which isn't actually meaningful here.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(exclude_read_files) !=
+ ARRAY_SIZE(ignore_read_file_id));
+ BUILD_BUG_ON(ARRAY_SIZE(kernel_read_file_str) <
+ ARRAY_SIZE(ignore_read_file_id));
+
+ for (i = 0; i < ARRAY_SIZE(exclude_read_files); i++) {
+ cur = exclude_read_files[i];
+ if (!cur)
+ break;
+ if (*cur == '\0')
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(ignore_read_file_id); j++) {
+ if (strcmp(cur, kernel_read_file_str[j]) == 0) {
+ pr_info("excluding: %s\n",
+ kernel_read_file_str[j]);
+ ignore_read_file_id[j] = 1;
+ /*
+				 * Cannot break, because one read_file_str
+				 * may map to more than one read_file_id.
+ */
+ }
+ }
+ }
+}
+
static int __init loadpin_init(void)
{
pr_info("ready to pin (currently %senforcing)\n",
enforce ? "" : "not ");
+ parse_exclude();
security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks), "loadpin");
return 0;
}
@@ -195,3 +241,5 @@ DEFINE_LSM(loadpin) = {
/* Should not be mutable after boot, so not listed in sysfs (perm == 0). */
module_param(enforce, int, 0);
MODULE_PARM_DESC(enforce, "Enforce module/firmware pinning");
+module_param_array_named(exclude, exclude_read_files, charp, NULL, 0);
+MODULE_PARM_DESC(exclude, "Exclude pinning specific read file types");
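Because LoadPin is normally built in, the new "exclude" list is expected to be supplied on the kernel command line rather than at modprobe time, for instance something along the lines of loadpin.exclude=kexec-image,kexec-initramfs (the accepted names are whatever strings kernel_read_file_str[] defines for enum kernel_read_file_id). Any file type named this way is logged as "pinning-excluded" and skipped by the pinning check above.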
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 33028c098ef3..e40874373f2b 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* common LSM auditing functions
*
@@ -5,10 +6,6 @@
* Stephen Smalley, <sds@tycho.nsa.gov>
* James Morris <jmorris@redhat.com>
* Author : Etienne Basset, <etienne.basset@ensta.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/types.h>
diff --git a/security/safesetid/lsm.c b/security/safesetid/lsm.c
index cecd38e2ac80..7760019ad35d 100644
--- a/security/safesetid/lsm.c
+++ b/security/safesetid/lsm.c
@@ -14,67 +14,50 @@
#define pr_fmt(fmt) "SafeSetID: " fmt
-#include <linux/hashtable.h>
#include <linux/lsm_hooks.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
+#include "lsm.h"
/* Flag indicating whether initialization completed */
int safesetid_initialized;
-#define NUM_BITS 8 /* 128 buckets in hash table */
+struct setuid_ruleset __rcu *safesetid_setuid_rules;
-static DEFINE_HASHTABLE(safesetid_whitelist_hashtable, NUM_BITS);
-
-/*
- * Hash table entry to store safesetid policy signifying that 'parent' user
- * can setid to 'child' user.
- */
-struct entry {
- struct hlist_node next;
- struct hlist_node dlist; /* for deletion cleanup */
- uint64_t parent_kuid;
- uint64_t child_kuid;
-};
-
-static DEFINE_SPINLOCK(safesetid_whitelist_hashtable_spinlock);
-
-static bool check_setuid_policy_hashtable_key(kuid_t parent)
+/* Compute a decision for a transition from @src to @dst under @policy. */
+enum sid_policy_type _setuid_policy_lookup(struct setuid_ruleset *policy,
+ kuid_t src, kuid_t dst)
{
- struct entry *entry;
-
- rcu_read_lock();
- hash_for_each_possible_rcu(safesetid_whitelist_hashtable,
- entry, next, __kuid_val(parent)) {
- if (entry->parent_kuid == __kuid_val(parent)) {
- rcu_read_unlock();
- return true;
- }
+ struct setuid_rule *rule;
+ enum sid_policy_type result = SIDPOL_DEFAULT;
+
+ hash_for_each_possible(policy->rules, rule, next, __kuid_val(src)) {
+ if (!uid_eq(rule->src_uid, src))
+ continue;
+ if (uid_eq(rule->dst_uid, dst))
+ return SIDPOL_ALLOWED;
+ result = SIDPOL_CONSTRAINED;
}
- rcu_read_unlock();
-
- return false;
+ return result;
}
-static bool check_setuid_policy_hashtable_key_value(kuid_t parent,
- kuid_t child)
+/*
+ * Compute a decision for a transition from @src to @dst under the active
+ * policy.
+ */
+static enum sid_policy_type setuid_policy_lookup(kuid_t src, kuid_t dst)
{
- struct entry *entry;
+ enum sid_policy_type result = SIDPOL_DEFAULT;
+ struct setuid_ruleset *pol;
rcu_read_lock();
- hash_for_each_possible_rcu(safesetid_whitelist_hashtable,
- entry, next, __kuid_val(parent)) {
- if (entry->parent_kuid == __kuid_val(parent) &&
- entry->child_kuid == __kuid_val(child)) {
- rcu_read_unlock();
- return true;
- }
- }
+ pol = rcu_dereference(safesetid_setuid_rules);
+ if (pol)
+ result = _setuid_policy_lookup(pol, src, dst);
rcu_read_unlock();
-
- return false;
+ return result;
}
static int safesetid_security_capable(const struct cred *cred,
@@ -82,37 +65,59 @@ static int safesetid_security_capable(const struct cred *cred,
int cap,
unsigned int opts)
{
- if (cap == CAP_SETUID &&
- check_setuid_policy_hashtable_key(cred->uid)) {
- if (!(opts & CAP_OPT_INSETID)) {
- /*
- * Deny if we're not in a set*uid() syscall to avoid
- * giving powers gated by CAP_SETUID that are related
- * to functionality other than calling set*uid() (e.g.
- * allowing user to set up userns uid mappings).
- */
- pr_warn("Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions",
- __kuid_val(cred->uid));
- return -1;
- }
- }
- return 0;
+ /* We're only interested in CAP_SETUID. */
+ if (cap != CAP_SETUID)
+ return 0;
+
+ /*
+ * If CAP_SETUID is currently used for a set*uid() syscall, we want to
+ * let it go through here; the real security check happens later, in the
+ * task_fix_setuid hook.
+ */
+ if ((opts & CAP_OPT_INSETID) != 0)
+ return 0;
+
+ /*
+ * If no policy applies to this task, allow the use of CAP_SETUID for
+ * other purposes.
+ */
+ if (setuid_policy_lookup(cred->uid, INVALID_UID) == SIDPOL_DEFAULT)
+ return 0;
+
+ /*
+ * Reject use of CAP_SETUID for functionality other than calling
+ * set*uid() (e.g. setting up userns uid mappings).
+ */
+ pr_warn("Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions\n",
+ __kuid_val(cred->uid));
+ return -EPERM;
}
-static int check_uid_transition(kuid_t parent, kuid_t child)
+/*
+ * Check whether a caller with old credentials @old is allowed to switch to
+ * credentials that contain @new_uid.
+ */
+static bool uid_permitted_for_cred(const struct cred *old, kuid_t new_uid)
{
- if (check_setuid_policy_hashtable_key_value(parent, child))
- return 0;
- pr_warn("UID transition (%d -> %d) blocked",
- __kuid_val(parent),
- __kuid_val(child));
+ bool permitted;
+
+ /* If our old creds already had this UID in it, it's fine. */
+ if (uid_eq(new_uid, old->uid) || uid_eq(new_uid, old->euid) ||
+ uid_eq(new_uid, old->suid))
+ return true;
+
/*
- * Kill this process to avoid potential security vulnerabilities
- * that could arise from a missing whitelist entry preventing a
- * privileged process from dropping to a lesser-privileged one.
+ * Transitions to new UIDs require a check against the policy of the old
+ * RUID.
*/
- force_sig(SIGKILL, current);
- return -EACCES;
+ permitted =
+ setuid_policy_lookup(old->uid, new_uid) != SIDPOL_CONSTRAINED;
+ if (!permitted) {
+ pr_warn("UID transition ((%d,%d,%d) -> %d) blocked\n",
+ __kuid_val(old->uid), __kuid_val(old->euid),
+ __kuid_val(old->suid), __kuid_val(new_uid));
+ }
+ return permitted;
}
/*
@@ -125,134 +130,23 @@ static int safesetid_task_fix_setuid(struct cred *new,
int flags)
{
- /* Do nothing if there are no setuid restrictions for this UID. */
- if (!check_setuid_policy_hashtable_key(old->uid))
+ /* Do nothing if there are no setuid restrictions for our old RUID. */
+ if (setuid_policy_lookup(old->uid, INVALID_UID) == SIDPOL_DEFAULT)
return 0;
- switch (flags) {
- case LSM_SETID_RE:
- /*
- * Users for which setuid restrictions exist can only set the
- * real UID to the real UID or the effective UID, unless an
- * explicit whitelist policy allows the transition.
- */
- if (!uid_eq(old->uid, new->uid) &&
- !uid_eq(old->euid, new->uid)) {
- return check_uid_transition(old->uid, new->uid);
- }
- /*
- * Users for which setuid restrictions exist can only set the
- * effective UID to the real UID, the effective UID, or the
- * saved set-UID, unless an explicit whitelist policy allows
- * the transition.
- */
- if (!uid_eq(old->uid, new->euid) &&
- !uid_eq(old->euid, new->euid) &&
- !uid_eq(old->suid, new->euid)) {
- return check_uid_transition(old->euid, new->euid);
- }
- break;
- case LSM_SETID_ID:
- /*
- * Users for which setuid restrictions exist cannot change the
- * real UID or saved set-UID unless an explicit whitelist
- * policy allows the transition.
- */
- if (!uid_eq(old->uid, new->uid))
- return check_uid_transition(old->uid, new->uid);
- if (!uid_eq(old->suid, new->suid))
- return check_uid_transition(old->suid, new->suid);
- break;
- case LSM_SETID_RES:
- /*
- * Users for which setuid restrictions exist cannot change the
- * real UID, effective UID, or saved set-UID to anything but
- * one of: the current real UID, the current effective UID or
- * the current saved set-user-ID unless an explicit whitelist
- * policy allows the transition.
- */
- if (!uid_eq(new->uid, old->uid) &&
- !uid_eq(new->uid, old->euid) &&
- !uid_eq(new->uid, old->suid)) {
- return check_uid_transition(old->uid, new->uid);
- }
- if (!uid_eq(new->euid, old->uid) &&
- !uid_eq(new->euid, old->euid) &&
- !uid_eq(new->euid, old->suid)) {
- return check_uid_transition(old->euid, new->euid);
- }
- if (!uid_eq(new->suid, old->uid) &&
- !uid_eq(new->suid, old->euid) &&
- !uid_eq(new->suid, old->suid)) {
- return check_uid_transition(old->suid, new->suid);
- }
- break;
- case LSM_SETID_FS:
- /*
- * Users for which setuid restrictions exist cannot change the
- * filesystem UID to anything but one of: the current real UID,
- * the current effective UID or the current saved set-UID
- * unless an explicit whitelist policy allows the transition.
- */
- if (!uid_eq(new->fsuid, old->uid) &&
- !uid_eq(new->fsuid, old->euid) &&
- !uid_eq(new->fsuid, old->suid) &&
- !uid_eq(new->fsuid, old->fsuid)) {
- return check_uid_transition(old->fsuid, new->fsuid);
- }
- break;
- default:
- pr_warn("Unknown setid state %d\n", flags);
- force_sig(SIGKILL, current);
- return -EINVAL;
- }
- return 0;
-}
-
-int add_safesetid_whitelist_entry(kuid_t parent, kuid_t child)
-{
- struct entry *new;
-
- /* Return if entry already exists */
- if (check_setuid_policy_hashtable_key_value(parent, child))
+ if (uid_permitted_for_cred(old, new->uid) &&
+ uid_permitted_for_cred(old, new->euid) &&
+ uid_permitted_for_cred(old, new->suid) &&
+ uid_permitted_for_cred(old, new->fsuid))
return 0;
- new = kzalloc(sizeof(struct entry), GFP_KERNEL);
- if (!new)
- return -ENOMEM;
- new->parent_kuid = __kuid_val(parent);
- new->child_kuid = __kuid_val(child);
- spin_lock(&safesetid_whitelist_hashtable_spinlock);
- hash_add_rcu(safesetid_whitelist_hashtable,
- &new->next,
- __kuid_val(parent));
- spin_unlock(&safesetid_whitelist_hashtable_spinlock);
- return 0;
-}
-
-void flush_safesetid_whitelist_entries(void)
-{
- struct entry *entry;
- struct hlist_node *hlist_node;
- unsigned int bkt_loop_cursor;
- HLIST_HEAD(free_list);
-
/*
- * Could probably use hash_for_each_rcu here instead, but this should
- * be fine as well.
+ * Kill this process to avoid potential security vulnerabilities
+ * that could arise from a missing whitelist entry preventing a
+ * privileged process from dropping to a lesser-privileged one.
*/
- spin_lock(&safesetid_whitelist_hashtable_spinlock);
- hash_for_each_safe(safesetid_whitelist_hashtable, bkt_loop_cursor,
- hlist_node, entry, next) {
- hash_del_rcu(&entry->next);
- hlist_add_head(&entry->dlist, &free_list);
- }
- spin_unlock(&safesetid_whitelist_hashtable_spinlock);
- synchronize_rcu();
- hlist_for_each_entry_safe(entry, hlist_node, &free_list, dlist) {
- hlist_del(&entry->dlist);
- kfree(entry);
- }
+ force_sig(SIGKILL);
+ return -EACCES;
}
static struct security_hook_list safesetid_security_hooks[] = {
diff --git a/security/safesetid/lsm.h b/security/safesetid/lsm.h
index c1ea3c265fcf..db6d16e6bbc3 100644
--- a/security/safesetid/lsm.h
+++ b/security/safesetid/lsm.h
@@ -15,19 +15,39 @@
#define _SAFESETID_H
#include <linux/types.h>
+#include <linux/uidgid.h>
+#include <linux/hashtable.h>
/* Flag indicating whether initialization completed */
extern int safesetid_initialized;
-/* Function type. */
-enum safesetid_whitelist_file_write_type {
- SAFESETID_WHITELIST_ADD, /* Add whitelist policy. */
- SAFESETID_WHITELIST_FLUSH, /* Flush whitelist policies. */
+enum sid_policy_type {
+ SIDPOL_DEFAULT, /* source ID is unaffected by policy */
+ SIDPOL_CONSTRAINED, /* source ID is affected by policy */
+ SIDPOL_ALLOWED /* target ID explicitly allowed */
};
-/* Add entry to safesetid whitelist to allow 'parent' to setid to 'child'. */
-int add_safesetid_whitelist_entry(kuid_t parent, kuid_t child);
+/*
+ * Hash table entry to store safesetid policy signifying that 'src_uid'
+ * can setuid to 'dst_uid'.
+ */
+struct setuid_rule {
+ struct hlist_node next;
+ kuid_t src_uid;
+ kuid_t dst_uid;
+};
+
+#define SETID_HASH_BITS 8 /* 256 buckets in hash table */
+
+struct setuid_ruleset {
+ DECLARE_HASHTABLE(rules, SETID_HASH_BITS);
+ char *policy_str;
+ struct rcu_head rcu;
+};
+
+enum sid_policy_type _setuid_policy_lookup(struct setuid_ruleset *policy,
+ kuid_t src, kuid_t dst);
-void flush_safesetid_whitelist_entries(void);
+extern struct setuid_ruleset __rcu *safesetid_setuid_rules;
#endif /* _SAFESETID_H */
diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
index 2c6c829be044..d568e17dd773 100644
--- a/security/safesetid/securityfs.c
+++ b/security/safesetid/securityfs.c
@@ -11,92 +11,184 @@
* published by the Free Software Foundation.
*
*/
+
+#define pr_fmt(fmt) "SafeSetID: " fmt
+
#include <linux/security.h>
#include <linux/cred.h>
#include "lsm.h"
-static struct dentry *safesetid_policy_dir;
-
-struct safesetid_file_entry {
- const char *name;
- enum safesetid_whitelist_file_write_type type;
- struct dentry *dentry;
-};
-
-static struct safesetid_file_entry safesetid_files[] = {
- {.name = "add_whitelist_policy",
- .type = SAFESETID_WHITELIST_ADD},
- {.name = "flush_whitelist_policies",
- .type = SAFESETID_WHITELIST_FLUSH},
-};
+static DEFINE_MUTEX(policy_update_lock);
/*
* In the case the input buffer contains one or more invalid UIDs, the kuid_t
- * variables pointed to by 'parent' and 'child' will get updated but this
+ * variables pointed to by @parent and @child will get updated but this
* function will return an error.
+ * Contents of @buf may be modified.
*/
-static int parse_safesetid_whitelist_policy(const char __user *buf,
- size_t len,
- kuid_t *parent,
- kuid_t *child)
+static int parse_policy_line(struct file *file, char *buf,
+ struct setuid_rule *rule)
{
- char *kern_buf;
- char *parent_buf;
- char *child_buf;
- const char separator[] = ":";
+ char *child_str;
int ret;
- size_t first_substring_length;
- long parsed_parent;
- long parsed_child;
+ u32 parsed_parent, parsed_child;
- /* Duplicate string from user memory and NULL-terminate */
- kern_buf = memdup_user_nul(buf, len);
- if (IS_ERR(kern_buf))
- return PTR_ERR(kern_buf);
+	/* Format of @buf string should be <UID>:<UID>. */
+ child_str = strchr(buf, ':');
+ if (child_str == NULL)
+ return -EINVAL;
+ *child_str = '\0';
+ child_str++;
- /*
- * Format of |buf| string should be <UID>:<UID>.
- * Find location of ":" in kern_buf (copied from |buf|).
- */
- first_substring_length = strcspn(kern_buf, separator);
- if (first_substring_length == 0 || first_substring_length == len) {
- ret = -EINVAL;
- goto free_kern;
- }
+ ret = kstrtou32(buf, 0, &parsed_parent);
+ if (ret)
+ return ret;
+
+ ret = kstrtou32(child_str, 0, &parsed_child);
+ if (ret)
+ return ret;
- parent_buf = kmemdup_nul(kern_buf, first_substring_length, GFP_KERNEL);
- if (!parent_buf) {
- ret = -ENOMEM;
- goto free_kern;
+ rule->src_uid = make_kuid(file->f_cred->user_ns, parsed_parent);
+ rule->dst_uid = make_kuid(file->f_cred->user_ns, parsed_child);
+ if (!uid_valid(rule->src_uid) || !uid_valid(rule->dst_uid))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void __release_ruleset(struct rcu_head *rcu)
+{
+ struct setuid_ruleset *pol =
+ container_of(rcu, struct setuid_ruleset, rcu);
+ int bucket;
+ struct setuid_rule *rule;
+ struct hlist_node *tmp;
+
+ hash_for_each_safe(pol->rules, bucket, tmp, rule, next)
+ kfree(rule);
+ kfree(pol->policy_str);
+ kfree(pol);
+}
+
+static void release_ruleset(struct setuid_ruleset *pol)
+{
+ call_rcu(&pol->rcu, __release_ruleset);
+}
+
+static void insert_rule(struct setuid_ruleset *pol, struct setuid_rule *rule)
+{
+ hash_add(pol->rules, &rule->next, __kuid_val(rule->src_uid));
+}
+
+static int verify_ruleset(struct setuid_ruleset *pol)
+{
+ int bucket;
+ struct setuid_rule *rule, *nrule;
+ int res = 0;
+
+ hash_for_each(pol->rules, bucket, rule, next) {
+ if (_setuid_policy_lookup(pol, rule->dst_uid, INVALID_UID) ==
+ SIDPOL_DEFAULT) {
+ pr_warn("insecure policy detected: uid %d is constrained but transitively unconstrained through uid %d\n",
+ __kuid_val(rule->src_uid),
+ __kuid_val(rule->dst_uid));
+ res = -EINVAL;
+
+			/* fix it up: constrain the dst UID to itself */
+ nrule = kmalloc(sizeof(struct setuid_rule), GFP_KERNEL);
+ if (!nrule)
+ return -ENOMEM;
+ nrule->src_uid = rule->dst_uid;
+ nrule->dst_uid = rule->dst_uid;
+ insert_rule(pol, nrule);
+ }
}
+ return res;
+}
- ret = kstrtol(parent_buf, 0, &parsed_parent);
- if (ret)
- goto free_both;
+static ssize_t handle_policy_update(struct file *file,
+ const char __user *ubuf, size_t len)
+{
+ struct setuid_ruleset *pol;
+ char *buf, *p, *end;
+ int err;
- child_buf = kern_buf + first_substring_length + 1;
- ret = kstrtol(child_buf, 0, &parsed_child);
- if (ret)
- goto free_both;
+ pol = kmalloc(sizeof(struct setuid_ruleset), GFP_KERNEL);
+ if (!pol)
+ return -ENOMEM;
+ pol->policy_str = NULL;
+ hash_init(pol->rules);
- *parent = make_kuid(current_user_ns(), parsed_parent);
- if (!uid_valid(*parent)) {
- ret = -EINVAL;
- goto free_both;
+ p = buf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(buf)) {
+ err = PTR_ERR(buf);
+ goto out_free_pol;
}
+ pol->policy_str = kstrdup(buf, GFP_KERNEL);
+ if (pol->policy_str == NULL) {
+ err = -ENOMEM;
+ goto out_free_buf;
+ }
+
+ /* policy lines, including the last one, end with \n */
+ while (*p != '\0') {
+ struct setuid_rule *rule;
+
+ end = strchr(p, '\n');
+ if (end == NULL) {
+ err = -EINVAL;
+ goto out_free_buf;
+ }
+ *end = '\0';
+
+ rule = kmalloc(sizeof(struct setuid_rule), GFP_KERNEL);
+ if (!rule) {
+ err = -ENOMEM;
+ goto out_free_buf;
+ }
- *child = make_kuid(current_user_ns(), parsed_child);
- if (!uid_valid(*child)) {
- ret = -EINVAL;
- goto free_both;
+ err = parse_policy_line(file, p, rule);
+ if (err)
+ goto out_free_rule;
+
+ if (_setuid_policy_lookup(pol, rule->src_uid, rule->dst_uid) ==
+ SIDPOL_ALLOWED) {
+ pr_warn("bad policy: duplicate entry\n");
+ err = -EEXIST;
+ goto out_free_rule;
+ }
+
+ insert_rule(pol, rule);
+ p = end + 1;
+ continue;
+
+out_free_rule:
+ kfree(rule);
+ goto out_free_buf;
}
-free_both:
- kfree(parent_buf);
-free_kern:
- kfree(kern_buf);
- return ret;
+ err = verify_ruleset(pol);
+	/* a policy flagged -EINVAL was fixed up above and still applies */
+ if (err && err != -EINVAL)
+ goto out_free_buf;
+
+ /*
+ * Everything looks good, apply the policy and release the old one.
+ * What we really want here is an xchg() wrapper for RCU, but since that
+	 * doesn't currently exist, just use a mutex for now.
+ */
+ mutex_lock(&policy_update_lock);
+ rcu_swap_protected(safesetid_setuid_rules, pol,
+ lockdep_is_held(&policy_update_lock));
+ mutex_unlock(&policy_update_lock);
+ err = len;
+
+out_free_buf:
+ kfree(buf);
+out_free_pol:
+	if (pol)
+		release_ruleset(pol);
+ return err;
}
static ssize_t safesetid_file_write(struct file *file,
@@ -104,90 +196,65 @@ static ssize_t safesetid_file_write(struct file *file,
size_t len,
loff_t *ppos)
{
- struct safesetid_file_entry *file_entry =
- file->f_inode->i_private;
- kuid_t parent;
- kuid_t child;
- int ret;
-
- if (!ns_capable(current_user_ns(), CAP_MAC_ADMIN))
+ if (!file_ns_capable(file, &init_user_ns, CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
- switch (file_entry->type) {
- case SAFESETID_WHITELIST_FLUSH:
- flush_safesetid_whitelist_entries();
- break;
- case SAFESETID_WHITELIST_ADD:
- ret = parse_safesetid_whitelist_policy(buf, len, &parent,
- &child);
- if (ret)
- return ret;
-
- ret = add_safesetid_whitelist_entry(parent, child);
- if (ret)
- return ret;
- break;
- default:
- pr_warn("Unknown securityfs file %d\n", file_entry->type);
- break;
- }
-
- /* Return len on success so caller won't keep trying to write */
- return len;
+ return handle_policy_update(file, buf, len);
}
-static const struct file_operations safesetid_file_fops = {
- .write = safesetid_file_write,
-};
-
-static void safesetid_shutdown_securityfs(void)
+static ssize_t safesetid_file_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
{
- int i;
+ ssize_t res = 0;
+ struct setuid_ruleset *pol;
+ const char *kbuf;
- for (i = 0; i < ARRAY_SIZE(safesetid_files); ++i) {
- struct safesetid_file_entry *entry =
- &safesetid_files[i];
- securityfs_remove(entry->dentry);
- entry->dentry = NULL;
+ mutex_lock(&policy_update_lock);
+ pol = rcu_dereference_protected(safesetid_setuid_rules,
+ lockdep_is_held(&policy_update_lock));
+ if (pol) {
+ kbuf = pol->policy_str;
+ res = simple_read_from_buffer(buf, len, ppos,
+ kbuf, strlen(kbuf));
}
-
- securityfs_remove(safesetid_policy_dir);
- safesetid_policy_dir = NULL;
+ mutex_unlock(&policy_update_lock);
+ return res;
}
+static const struct file_operations safesetid_file_fops = {
+ .read = safesetid_file_read,
+ .write = safesetid_file_write,
+};
+
static int __init safesetid_init_securityfs(void)
{
- int i;
int ret;
+ struct dentry *policy_dir;
+ struct dentry *policy_file;
if (!safesetid_initialized)
return 0;
- safesetid_policy_dir = securityfs_create_dir("safesetid", NULL);
- if (IS_ERR(safesetid_policy_dir)) {
- ret = PTR_ERR(safesetid_policy_dir);
+ policy_dir = securityfs_create_dir("safesetid", NULL);
+ if (IS_ERR(policy_dir)) {
+ ret = PTR_ERR(policy_dir);
goto error;
}
- for (i = 0; i < ARRAY_SIZE(safesetid_files); ++i) {
- struct safesetid_file_entry *entry =
- &safesetid_files[i];
- entry->dentry = securityfs_create_file(
- entry->name, 0200, safesetid_policy_dir,
- entry, &safesetid_file_fops);
- if (IS_ERR(entry->dentry)) {
- ret = PTR_ERR(entry->dentry);
- goto error;
- }
+ policy_file = securityfs_create_file("whitelist_policy", 0600,
+ policy_dir, NULL, &safesetid_file_fops);
+ if (IS_ERR(policy_file)) {
+ ret = PTR_ERR(policy_file);
+ goto error;
}
return 0;
error:
- safesetid_shutdown_securityfs();
+ securityfs_remove(policy_dir);
return ret;
}
fs_initcall(safesetid_init_securityfs);
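As a rough illustration of the new single-file interface: the whole policy is replaced by one write() of newline-terminated "<parent UID>:<child UID>" lines to the "whitelist_policy" file, and the writer needs CAP_MAC_ADMIN in the initial user namespace. The sketch below is hypothetical; it assumes securityfs is mounted at its usual /sys/kernel/security location and uses made-up UIDs. The self-rules keep the destination UIDs constrained so that verify_ruleset() does not warn and fix the policy up.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* UID 123 may transition to 456 or 789; every line ends in '\n'. */
		const char policy[] = "123:456\n123:789\n456:456\n789:789\n";
		int fd = open("/sys/kernel/security/safesetid/whitelist_policy",
			      O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, policy, strlen(policy)) != (ssize_t)strlen(policy))
			perror("write");
		close(fd);
		return 0;
	}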
diff --git a/security/security.c b/security/security.c
index f493db0bf62a..250ee2d76406 100644
--- a/security/security.c
+++ b/security/security.c
@@ -35,7 +35,7 @@
#define LSM_COUNT (__end_lsm_info - __start_lsm_info)
struct security_hook_heads security_hook_heads __lsm_ro_after_init;
-static ATOMIC_NOTIFIER_HEAD(lsm_notifier_chain);
+static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
static struct kmem_cache *lsm_file_cache;
static struct kmem_cache *lsm_inode_cache;
@@ -426,23 +426,26 @@ void __init security_add_hooks(struct security_hook_list *hooks, int count,
panic("%s - Cannot get early memory.\n", __func__);
}
-int call_lsm_notifier(enum lsm_event event, void *data)
+int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
- return atomic_notifier_call_chain(&lsm_notifier_chain, event, data);
+ return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
+ event, data);
}
-EXPORT_SYMBOL(call_lsm_notifier);
+EXPORT_SYMBOL(call_blocking_lsm_notifier);
-int register_lsm_notifier(struct notifier_block *nb)
+int register_blocking_lsm_notifier(struct notifier_block *nb)
{
- return atomic_notifier_chain_register(&lsm_notifier_chain, nb);
+ return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
+ nb);
}
-EXPORT_SYMBOL(register_lsm_notifier);
+EXPORT_SYMBOL(register_blocking_lsm_notifier);
-int unregister_lsm_notifier(struct notifier_block *nb)
+int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
- return atomic_notifier_chain_unregister(&lsm_notifier_chain, nb);
+ return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
+ nb);
}
-EXPORT_SYMBOL(unregister_lsm_notifier);
+EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
/**
* lsm_cred_alloc - allocate a composite cred blob
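With the chain now blocking rather than atomic, notifier callbacks run in process context and may sleep. A minimal, hypothetical consumer of the renamed API might look like the following (all names here are illustrative only):

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/printk.h>
	#include <linux/security.h>

	static int example_lsm_event(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		if (event == LSM_POLICY_CHANGE)
			pr_info("example: LSM policy changed\n");
		return NOTIFY_OK;
	}

	static struct notifier_block example_lsm_nb = {
		.notifier_call = example_lsm_event,
	};

	static int __init example_init(void)
	{
		return register_blocking_lsm_notifier(&example_lsm_nb);
	}

	static void __exit example_exit(void)
	{
		unregister_blocking_lsm_notifier(&example_lsm_nb);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");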
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index a99be508f93d..ecd3829996aa 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Implementation of the kernel access vector cache (AVC).
*
@@ -8,10 +9,6 @@
* Replaced the avc_lock spinlock by RCU.
*
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/stddef.h>
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index fea66f6b31bf..74dd46de01b6 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NSA Security-Enhanced Linux (SELinux) security module
*
@@ -18,10 +19,6 @@
* Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
* Yuichi Nakamura <ynakam@hitachisoft.jp>
* Copyright (C) 2016 Mellanox Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/init.h>
@@ -197,7 +194,7 @@ static int selinux_lsm_notifier_avc_callback(u32 event)
{
if (event == AVC_CALLBACK_RESET) {
sel_ib_pkey_flush();
- call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
+ call_blocking_lsm_notifier(LSM_POLICY_CHANGE, NULL);
}
return 0;
@@ -6354,11 +6351,12 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
} else if (!strcmp(name, "fscreate")) {
tsec->create_sid = sid;
} else if (!strcmp(name, "keycreate")) {
- error = avc_has_perm(&selinux_state,
- mysid, sid, SECCLASS_KEY, KEY__CREATE,
- NULL);
- if (error)
- goto abort_change;
+ if (sid) {
+ error = avc_has_perm(&selinux_state, mysid, sid,
+ SECCLASS_KEY, KEY__CREATE, NULL);
+ if (error)
+ goto abort_change;
+ }
tsec->keycreate_sid = sid;
} else if (!strcmp(name, "sockcreate")) {
tsec->sockcreate_sid = sid;
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
index 682e2b5de2a4..073a3d34a0d2 100644
--- a/security/selinux/include/audit.h
+++ b/security/selinux/include/audit.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* SELinux support for the Audit LSM hooks
*
@@ -6,10 +7,6 @@
* Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
* Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#ifndef _SELINUX_AUDIT_H
diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
index c72145444090..85ec30d11144 100644
--- a/security/selinux/include/netif.h
+++ b/security/selinux/include/netif.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Network interface table.
*
@@ -9,10 +10,6 @@
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul@paul-moore.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#ifndef _SELINUX_NETIF_H_
#define _SELINUX_NETIF_H_
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 231262d8eac9..91c5395dd20c 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* NSA Security-Enhanced Linux (SELinux) security module
*
@@ -11,10 +12,6 @@
* Copyright (C) 2001,2002 Networks Associates Technology, Inc.
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2016 Mellanox Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#ifndef _SELINUX_OBJSEC_H_
#define _SELINUX_OBJSEC_H_
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 8c738c189942..9cb83eeee1d9 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Network interface table.
*
@@ -9,10 +10,6 @@
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
* Paul Moore <paul@paul-moore.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/types.h>
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 8a8a72507437..621e2e9cd6a1 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Netlink event notifications for SELinux.
*
* Author: James Morris <jmorris@redhat.com>
*
* Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/types.h>
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 9cec81209617..58345ba0528e 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Netlink message type permission tables, for user generated messages.
*
* Author: James Morris <jmorris@redhat.com>
*
* Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
@@ -83,6 +80,9 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_NEWCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -166,7 +166,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
* structures at the top of this file with the new mappings
* before updating the BUILD_BUG_ON() macro!
*/
- BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3));
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWNEXTHOP + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
break;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 1884f34bb983..6f195c7915de 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -178,7 +178,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
selnl_notify_setenforce(new_value);
selinux_status_update_setenforce(state, new_value);
if (!new_value)
- call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
+ call_blocking_lsm_notifier(LSM_POLICY_CHANGE, NULL);
}
length = count;
out:
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 8f624f80055b..09929fc5ab47 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -347,7 +347,9 @@ int ebitmap_read(struct ebitmap *e, void *fp)
{
struct ebitmap_node *n = NULL;
u32 mapunit, count, startbit, index;
+ __le32 ebitmap_start;
u64 map;
+ __le64 mapbits;
__le32 buf[3];
int rc, i;
@@ -381,12 +383,12 @@ int ebitmap_read(struct ebitmap *e, void *fp)
goto bad;
for (i = 0; i < count; i++) {
- rc = next_entry(&startbit, fp, sizeof(u32));
+ rc = next_entry(&ebitmap_start, fp, sizeof(u32));
if (rc < 0) {
pr_err("SELinux: ebitmap: truncated map\n");
goto bad;
}
- startbit = le32_to_cpu(startbit);
+ startbit = le32_to_cpu(ebitmap_start);
if (startbit & (mapunit - 1)) {
pr_err("SELinux: ebitmap start bit (%d) is "
@@ -423,12 +425,12 @@ int ebitmap_read(struct ebitmap *e, void *fp)
goto bad;
}
- rc = next_entry(&map, fp, sizeof(u64));
+ rc = next_entry(&mapbits, fp, sizeof(u64));
if (rc < 0) {
pr_err("SELinux: ebitmap: truncated map\n");
goto bad;
}
- map = le64_to_cpu(map);
+ map = le64_to_cpu(mapbits);
index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
while (map) {
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index d3f5568c1f60..d61563a3695e 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -649,9 +649,7 @@ static void context_struct_compute_av(struct policydb *policydb,
avkey.target_class = tclass;
avkey.specified = AVTAB_AV | AVTAB_XPERMS;
sattr = &policydb->type_attr_map_array[scontext->type - 1];
- BUG_ON(!sattr);
tattr = &policydb->type_attr_map_array[tcontext->type - 1];
- BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
avkey.source_type = i + 1;
@@ -1057,9 +1055,7 @@ void security_compute_xperms_decision(struct selinux_state *state,
avkey.target_class = tclass;
avkey.specified = AVTAB_XPERMS;
sattr = &policydb->type_attr_map_array[scontext->type - 1];
- BUG_ON(!sattr);
tattr = &policydb->type_attr_map_array[tcontext->type - 1];
- BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
avkey.source_type = i + 1;
@@ -1586,6 +1582,7 @@ static int compute_sid_handle_invalid_context(
struct policydb *policydb = &state->ss->policydb;
char *s = NULL, *t = NULL, *n = NULL;
u32 slen, tlen, nlen;
+ struct audit_buffer *ab;
if (context_struct_to_string(policydb, scontext, &s, &slen))
goto out;
@@ -1593,12 +1590,14 @@ static int compute_sid_handle_invalid_context(
goto out;
if (context_struct_to_string(policydb, newcontext, &n, &nlen))
goto out;
- audit_log(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "op=security_compute_sid invalid_context=%s"
- " scontext=%s"
- " tcontext=%s"
- " tclass=%s",
- n, s, t, sym_name(policydb, SYM_CLASSES, tclass-1));
+ ab = audit_log_start(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ audit_log_format(ab,
+ "op=security_compute_sid invalid_context=");
+ /* no need to record the NUL with untrusted strings */
+ audit_log_n_untrustedstring(ab, n, nlen - 1);
+ audit_log_format(ab, " scontext=%s tcontext=%s tclass=%s",
+ s, t, sym_name(policydb, SYM_CLASSES, tclass-1));
+ audit_log_end(ab);
out:
kfree(s);
kfree(t);
@@ -3005,10 +3004,16 @@ int security_sid_mls_copy(struct selinux_state *state,
if (rc) {
if (!context_struct_to_string(policydb, &newcon, &s,
&len)) {
- audit_log(audit_context(),
- GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "op=security_sid_mls_copy "
- "invalid_context=%s", s);
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(audit_context(),
+ GFP_ATOMIC,
+ AUDIT_SELINUX_ERR);
+ audit_log_format(ab,
+ "op=security_sid_mls_copy invalid_context=");
+ /* don't record NUL with untrusted strings */
+ audit_log_n_untrustedstring(ab, s, len - 1);
+ audit_log_end(ab);
kfree(s);
}
goto out_unlock;
diff --git a/security/selinux/ss/status.c b/security/selinux/ss/status.c
index a121de45ac0e..3c554a442467 100644
--- a/security/selinux/ss/status.c
+++ b/security/selinux/ss/status.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mmap based event notifications for SELinux
*
* Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* Copyright (C) 2010 NEC corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 7c57cb7e4146..7314196185d1 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* NSA Security-Enhanced Linux (SELinux) security module
*
@@ -12,10 +13,6 @@
*
* Copyright (C) 2005 International Business Machines Corporation
* Copyright (C) 2006 Trusted Computer Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
/*
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index d99450b4f511..4c5e5a438f8b 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Simplified MAC Kernel (smack) security module
*
@@ -12,10 +13,6 @@
* Paul Moore <paul@paul-moore.com>
* Copyright (C) 2010 Nokia Corporation
* Copyright (C) 2011 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/xattr.h>
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index e36d17835d4f..fc7399b45373 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Simplified MAC Kernel (smack) security module
*
@@ -8,10 +9,6 @@
*
* Copyright (C) 2014 Casey Schaufler <casey@schaufler-ca.com>
* Copyright (C) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/netfilter_ipv4.h>
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index efac68556b45..01c6239c4493 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Yama Linux Security Module
*
@@ -5,11 +6,6 @@
*
* Copyright (C) 2010 Canonical, Ltd.
* Copyright (C) 2011 The Chromium OS Authors.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
*/
#include <linux/lsm_hooks.h>