net: core: add UID to flows, rules, and routes
- Define a new FIB rule attribute, FRA_UID_RANGE, to describe a range of UIDs. - Define an RTA_UID attribute for per-UID route lookups and dumps. - Support passing these attributes to and from userspace via rtnetlink. The value INVALID_UID indicates no UID was specified. - Add a UID field to the flow structures. [Backport of net-next 622ec2c9d52405973c9f1ca5116eb1c393adfc7d] Bug: 16355602 Change-Id: Iea98e6fedd0fd4435a1f4efa3deb3629505619ab Signed-off-by: Lorenzo Colitti <lorenzo@google.com> Signed-off-by: David S. Miller <davem@davemloft.net> Git-commit: 0baa8bcd2d48a89ee304051167ded77a10baabcc Git-repo: https://android.googlesource.com/kernel/common.git [resolved trivial merge conflicts] Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
This commit is contained in:
parent
6a2dd740c1
commit
d4e12061f4
|
@ -8,6 +8,11 @@
|
|||
#include <net/flow.h>
|
||||
#include <net/rtnetlink.h>
|
||||
|
||||
struct fib_kuid_range {
|
||||
kuid_t start;
|
||||
kuid_t end;
|
||||
};
|
||||
|
||||
struct fib_rule {
|
||||
struct list_head list;
|
||||
atomic_t refcnt;
|
||||
|
@ -23,6 +28,7 @@ struct fib_rule {
|
|||
struct fib_rule __rcu *ctarget;
|
||||
char iifname[IFNAMSIZ];
|
||||
char oifname[IFNAMSIZ];
|
||||
struct fib_kuid_range uid_range;
|
||||
struct rcu_head rcu;
|
||||
struct net * fr_net;
|
||||
};
|
||||
|
@ -80,7 +86,8 @@ struct fib_rules_ops {
|
|||
[FRA_FWMARK] = { .type = NLA_U32 }, \
|
||||
[FRA_FWMASK] = { .type = NLA_U32 }, \
|
||||
[FRA_TABLE] = { .type = NLA_U32 }, \
|
||||
[FRA_GOTO] = { .type = NLA_U32 }
|
||||
[FRA_GOTO] = { .type = NLA_U32 }, \
|
||||
[FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }
|
||||
|
||||
static inline void fib_rule_get(struct fib_rule *rule)
|
||||
{
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/socket.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/uidgid.h>
|
||||
|
||||
/*
|
||||
* ifindex generation is per-net namespace, and loopback is
|
||||
|
@ -31,6 +32,7 @@ struct flowi_common {
|
|||
#define FLOWI_FLAG_CAN_SLEEP 0x02
|
||||
#define FLOWI_FLAG_KNOWN_NH 0x04
|
||||
__u32 flowic_secid;
|
||||
kuid_t flowic_uid;
|
||||
};
|
||||
|
||||
union flowi_uli {
|
||||
|
@ -67,6 +69,7 @@ struct flowi4 {
|
|||
#define flowi4_proto __fl_common.flowic_proto
|
||||
#define flowi4_flags __fl_common.flowic_flags
|
||||
#define flowi4_secid __fl_common.flowic_secid
|
||||
#define flowi4_uid __fl_common.flowic_uid
|
||||
|
||||
/* (saddr,daddr) must be grouped, same order as in IP header */
|
||||
__be32 saddr;
|
||||
|
@ -123,6 +126,7 @@ struct flowi6 {
|
|||
#define flowi6_proto __fl_common.flowic_proto
|
||||
#define flowi6_flags __fl_common.flowic_flags
|
||||
#define flowi6_secid __fl_common.flowic_secid
|
||||
#define flowi6_uid __fl_common.flowic_uid
|
||||
struct in6_addr daddr;
|
||||
struct in6_addr saddr;
|
||||
__be32 flowlabel;
|
||||
|
@ -166,6 +170,7 @@ struct flowi {
|
|||
#define flowi_proto u.__fl_common.flowic_proto
|
||||
#define flowi_flags u.__fl_common.flowic_flags
|
||||
#define flowi_secid u.__fl_common.flowic_secid
|
||||
#define flowi_uid u.__fl_common.flowic_uid
|
||||
} __attribute__((__aligned__(BITS_PER_LONG/8)));
|
||||
|
||||
static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
|
||||
|
|
|
@ -29,6 +29,11 @@ struct fib_rule_hdr {
|
|||
__u32 flags;
|
||||
};
|
||||
|
||||
struct fib_rule_uid_range {
|
||||
__u32 start;
|
||||
__u32 end;
|
||||
};
|
||||
|
||||
enum {
|
||||
FRA_UNSPEC,
|
||||
FRA_DST, /* destination address */
|
||||
|
@ -49,6 +54,9 @@ enum {
|
|||
FRA_TABLE, /* Extended table id */
|
||||
FRA_FWMASK, /* mask for netfilter mark */
|
||||
FRA_OIFNAME,
|
||||
FRA_PAD,
|
||||
FRA_L3MDEV, /* iif or oif is l3mdev goto its table */
|
||||
FRA_UID_RANGE, /* UID range */
|
||||
__FRA_MAX
|
||||
};
|
||||
|
||||
|
|
|
@ -297,6 +297,14 @@ enum rtattr_type_t {
|
|||
RTA_TABLE,
|
||||
RTA_MARK,
|
||||
RTA_MFC_STATS,
|
||||
RTA_VIA,
|
||||
RTA_NEWDST,
|
||||
RTA_PREF,
|
||||
RTA_ENCAP_TYPE,
|
||||
RTA_ENCAP,
|
||||
RTA_EXPIRES,
|
||||
RTA_PAD,
|
||||
RTA_UID,
|
||||
__RTA_MAX
|
||||
};
|
||||
|
||||
|
|
|
@ -22,6 +22,11 @@
|
|||
#define uid_eq(a, b) ((a) == (b))
|
||||
#define uid_gte(a, b) ((a) >= (b))
|
||||
|
||||
static const struct fib_kuid_range fib_kuid_range_unset = {
|
||||
KUIDT_INIT(0),
|
||||
KUIDT_INIT(~0),
|
||||
};
|
||||
|
||||
int fib_default_rule_add(struct fib_rules_ops *ops,
|
||||
u32 pref, u32 table, u32 flags)
|
||||
{
|
||||
|
@ -37,6 +42,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
|
|||
r->table = table;
|
||||
r->flags = flags;
|
||||
r->fr_net = hold_net(ops->fro_net);
|
||||
r->uid_range = fib_kuid_range_unset;
|
||||
|
||||
/* The lock is not required here, the list in unreacheable
|
||||
* at the moment this function is called */
|
||||
|
@ -184,6 +190,34 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(fib_rules_unregister);
|
||||
|
||||
static int uid_range_set(struct fib_kuid_range *range)
|
||||
{
|
||||
return uid_valid(range->start) && uid_valid(range->end);
|
||||
}
|
||||
|
||||
static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
|
||||
{
|
||||
struct fib_rule_uid_range *in;
|
||||
struct fib_kuid_range out;
|
||||
|
||||
in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);
|
||||
|
||||
out.start = make_kuid(current_user_ns(), in->start);
|
||||
out.end = make_kuid(current_user_ns(), in->end);
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
|
||||
{
|
||||
struct fib_rule_uid_range out = {
|
||||
from_kuid_munged(current_user_ns(), range->start),
|
||||
from_kuid_munged(current_user_ns(), range->end)
|
||||
};
|
||||
|
||||
return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
|
||||
}
|
||||
|
||||
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
|
||||
struct flowi *fl, int flags)
|
||||
{
|
||||
|
@ -198,6 +232,10 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
|
|||
if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
|
||||
goto out;
|
||||
|
||||
if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
|
||||
uid_gt(fl->flowi_uid, rule->uid_range.end))
|
||||
goto out;
|
||||
|
||||
ret = ops->match(rule, fl, flags);
|
||||
out:
|
||||
return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
|
||||
|
@ -368,6 +406,21 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
|
|||
} else if (rule->action == FR_ACT_GOTO)
|
||||
goto errout_free;
|
||||
|
||||
if (tb[FRA_UID_RANGE]) {
|
||||
if (current_user_ns() != net->user_ns) {
|
||||
err = -EPERM;
|
||||
goto errout_free;
|
||||
}
|
||||
|
||||
rule->uid_range = nla_get_kuid_range(tb);
|
||||
|
||||
if (!uid_range_set(&rule->uid_range) ||
|
||||
!uid_lte(rule->uid_range.start, rule->uid_range.end))
|
||||
goto errout_free;
|
||||
} else {
|
||||
rule->uid_range = fib_kuid_range_unset;
|
||||
}
|
||||
|
||||
err = ops->configure(rule, skb, frh, tb);
|
||||
if (err < 0)
|
||||
goto errout_free;
|
||||
|
@ -427,6 +480,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
|
|||
struct fib_rules_ops *ops = NULL;
|
||||
struct fib_rule *rule, *tmp;
|
||||
struct nlattr *tb[FRA_MAX+1];
|
||||
struct fib_kuid_range range;
|
||||
int err = -EINVAL;
|
||||
|
||||
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
|
||||
|
@ -446,6 +500,14 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
|
|||
if (err < 0)
|
||||
goto errout;
|
||||
|
||||
if (tb[FRA_UID_RANGE]) {
|
||||
range = nla_get_kuid_range(tb);
|
||||
if (!uid_range_set(&range))
|
||||
goto errout;
|
||||
} else {
|
||||
range = fib_kuid_range_unset;
|
||||
}
|
||||
|
||||
list_for_each_entry(rule, &ops->rules_list, list) {
|
||||
if (frh->action && (frh->action != rule->action))
|
||||
continue;
|
||||
|
@ -474,6 +536,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
|
|||
(rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
|
||||
continue;
|
||||
|
||||
if (uid_range_set(&range) &&
|
||||
(!uid_eq(rule->uid_range.start, range.start) ||
|
||||
!uid_eq(rule->uid_range.end, range.end)))
|
||||
continue;
|
||||
|
||||
if (!ops->compare(rule, frh, tb))
|
||||
continue;
|
||||
|
||||
|
@ -530,7 +597,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
|
|||
+ nla_total_size(4) /* FRA_PRIORITY */
|
||||
+ nla_total_size(4) /* FRA_TABLE */
|
||||
+ nla_total_size(4) /* FRA_FWMARK */
|
||||
+ nla_total_size(4); /* FRA_FWMASK */
|
||||
+ nla_total_size(4) /* FRA_FWMASK */
|
||||
+ nla_total_size(sizeof(struct fib_kuid_range));
|
||||
|
||||
if (ops->nlmsg_payload)
|
||||
payload += ops->nlmsg_payload(rule);
|
||||
|
@ -584,7 +652,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
|
|||
((rule->mark_mask || rule->mark) &&
|
||||
nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
|
||||
(rule->target &&
|
||||
nla_put_u32(skb, FRA_GOTO, rule->target)))
|
||||
nla_put_u32(skb, FRA_GOTO, rule->target)) ||
|
||||
(uid_range_set(&rule->uid_range) &&
|
||||
nla_put_uid_range(skb, &rule->uid_range)))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (ops->fill(rule, skb, frh) < 0)
|
||||
|
|
|
@ -531,6 +531,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
|
|||
[RTA_METRICS] = { .type = NLA_NESTED },
|
||||
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
|
||||
[RTA_FLOW] = { .type = NLA_U32 },
|
||||
[RTA_UID] = { .type = NLA_U32 },
|
||||
};
|
||||
|
||||
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
|
||||
|
|
|
@ -2350,6 +2350,11 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
|
|||
nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
|
||||
nla_put_u32(skb, RTA_UID,
|
||||
from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
|
||||
goto nla_put_failure;
|
||||
|
||||
error = rt->dst.error;
|
||||
|
||||
if (rt_is_input_route(rt)) {
|
||||
|
@ -2400,6 +2405,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
|
|||
int err;
|
||||
int mark;
|
||||
struct sk_buff *skb;
|
||||
kuid_t uid;
|
||||
|
||||
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
|
||||
if (err < 0)
|
||||
|
@ -2427,6 +2433,10 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
|
|||
dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
|
||||
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
|
||||
mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
|
||||
if (tb[RTA_UID])
|
||||
uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
|
||||
else
|
||||
uid = (iif ? INVALID_UID : current_uid());
|
||||
|
||||
memset(&fl4, 0, sizeof(fl4));
|
||||
fl4.daddr = dst;
|
||||
|
@ -2434,6 +2444,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
|
|||
fl4.flowi4_tos = rtm->rtm_tos;
|
||||
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
|
||||
fl4.flowi4_mark = mark;
|
||||
fl4.flowi4_uid = uid;
|
||||
|
||||
if (iif) {
|
||||
struct net_device *dev;
|
||||
|
|
|
@ -2257,6 +2257,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
|
|||
[RTA_PRIORITY] = { .type = NLA_U32 },
|
||||
[RTA_METRICS] = { .type = NLA_NESTED },
|
||||
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
|
||||
[RTA_UID] = { .type = NLA_U32 },
|
||||
};
|
||||
|
||||
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
|
||||
|
@ -2640,6 +2641,12 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
|
|||
if (tb[RTA_MARK])
|
||||
fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
|
||||
|
||||
if (tb[RTA_UID])
|
||||
fl6.flowi6_uid = make_kuid(current_user_ns(),
|
||||
nla_get_u32(tb[RTA_UID]));
|
||||
else
|
||||
fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
|
||||
|
||||
if (iif) {
|
||||
struct net_device *dev;
|
||||
int flags = 0;
|
||||
|
|
Loading…
Reference in New Issue