X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=net%2Fsched%2Fcls_tcindex.c;h=7a7bff5ded2487801e3c34941c1e12b8d77926fc;hb=25e18499e08cb097cbbfeab5de25d094d5312ee5;hp=7d46df7eac0955eff20eddb9cae3a33951df65d0;hpb=24beeab539c6f42c4a93e2ff7c3b5f272e60da45;p=linux-2.6-omap-h63xx.git

diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 7d46df7eac0..7a7bff5ded2 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -55,7 +55,7 @@ struct tcindex_data {
 	int fall_through;	/* 0: only classify if explicit match */
 };
 
-static struct tcf_ext_map tcindex_ext_map = {
+static const struct tcf_ext_map tcindex_ext_map = {
 	.police = TCA_TCINDEX_POLICE,
 	.action = TCA_TCINDEX_ACT
 };
@@ -193,6 +193,14 @@ valid_perfect_hash(struct tcindex_data *p)
 	return p->hash > (p->mask >> p->shift);
 }
 
+static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
+	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
+	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
+	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
+	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
+	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
+};
+
 static int
 tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 		  struct tcindex_data *p, struct tcindex_filter_result *r,
@@ -217,24 +225,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 	else
 		memset(&cr, 0, sizeof(cr));
 
-	err = -EINVAL;
-	if (tb[TCA_TCINDEX_HASH]) {
-		if (nla_len(tb[TCA_TCINDEX_HASH]) < sizeof(u32))
-			goto errout;
-		cp.hash = *(u32 *) nla_data(tb[TCA_TCINDEX_HASH]);
-	}
+	if (tb[TCA_TCINDEX_HASH])
+		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
 
-	if (tb[TCA_TCINDEX_MASK]) {
-		if (nla_len(tb[TCA_TCINDEX_MASK]) < sizeof(u16))
-			goto errout;
-		cp.mask = *(u16 *) nla_data(tb[TCA_TCINDEX_MASK]);
-	}
+	if (tb[TCA_TCINDEX_MASK])
+		cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
 
-	if (tb[TCA_TCINDEX_SHIFT]) {
-		if (nla_len(tb[TCA_TCINDEX_SHIFT]) < sizeof(int))
-			goto errout;
-		cp.shift = *(int *) nla_data(tb[TCA_TCINDEX_SHIFT]);
-	}
+	if (tb[TCA_TCINDEX_SHIFT])
+		cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
 
 	err = -EBUSY;
 	/* Hash already allocated, make sure that we still meet the
@@ -248,12 +246,8 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 		goto errout;
 
 	err = -EINVAL;
-	if (tb[TCA_TCINDEX_FALL_THROUGH]) {
-		if (nla_len(tb[TCA_TCINDEX_FALL_THROUGH]) < sizeof(u32))
-			goto errout;
-		cp.fall_through =
-			*(u32 *) nla_data(tb[TCA_TCINDEX_FALL_THROUGH]);
-	}
+	if (tb[TCA_TCINDEX_FALL_THROUGH])
+		cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
 
 	if (!cp.hash) {
 		/* Hash not specified, use perfect hash if the upper limit
@@ -305,7 +299,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 	}
 
 	if (tb[TCA_TCINDEX_CLASSID]) {
-		cr.res.classid = *(u32 *) nla_data(tb[TCA_TCINDEX_CLASSID]);
+		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
 		tcf_bind_filter(tp, &cr.res, base);
 	}
 
@@ -359,7 +353,7 @@ tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
 	if (!opt)
 		return 0;
 
-	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, NULL);
+	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
 	if (err < 0)
 		return err;
 
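The patch above replaces cls_tcindex's open-coded nla_len() checks with a struct nla_policy table, so nla_parse_nested() validates attribute types and lengths centrally before the individual attributes are read with nla_get_u32()/nla_get_u16(). The sketch below illustrates that same pattern in isolation; it is not part of the patch. The FOO_* attributes and the foo_parse() helper are hypothetical names used only for illustration, while struct nla_policy, nla_parse_nested(), nla_get_u32() and nla_get_u16() are the real netlink helpers used above (with the 4-argument nla_parse_nested() signature of this kernel generation).

/*
 * Minimal sketch of the nla_policy pattern adopted by the patch.
 * FOO_* and foo_parse() are invented for illustration only.
 */
#include <net/netlink.h>

enum {
	FOO_UNSPEC,
	FOO_HASH,	/* u32 */
	FOO_MASK,	/* u16 */
	__FOO_MAX
};
#define FOO_MAX (__FOO_MAX - 1)

static const struct nla_policy foo_policy[FOO_MAX + 1] = {
	[FOO_HASH]	= { .type = NLA_U32 },
	[FOO_MASK]	= { .type = NLA_U16 },
};

static int foo_parse(struct nlattr *opt, u32 *hash, u16 *mask)
{
	struct nlattr *tb[FOO_MAX + 1];
	int err;

	/* Type and length validation is driven by foo_policy. */
	err = nla_parse_nested(tb, FOO_MAX, opt, foo_policy);
	if (err < 0)
		return err;

	/* Attributes that passed validation can be read directly. */
	if (tb[FOO_HASH])
		*hash = nla_get_u32(tb[FOO_HASH]);
	if (tb[FOO_MASK])
		*mask = nla_get_u16(tb[FOO_MASK]);

	return 0;
}

Because an attribute shorter than its declared type is rejected by nla_parse_nested() itself, the per-attribute length checks and their -EINVAL/goto errout paths could be dropped from tcindex_set_parms(), as the hunks above show.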