 #define RTMGRP_IPV4_IFADDR     0x10
 #define RTMGRP_IPV4_MROUTE     0x20
 #define RTMGRP_IPV4_ROUTE      0x40
+#define RTMGRP_IPV4_RULE       0x80
 
 #define RTMGRP_IPV6_IFADDR     0x100
 #define RTMGRP_IPV6_MROUTE     0x200
 #define        RTNLGRP_IPV4_MROUTE     RTNLGRP_IPV4_MROUTE
        RTNLGRP_IPV4_ROUTE,
 #define RTNLGRP_IPV4_ROUTE     RTNLGRP_IPV4_ROUTE
-       RTNLGRP_NOP1,
+       RTNLGRP_IPV4_RULE,
+#define RTNLGRP_IPV4_RULE      RTNLGRP_IPV4_RULE
        RTNLGRP_IPV6_IFADDR,
 #define RTNLGRP_IPV6_IFADDR    RTNLGRP_IPV6_IFADDR
        RTNLGRP_IPV6_MROUTE,
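
For reference, a minimal userspace sketch (not part of this patch) of how a listener would consume the new multicast group: it opens a NETLINK_ROUTE socket, sets the RTMGRP_IPV4_RULE bit added above in nl_groups, and reads the RTM_NEWRULE/RTM_DELRULE notifications broadcast by the fib_rules.c changes below. Buffer size and error handling are illustrative only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl snl;
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	snl.nl_groups = RTMGRP_IPV4_RULE;	/* group bit added in this patch */

	if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0)
		return 1;

	for (;;) {
		struct nlmsghdr *nlh;
		int len = recv(fd, buf, sizeof(buf), 0);

		if (len < 0)
			break;

		/* walk all netlink messages in this datagram */
		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == RTM_NEWRULE)
				printf("fib rule added\n");
			else if (nlh->nlmsg_type == RTM_DELRULE)
				printf("fib rule deleted\n");
		}
	}

	close(fd);
	return 0;
}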
 
 
 /* writer func called from netlink -- rtnl_sem held */
 
+static void rtmsg_rule(int, struct fib_rule *);
+
 int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 {
        struct rtattr **rta = arg;
 
                        hlist_del_rcu(&r->hlist);
                        r->r_dead = 1;
+                       rtmsg_rule(RTM_DELRULE, r);
                        fib_rule_put(r);
                        err = 0;
                        break;
        else
                hlist_add_before_rcu(&new_r->hlist, &r->hlist);
 
+       rtmsg_rule(RTM_NEWRULE, new_r);
        return 0;
 }
 
 
 static __inline__ int inet_fill_rule(struct sk_buff *skb,
                                     struct fib_rule *r,
-                                    struct netlink_callback *cb,
+                                    u32 pid, u32 seq, int event,
                                     unsigned int flags)
 {
        struct rtmsg *rtm;
        struct nlmsghdr  *nlh;
        unsigned char    *b = skb->tail;
 
-       nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWRULE, sizeof(*rtm), flags);
+       nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_family = AF_INET;
        rtm->rtm_dst_len = r->r_dst_len;
 
 /* callers should hold rtnl semaphore */
 
+static void rtmsg_rule(int event, struct fib_rule *r)
+{
+       int size = NLMSG_SPACE(sizeof(struct rtmsg) + 128);
+       struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
+
+       if (!skb)
+               netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, ENOBUFS);
+       else if (inet_fill_rule(skb, r, 0, 0, event, 0) < 0) {
+               kfree_skb(skb);
+               netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, EINVAL);
+       } else {
+               netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_RULE, GFP_KERNEL);
+       }
+}
+
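Note on the error path above: when the skb allocation or inet_fill_rule() fails, netlink_set_err() marks every socket subscribed to RTNLGRP_IPV4_RULE with an error, so a listener sees recv() fail with errno ENOBUFS (or EINVAL) and knows it missed an event. The usual recovery is to re-request a full dump via RTM_GETRULE rather than trust the partial notification stream (see the sketch after the inet_dump_rules() hunk below).
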
 int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int idx = 0;
 
                if (idx < s_idx)
                        continue;
-               if (inet_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
+               if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
+                                  cb->nlh->nlmsg_seq,
+                                  RTM_NEWRULE, NLM_F_MULTI) < 0)
                        break;
                idx++;
        }
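
To show what inet_dump_rules() serves, here is a minimal userspace sketch (again, not part of the patch) that requests a full IPv4 rule dump with RTM_GETRULE and NLM_F_DUMP and walks the RTM_NEWRULE replies until NLMSG_DONE; the socket setup and buffer size are illustrative.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct sockaddr_nl kernel;
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&kernel, 0, sizeof(kernel));
	kernel.nl_family = AF_NETLINK;		/* nl_pid = 0: the kernel */

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETRULE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.nlh.nlmsg_seq = 1;
	req.rtm.rtm_family = AF_INET;

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return 1;

	for (;;) {
		struct nlmsghdr *nlh;
		int len = recv(fd, buf, sizeof(buf), 0);

		if (len < 0)
			return 1;

		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE) {
				close(fd);
				return 0;
			}
			if (nlh->nlmsg_type == RTM_NEWRULE) {
				struct rtmsg *r = NLMSG_DATA(nlh);

				printf("rule: dst_len %d\n", r->rtm_dst_len);
			}
		}
	}
}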