cmo->excess.size = cmo->entitled - cmo->reserve.size;
        cmo->excess.free = cmo->excess.size - need;
 
-       cancel_delayed_work(container_of(work, struct delayed_work, work));
+       cancel_delayed_work(to_delayed_work(work));
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
 }
 
 
 
 static void hifn_work(struct work_struct *work)
 {
-       struct delayed_work *dw = container_of(work, struct delayed_work, work);
+       struct delayed_work *dw = to_delayed_work(work);
        struct hifn_device *dev = container_of(dw, struct hifn_device, work);
        unsigned long flags;
        int reset = 0;
 
 
 static void hgpk_recalib_work(struct work_struct *work)
 {
-       struct delayed_work *w = container_of(work, struct delayed_work, work);
+       struct delayed_work *w = to_delayed_work(work);
        struct hgpk_data *priv = container_of(w, struct hgpk_data, recalib_wq);
        struct psmouse *psmouse = priv->psmouse;
 
 
 static void
 dm9000_poll_work(struct work_struct *w)
 {
-       struct delayed_work *dw = container_of(w, struct delayed_work, work);
+       struct delayed_work *dw = to_delayed_work(w);
        board_info_t *db = container_of(dw, board_info_t, phy_poll);
        struct net_device *ndev = db->ndev;
 
 
 
 static void mlx4_en_do_get_stats(struct work_struct *work)
 {
-       struct delayed_work *delay = container_of(work, struct delayed_work, work);
+       struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                                 stats_task);
        struct mlx4_en_dev *mdev = priv->mdev;
 
 
 void mlx4_en_rx_refill(struct work_struct *work)
 {
-       struct delayed_work *delay = container_of(work, struct delayed_work, work);
+       struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                                 refill_task);
        struct mlx4_en_dev *mdev = priv->mdev;
 
 
 static void mlx4_sense_port(struct work_struct *work)
 {
-       struct delayed_work *delay = container_of(work, struct delayed_work, work);
+       struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
                                                sense_poll);
        struct mlx4_dev *dev = sense->dev;
 
  */
 static void phy_state_machine(struct work_struct *work)
 {
-       struct delayed_work *dwork =
-                       container_of(work, struct delayed_work, work);
+       struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
        int needs_aneg = 0;
 
 
 static void zfcp_wka_port_offline(struct work_struct *work)
 {
-       struct delayed_work *dw = container_of(work, struct delayed_work, work);
+       struct delayed_work *dw = to_delayed_work(work);
        struct zfcp_wka_port *wka_port =
                        container_of(dw, struct zfcp_wka_port, work);
 
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void ieee80211_softmac_scan_wq(struct work_struct *work)
 {
-       struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+       struct delayed_work *dwork = to_delayed_work(work);
        struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
 #else
 void ieee80211_softmac_scan_wq(struct ieee80211_device *ieee)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void ieee80211_softmac_scan_wq(struct work_struct *work)
 {
-        struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+       struct delayed_work *dwork = to_delayed_work(work);
-        struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, softmac_scan_wq);
+       struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
 #else
 void ieee80211_softmac_scan_wq(struct ieee80211_device *ieee)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void ieee80211_start_ibss_wq(struct work_struct *work)
 {
-       struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+       struct delayed_work *dwork = to_delayed_work(work);
        struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
 #else
 void ieee80211_start_ibss_wq(struct ieee80211_device *ieee)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void ieee80211_associate_retry_wq(struct work_struct *work)
 {
-       struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+       struct delayed_work *dwork = to_delayed_work(work);
        struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
 #else
 void ieee80211_associate_retry_wq(struct ieee80211_device *ieee)
 
 //     struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq);
 //     struct ieee80211_device * ieee = (struct ieee80211_device*)
 //                                            container_of(work, struct ieee80211_device, watch_dog_wq);
-       struct delayed_work *dwork = container_of(work,struct delayed_work,work);
+       struct delayed_work *dwork = to_delayed_work(work);
        struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_wakeup_wq);
        struct net_device *dev = ieee->dev;
 #else
 //      struct r8180_priv *priv = container_of(work, struct r8180_priv, watch_dog_wq);
 //      struct ieee80211_device * ieee = (struct ieee80211_device*)
 //                                             container_of(work, struct ieee80211_device, watch_dog_wq);
-        struct delayed_work *dwork = container_of(work,struct delayed_work,work);
+       struct delayed_work *dwork = to_delayed_work(work);
         struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_sleep_wq);
         struct net_device *dev = ieee->dev;
 #else
 void rtl8180_tx_irq_wq(struct work_struct *work)
 {
        //struct r8180_priv *priv = container_of(work, struct r8180_priv, reset_wq);
-        struct delayed_work *dwork = container_of(work,struct delayed_work,work);
+       struct delayed_work *dwork = to_delayed_work(work);
        struct ieee80211_device * ieee = (struct ieee80211_device*)
                                               container_of(dwork, struct ieee80211_device, watch_dog_wq);
        struct net_device *dev = ieee->dev;
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 void GPIOChangeRFWorkItemCallBack(struct work_struct *work)
 {
-       //struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+       //struct delayed_work *dwork = to_delayed_work(work);
        struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, GPIOChangeRFWorkItem.work);
        struct net_device *dev = ieee->dev;
        struct r8180_priv *priv = ieee80211_priv(dev);
 
  */
 static void wusbhc_keep_alive_run(struct work_struct *ws)
 {
-       struct delayed_work *dw = container_of(ws, struct delayed_work, work);
+       struct delayed_work *dw = to_delayed_work(ws);
        struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer);
 
        mutex_lock(&wusbhc->mutex);
 
        struct timer_list timer;
 };
 
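+/**
+ * to_delayed_work - obtain the containing delayed_work from a work_struct
+ * @work: the work_struct, which must be embedded in a struct delayed_work
+ */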
+static inline struct delayed_work *to_delayed_work(struct work_struct *work)
+{
+       return container_of(work, struct delayed_work, work);
+}
+
 struct execute_work {
        struct work_struct work;
 };
 
        struct kmem_cache *searchp;
        struct kmem_list3 *l3;
        int node = numa_node_id();
-       struct delayed_work *work =
-               container_of(w, struct delayed_work, work);
+       struct delayed_work *work = to_delayed_work(w);
 
        if (!mutex_trylock(&cache_chain_mutex))
                /* Give up. Setup the next iteration. */