u32 qc_id;
 };
 
+static LIST_HEAD(qd_lru_list);
+static atomic_t qd_lru_count = ATOMIC_INIT(0);
+static DEFINE_SPINLOCK(qd_lru_lock);
+
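+/*
+ * Shrinker callback for the quota LRU. @nr is how many entries the
+ * VM would like freed (0 means "just report the count") and @gfp_mask
+ * is the allocation context of the caller. Presumably hooked up via
+ * register_shrinker() elsewhere in the patch (not shown here).
+ */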
+int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+{
+       struct gfs2_quota_data *qd;
+       struct gfs2_sbd *sdp;
+
+       if (nr == 0)
+               goto out;
+
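+       /*
+        * Freeing a qd touches glock/LVB state, so if the caller is not
+        * allowed to re-enter filesystem code, return -1 to tell the VM
+        * to back off.
+        */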
+       if (!(gfp_mask & __GFP_FS))
+               return -1;
+
+       spin_lock(&qd_lru_lock);
+       while (nr && !list_empty(&qd_lru_list)) {
+               qd = list_entry(qd_lru_list.next,
+                               struct gfs2_quota_data, qd_reclaim);
+               sdp = qd->qd_gl->gl_sbd;
+
+               /* Free from the filesystem-specific list */
+               list_del(&qd->qd_list);
+
+               spin_lock(&sdp->sd_quota_spin);
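+               /* A qd sitting on the reclaim list must be fully idle */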
+               gfs2_assert_warn(sdp, !qd->qd_change);
+               gfs2_assert_warn(sdp, !qd->qd_slot_count);
+               gfs2_assert_warn(sdp, !qd->qd_bh_count);
+
+               gfs2_lvb_unhold(qd->qd_gl);
+               spin_unlock(&sdp->sd_quota_spin);
+               atomic_dec(&sdp->sd_quota_count);
+
+               /* Delete it from the common reclaim list */
+               list_del_init(&qd->qd_reclaim);
+               atomic_dec(&qd_lru_count);
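+               /* qd is off both lists now; drop the lock to free it,
+                * then retake it for the next iteration */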
+               spin_unlock(&qd_lru_lock);
+               kmem_cache_free(gfs2_quotad_cachep, qd);
+               spin_lock(&qd_lru_lock);
+               nr--;
+       }
+       spin_unlock(&qd_lru_lock);
+
+out:
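+       /* Report what is left on the LRU, scaled by vfs_cache_pressure
+        * as the other slab shrinkers do */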
+       return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
+}
+
 static u64 qd2offset(struct gfs2_quota_data *qd)
 {
        u64 offset;
        if (!qd)
                return -ENOMEM;
 
-       qd->qd_count = 1;
+       atomic_set(&qd->qd_count, 1);
        qd->qd_id = id;
        if (user)
                set_bit(QDF_USER, &qd->qd_flags);
        qd->qd_slot = -1;
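+       /* An empty list_head lets list_empty() test whether this qd is
+        * currently on the reclaim list */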
+       INIT_LIST_HEAD(&qd->qd_reclaim);
 
        error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
                              &gfs2_quota_glops, CREATE, &qd->qd_gl);
 
        for (;;) {
                found = 0;
-               spin_lock(&sdp->sd_quota_spin);
+               spin_lock(&qd_lru_lock);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qd->qd_id == id &&
                            !test_bit(QDF_USER, &qd->qd_flags) == !user) {
-                               qd->qd_count++;
+                               if (!atomic_read(&qd->qd_count) &&
+                                   !list_empty(&qd->qd_reclaim)) {
+                                       /* Remove it from reclaim list */
+                                       list_del_init(&qd->qd_reclaim);
+                                       atomic_dec(&qd_lru_count);
+                               }
+                               atomic_inc(&qd->qd_count);
                                found = 1;
                                break;
                        }
                        new_qd = NULL;
                }
 
-               spin_unlock(&sdp->sd_quota_spin);
+               spin_unlock(&qd_lru_lock);
 
                if (qd || !create) {
                        if (new_qd) {
 static void qd_hold(struct gfs2_quota_data *qd)
 {
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
-       spin_lock(&sdp->sd_quota_spin);
-       gfs2_assert(sdp, qd->qd_count);
-       qd->qd_count++;
-       spin_unlock(&sdp->sd_quota_spin);
+       gfs2_assert(sdp, atomic_read(&qd->qd_count));
+       atomic_inc(&qd->qd_count);
 }
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
-       struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-       spin_lock(&sdp->sd_quota_spin);
-       gfs2_assert(sdp, qd->qd_count);
-       if (!--qd->qd_count)
-               qd->qd_last_touched = jiffies;
-       spin_unlock(&sdp->sd_quota_spin);
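+       /* atomic_dec_and_lock() takes qd_lru_lock only when the count
+        * actually drops to zero, keeping the common case lock-free */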
+       if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
+               /* Add to the reclaim list */
+               list_add_tail(&qd->qd_reclaim, &qd_lru_list);
+               atomic_inc(&qd_lru_count);
+               spin_unlock(&qd_lru_lock);
+       }
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;
 
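+       /* Lock ordering: qd_lru_lock is always taken before sd_quota_spin */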
+       spin_lock(&qd_lru_lock);
        spin_lock(&sdp->sd_quota_spin);
 
        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 
                set_bit(QDF_LOCKED, &qd->qd_flags);
-               gfs2_assert_warn(sdp, qd->qd_count);
-               qd->qd_count++;
+               gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+               atomic_inc(&qd->qd_count);
                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);
                qd->qd_slot_count++;
                qd = NULL;
 
        spin_unlock(&sdp->sd_quota_spin);
+       spin_unlock(&qd_lru_lock);
 
        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;
 
+       spin_lock(&qd_lru_lock);
        spin_lock(&sdp->sd_quota_spin);
 
        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&sdp->sd_quota_spin);
+               spin_unlock(&qd_lru_lock);
                return 0;
        }
 
        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 
        set_bit(QDF_LOCKED, &qd->qd_flags);
-       gfs2_assert_warn(sdp, qd->qd_count);
-       qd->qd_count++;
+       gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+       atomic_inc(&qd->qd_count);
        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
 
        spin_unlock(&sdp->sd_quota_spin);
+       spin_unlock(&qd_lru_lock);
 
        gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (bh_get(qd)) {
                loff_t pos;
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl,
-                                         LM_ST_EXCLUSIVE, GL_NOCACHE,
-                                         q_gh);
+                                          LM_ST_EXCLUSIVE, GL_NOCACHE,
+                                          q_gh);
                if (error)
                        return error;
 
 
                gfs2_glock_dq_uninit(&i_gh);
 
-
                gfs2_quota_in(&q, buf);
                qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
                qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
                        qd->qd_change = qc.qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;
-                       qd->qd_last_touched = jiffies;
 
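+                       /* qd_lru_lock now guards sd_quota_list; the slot
+                        * bitmap stays under sd_quota_spin */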
+                       spin_lock(&qd_lru_lock);
                        spin_lock(&sdp->sd_quota_spin);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
+                       spin_unlock(&sdp->sd_quota_spin);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
-                       spin_unlock(&sdp->sd_quota_spin);
+                       spin_unlock(&qd_lru_lock);
 
                        found++;
                }
        return error;
 }
 
-static void gfs2_quota_scan(struct gfs2_sbd *sdp)
-{
-       struct gfs2_quota_data *qd, *safe;
-       LIST_HEAD(dead);
-
-       spin_lock(&sdp->sd_quota_spin);
-       list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
-               if (!qd->qd_count &&
-                   time_after_eq(jiffies, qd->qd_last_touched +
-                               gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
-                       list_move(&qd->qd_list, &dead);
-                       gfs2_assert_warn(sdp,
-                                        atomic_read(&sdp->sd_quota_count) > 0);
-                       atomic_dec(&sdp->sd_quota_count);
-               }
-       }
-       spin_unlock(&sdp->sd_quota_spin);
-
-       while (!list_empty(&dead)) {
-               qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
-               list_del(&qd->qd_list);
-
-               gfs2_assert_warn(sdp, !qd->qd_change);
-               gfs2_assert_warn(sdp, !qd->qd_slot_count);
-               gfs2_assert_warn(sdp, !qd->qd_bh_count);
-
-               gfs2_lvb_unhold(qd->qd_gl);
-               kmem_cache_free(gfs2_quotad_cachep, qd);
-       }
-}
-
 void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 {
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;
        unsigned int x;
 
-       spin_lock(&sdp->sd_quota_spin);
+       spin_lock(&qd_lru_lock);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
-               if (qd->qd_count > 1 ||
-                   (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
-                       list_move(&qd->qd_list, head);
+               spin_lock(&sdp->sd_quota_spin);
+               if (atomic_read(&qd->qd_count) > 1 ||
+                   (atomic_read(&qd->qd_count) &&
+                    !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        spin_unlock(&sdp->sd_quota_spin);
+                       list_move(&qd->qd_list, head);
+                       spin_unlock(&qd_lru_lock);
                        schedule();
-                       spin_lock(&sdp->sd_quota_spin);
+                       spin_lock(&qd_lru_lock);
                        continue;
                }
+               spin_unlock(&sdp->sd_quota_spin);
 
                list_del(&qd->qd_list);
+               /* Also remove if this qd exists in the reclaim list */
+               if (!list_empty(&qd->qd_reclaim)) {
+                       list_del_init(&qd->qd_reclaim);
+                       atomic_dec(&qd_lru_count);
+               }
                atomic_dec(&sdp->sd_quota_count);
-               spin_unlock(&sdp->sd_quota_spin);
+               spin_unlock(&qd_lru_lock);
 
-               if (!qd->qd_count) {
+               if (!atomic_read(&qd->qd_count)) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);
                } else
                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_lvb_unhold(qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, qd);
 
-               spin_lock(&sdp->sd_quota_spin);
+               spin_lock(&qd_lru_lock);
        }
-       spin_unlock(&sdp->sd_quota_spin);
+       spin_unlock(&qd_lru_lock);
 
        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
 
                quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);
 
-               /* FIXME: This should be turned into a shrinker */
-               gfs2_quota_scan(sdp);
-
                /* Check for & recover partially truncated inodes */
                quotad_check_trunc_list(sdp);