         * it with privilege level 3 because the IVE uses non-privileged accesses to these
         * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
         */
-       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
-               memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = IA32_GDT_OFFSET;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
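
Every hunk in this series is the same mechanical transformation: a
kmem_cache_alloc() followed by a memset() of the freshly allocated object
collapses into a single kmem_cache_zalloc() call.  As a minimal sketch of
the helper's contract (not the literal mm/slab.c implementation), it
behaves like:

        static inline void *kmem_cache_zalloc_sketch(struct kmem_cache *cache,
                                                     gfp_t flags)
        {
                /* allocate one object, honouring the caller's gfp flags */
                void *ret = kmem_cache_alloc(cache, flags);

                /* zero the whole object on success; NULL still means failure */
                if (ret)
                        memset(ret, 0, kmem_cache_size(cache));
                return ret;
        }

Failure still yields NULL and the gfp flags pass through untouched, so the
surrounding error handling is unchanged and the GFP_ATOMIC call sites later
in the patch are equally unaffected.
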
         * code is locked in specific gate page, which is pointed by pretcode
         * when setup_frame_ia32
         */
-       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
-               memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = IA32_GATE_OFFSET;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
         * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
         * until a task modifies them via modify_ldt().
         */
-       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
-               memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = IA32_LDT_OFFSET;
                vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
                bprm->loader += stack_base;
        bprm->exec += stack_base;
 
-       mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!mpnt)
                return -ENOMEM;
 
-       memset(mpnt, 0, sizeof(*mpnt));
-
        down_write(&current->mm->mmap_sem);
        {
                mpnt->vm_mm = current->mm;
 
        DPRINT(("smpl_buf @%p\n", smpl_buf));
 
        /* allocate vma */
-       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                DPRINT(("Cannot allocate vma\n"));
                goto error_kmem;
        }
-       memset(vma, 0, sizeof(*vma));
 
        /*
         * partially initialize the vma for the sampling buffer
 
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
-       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
-               memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
-               vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
-                       memset(vma, 0, sizeof(*vma));
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
 
                bprm->loader += stack_base;
        bprm->exec += stack_base;
 
-       mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!mpnt) 
                return -ENOMEM; 
 
-       memset(mpnt, 0, sizeof(*mpnt));
-
        down_write(&mm->mmap_sem);
        {
                mpnt->vm_mm = mm;
 
        if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
                return ERR_PTR(-EINVAL);
 
-       my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
+       my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
        if (!my_cq) {
                ehca_err(device, "Out of memory for ehca_cq struct device=%p",
                         device);
                return ERR_PTR(-ENOMEM);
        }
 
-       memset(my_cq, 0, sizeof(struct ehca_cq));
        memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
 
        spin_lock_init(&my_cq->spinlock);
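
Only the slab-object memset goes away here; the memset of the on-stack
param struct stays, since kmem_cache_zalloc() can only zero the object it
hands out:

        my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);        /* heap object: zeroed by the allocator */
        memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));  /* stack local: still zeroed by hand */

The same split shows up in the ehca_qp hunk below, where the on-stack
parms struct keeps its explicit memset.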
 
 {
        struct ehca_mr *me;
 
-       me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
+       me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
        if (me) {
-               memset(me, 0, sizeof(struct ehca_mr));
                spin_lock_init(&me->mrlock);
        } else
                ehca_gen_err("alloc failed");
 {
        struct ehca_mw *me;
 
-       me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
+       me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
        if (me) {
-               memset(me, 0, sizeof(struct ehca_mw));
                spin_lock_init(&me->mwlock);
        } else
                ehca_gen_err("alloc failed");
 
 {
        struct ehca_pd *pd;
 
-       pd = kmem_cache_alloc(pd_cache, GFP_KERNEL);
+       pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
        if (!pd) {
                ehca_err(device, "device=%p context=%p out of memory",
                         device, context);
                return ERR_PTR(-ENOMEM);
        }
 
-       memset(pd, 0, sizeof(struct ehca_pd));
        pd->ownpid = current->tgid;
 
        /*
 
        if (pd->uobject && udata)
                context = pd->uobject->context;
 
-       my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL);
+       my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
        if (!my_qp) {
                ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
                return ERR_PTR(-ENOMEM);
        }
 
-       memset(my_qp, 0, sizeof(struct ehca_qp));
        memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
        spin_lock_init(&my_qp->spinlock_s);
        spin_lock_init(&my_qp->spinlock_r);
 
        struct asd_ascb *ascb;
        unsigned long flags;
 
-       ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags);
+       ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags);
 
        if (ascb) {
-               memset(ascb, 0, sizeof(*ascb));
                ascb->dma_scb.size = sizeof(struct scb);
                ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
                                                     gfp_flags,
 
        int err = 0;
        int write = (data_direction == DMA_TO_DEVICE);
 
-       sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
+       sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
        if (!sioc)
                return DRIVER_ERROR << 24;
-       memset(sioc, 0, sizeof(*sioc));
 
        req = blk_get_request(sdev->request_queue, write, gfp);
        if (!req)
 
 
        maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
 
-       sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG);
+       sb_desc = kmem_cache_zalloc(usb_desc_cache, SLAB_FLAG);
        assert(sb_desc != NULL);
-       memset(sb_desc, 0, sizeof(USB_SB_Desc_t));
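
Besides absorbing the memset, this hunk drops the (USB_SB_Desc_t *) cast:
kmem_cache_zalloc() returns void *, which converts implicitly in C, so the
cast was noise:

        sb_desc = kmem_cache_zalloc(usb_desc_cache, SLAB_FLAG);        /* no cast needed on void * */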
 
 
        if (usb_pipeout(urb->pipe)) {
 
 {
        struct urb_priv *urbp;
 
-       urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
+       urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
        if (!urbp)
                return NULL;
 
-       memset((void *)urbp, 0, sizeof(*urbp));
-
        urbp->urb = urb;
        urb->hcpriv = urbp;
        
 
        if ((unsigned long)nr_events > aio_max_nr)
                return ERR_PTR(-EAGAIN);
 
-       ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
+       ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);
 
-       memset(ctx, 0, sizeof(*ctx));
        ctx->max_reqs = nr_events;
        mm = ctx->mm = current->mm;
        atomic_inc(&mm->mm_count);
 
 {
        struct configfs_dirent * sd;
 
-       sd = kmem_cache_alloc(configfs_dir_cachep, GFP_KERNEL);
+       sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL);
        if (!sd)
                return NULL;
 
-       memset(sd, 0, sizeof(*sd));
        atomic_set(&sd->s_count, 1);
        INIT_LIST_HEAD(&sd->s_links);
        INIT_LIST_HEAD(&sd->s_children);
 
 {
        struct dlm_lkb *lkb;
 
-       lkb = kmem_cache_alloc(lkb_cache, GFP_KERNEL);
-       if (lkb)
-               memset(lkb, 0, sizeof(*lkb));
+       lkb = kmem_cache_zalloc(lkb_cache, GFP_KERNEL);
        return lkb;
 }
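
With the conditional memset gone, the temporary no longer earns its keep; a
follow-up cleanup (a suggestion, not part of this patch) could shrink the
helper body to a single statement:

        return kmem_cache_zalloc(lkb_cache, GFP_KERNEL);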
 
 
 {
        struct dquot *dquot;
 
-       dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS);
+       dquot = kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
        if(!dquot)
                return NODQUOT;
 
-       memset((caddr_t)dquot, 0, sizeof(struct dquot));
        mutex_init(&dquot->dq_lock);
        INIT_LIST_HEAD(&dquot->dq_free);
        INIT_LIST_HEAD(&dquot->dq_inuse);
 
                goto out;
        }
        /* Released in this function */
-       page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER);
+       page_virt = kmem_cache_zalloc(ecryptfs_header_cache_0, GFP_USER);
        if (!page_virt) {
                ecryptfs_printk(KERN_ERR, "Out of memory\n");
                rc = -ENOMEM;
                goto out;
        }
-       memset(page_virt, 0, PAGE_CACHE_SIZE);
+
        rc = ecryptfs_write_headers_virt(page_virt, crypt_stat,
                                         ecryptfs_dentry);
        if (unlikely(rc)) {
 
        int lower_flags;
 
        /* Released in ecryptfs_release or end of function if failure */
-       file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL);
+       file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
        ecryptfs_set_file_private(file, file_info);
        if (!file_info) {
                ecryptfs_printk(KERN_ERR,
                rc = -ENOMEM;
                goto out;
        }
-       memset(file_info, 0, sizeof(*file_info));
        lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
        crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
        mount_crypt_stat = &ecryptfs_superblock_to_private(
 
                goto out;
        }
        /* Released in this function */
-       page_virt =
-           (char *)kmem_cache_alloc(ecryptfs_header_cache_2,
-                                    GFP_USER);
+       page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER);
        if (!page_virt) {
                rc = -ENOMEM;
                                "Cannot ecryptfs_kmalloc a page\n");
                goto out_dput;
        }
-       memset(page_virt, 0, PAGE_CACHE_SIZE);
+
        rc = ecryptfs_read_header_region(page_virt, lower_dentry, nd->mnt);
        crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
        if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_POLICY_APPLIED))
 
        /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
         * at end of function upon failure */
        auth_tok_list_item =
-           kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
+           kmem_cache_zalloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
        if (!auth_tok_list_item) {
                ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
                rc = -ENOMEM;
                goto out;
        }
-       memset(auth_tok_list_item, 0,
-              sizeof(struct ecryptfs_auth_tok_list_item));
        (*new_auth_tok) = &auth_tok_list_item->auth_tok;
 
        /* check for body size - one to two bytes */
 
 
        /* Released in ecryptfs_put_super() */
        ecryptfs_set_superblock_private(sb,
-                                       kmem_cache_alloc(ecryptfs_sb_info_cache,
+                                       kmem_cache_zalloc(ecryptfs_sb_info_cache,
                                                         GFP_KERNEL));
        if (!ecryptfs_superblock_to_private(sb)) {
                ecryptfs_printk(KERN_WARNING, "Out of memory\n");
                rc = -ENOMEM;
                goto out;
        }
-       memset(ecryptfs_superblock_to_private(sb), 0,
-              sizeof(struct ecryptfs_sb_info));
        sb->s_op = &ecryptfs_sops;
        /* Released through deactivate_super(sb) from get_sb_nodev */
        sb->s_root = d_alloc(NULL, &(const struct qstr) {
        /* Released in d_release when dput(sb->s_root) is called */
        /* through deactivate_super(sb) from get_sb_nodev() */
        ecryptfs_set_dentry_private(sb->s_root,
-                                   kmem_cache_alloc(ecryptfs_dentry_info_cache,
+                                   kmem_cache_zalloc(ecryptfs_dentry_info_cache,
                                                     GFP_KERNEL));
        if (!ecryptfs_dentry_to_private(sb->s_root)) {
                ecryptfs_printk(KERN_ERR,
                rc = -ENOMEM;
                goto out;
        }
-       memset(ecryptfs_dentry_to_private(sb->s_root), 0,
-              sizeof(struct ecryptfs_dentry_info));
        rc = 0;
 out:
        /* Should be able to rely on deactivate_super called from
 
                bprm->loader += stack_base;
        bprm->exec += stack_base;
 
-       mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!mpnt)
                return -ENOMEM;
 
-       memset(mpnt, 0, sizeof(*mpnt));
-
        down_write(&mm->mmap_sem);
        {
                mpnt->vm_mm = mm;
 
                return;
        }
 
-       bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
-       memset(bd, 0, sizeof(struct gfs2_bufdata));
+       bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
        bd->bd_bh = bh;
        bd->bd_gl = gl;
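
No NULL check follows the gfs2 allocation by design: __GFP_NOFAIL tells the
allocator to retry indefinitely rather than return NULL, so the immediate
dereferences are safe:

        bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
        bd->bd_bh = bh;         /* safe: __GFP_NOFAIL never yields NULL */
        bd->bd_gl = gl;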
 
 
 
 struct vfsmount *alloc_vfsmnt(const char *name)
 {
-       struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
+       struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
        if (mnt) {
-               memset(mnt, 0, sizeof(struct vfsmount));
                atomic_set(&mnt->mnt_count, 1);
                INIT_LIST_HEAD(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
 
        struct smb_request *req;
        unsigned char *buf = NULL;
 
-       req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
+       req = kmem_cache_zalloc(req_cachep, GFP_KERNEL);
        VERBOSE("allocating request: %p\n", req);
        if (!req)
                goto out;
                }
        }
 
-       memset(req, 0, sizeof(struct smb_request));
        req->rq_buffer = buf;
        req->rq_bufsize = bufsize;
        req->rq_server = server;
 
 {
        struct sysfs_dirent * sd;
 
-       sd = kmem_cache_alloc(sysfs_dir_cachep, GFP_KERNEL);
+       sd = kmem_cache_zalloc(sysfs_dir_cachep, GFP_KERNEL);
        if (!sd)
                return NULL;
 
-       memset(sd, 0, sizeof(*sd));
        atomic_set(&sd->s_count, 1);
        atomic_set(&sd->s_event, 1);
        INIT_LIST_HEAD(&sd->s_children);
 
 static inline struct sas_task *sas_alloc_task(gfp_t flags)
 {
        extern struct kmem_cache *sas_task_cache;
-       struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags);
+       struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
 
        if (task) {
-               memset(task, 0, sizeof(*task));
                INIT_LIST_HEAD(&task->list);
                spin_lock_init(&task->task_state_lock);
                task->task_state_flags = SAS_TASK_STATE_PENDING;
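
A recurring point in these conversions: zeroed memory is not initialized
memory for embedded primitives, which is why the init calls survive.  An
empty list head must point at itself, and a spinlock wants spin_lock_init()
(especially with lock debugging enabled), so neither can be replaced by the
allocator's zero fill:

        INIT_LIST_HEAD(&task->list);            /* empty list points at itself, not NULL */
        spin_lock_init(&task->task_state_lock); /* debug/lockdep state is not all-zeroes */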
 
 static struct k_itimer * alloc_posix_timer(void)
 {
        struct k_itimer *tmr;
-       tmr = kmem_cache_alloc(posix_timers_cache, GFP_KERNEL);
+       tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
        if (!tmr)
                return tmr;
-       memset(tmr, 0, sizeof (struct k_itimer));
        if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
                kmem_cache_free(posix_timers_cache, tmr);
                tmr = NULL;
 
                if (ops->gc())
                        return NULL;
        }
-       dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+       dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;
-       memset(dst, 0, ops->entry_size);
        atomic_set(&dst->__refcnt, 0);
        dst->ops = ops;
        dst->lastuse = jiffies;
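
One subtlety in the hunk above and the neighbour-table hunk below: the old
code zeroed ops->entry_size (resp. tbl->entry_size) bytes, whereas
kmem_cache_zalloc() zeroes the cache's object size.  The two agree only
because each protocol creates its cache with that entry size as the object
size; paraphrasing the ipv4 setup of this era (assumed call site, not shown
in this patch):

        ipv4_dst_ops.kmem_cachep =
                kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
                                  SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);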
 
                        goto out_entries;
        }
 
-       n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
+       n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
        if (!n)
                goto out_entries;
 
-       memset(n, 0, tbl->entry_size);
-
        skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        n->updated        = n->used = now;
 
 
 replace:
        err = -ENOBUFS;
-       new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
+       new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL);
        if (new_f == NULL)
                goto out;
 
-       memset(new_f, 0, sizeof(struct dn_fib_node));
-
        new_f->fn_key = key;
        new_f->fn_type = type;
        new_f->fn_scope = r->rtm_scope;
 
  */
 static struct mfc_cache *ipmr_cache_alloc(void)
 {
-       struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
+       struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        if(c==NULL)
                return NULL;
-       memset(c, 0, sizeof(*c));
        c->mfc_un.res.minvif = MAXVIFS;
        return c;
 }
 
 static struct mfc_cache *ipmr_cache_alloc_unres(void)
 {
-       struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
+       struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
        if(c==NULL)
                return NULL;
-       memset(c, 0, sizeof(*c));
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;
        return c;
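
After the conversion the two ipmr constructors differ only in their gfp
flags and follow-up initialization; a hypothetical consolidation (not part
of this patch) could share one allocation helper:

        /* hypothetical helper, not in the patch */
        static struct mfc_cache *ipmr_cache_alloc_common(gfp_t flags)
        {
                return kmem_cache_zalloc(mrt_cachep, flags);
        }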
 
        struct ip_vs_conn *cp;
        struct ip_vs_protocol *pp = ip_vs_proto_get(proto);
 
-       cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
+       cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
        if (cp == NULL) {
                IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n");
                return NULL;
        }
 
-       memset(cp, 0, sizeof(*cp));
        INIT_LIST_HEAD(&cp->c_list);
        init_timer(&cp->timer);
        cp->timer.data     = (unsigned long)cp;
 
                }
        }
 
-       conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
+       conntrack = kmem_cache_zalloc(ip_conntrack_cachep, GFP_ATOMIC);
        if (!conntrack) {
                DEBUGP("Can't allocate conntrack.\n");
                atomic_dec(&ip_conntrack_count);
                return ERR_PTR(-ENOMEM);
        }
 
-       memset(conntrack, 0, sizeof(*conntrack));
        atomic_set(&conntrack->ct_general.use, 1);
        conntrack->ct_general.destroy = destroy_conntrack;
        conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
 
 {
        struct fib6_node *fn;
 
-       if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
-               memset(fn, 0, sizeof(struct fib6_node));
+       fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);
 
        return fn;
 }
 
 {
        struct sctp_chunk *retval;
 
-       retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
+       retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC);
 
        if (!retval)
                goto nodata;
-       memset(retval, 0, sizeof(struct sctp_chunk));
 
        if (!sk) {
                SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
 
 {
        struct avc_node *node;
 
-       node = kmem_cache_alloc(avc_node_cachep, GFP_ATOMIC);
+       node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
        if (!node)
                goto out;
 
-       memset(node, 0, sizeof(*node));
        INIT_RCU_HEAD(&node->rhead);
        INIT_LIST_HEAD(&node->list);
        atomic_set(&node->ae.used, 1);
 
        struct task_security_struct *tsec = current->security;
        struct inode_security_struct *isec;
 
-       isec = kmem_cache_alloc(sel_inode_cache, GFP_KERNEL);
+       isec = kmem_cache_zalloc(sel_inode_cache, GFP_KERNEL);
        if (!isec)
                return -ENOMEM;
 
-       memset(isec, 0, sizeof(*isec));
        mutex_init(&isec->lock);
        INIT_LIST_HEAD(&isec->list);
        isec->inode = inode;
 
                  struct avtab_key *key, struct avtab_datum *datum)
 {
        struct avtab_node * newnode;
-       newnode = kmem_cache_alloc(avtab_node_cachep, GFP_KERNEL);
+       newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
        if (newnode == NULL)
                return NULL;
-       memset(newnode, 0, sizeof(struct avtab_node));
        newnode->key = *key;
        newnode->datum = *datum;
        if (prev) {