/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
STATIC struct quotactl_ops xfs_quotactl_operations;
STATIC struct super_operations xfs_super_operations;
STATIC kmem_zone_t *xfs_vnode_zone;
STATIC kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
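
/*
 * Allocate a mount-args structure and seed it from the flags that the
 * generic mount(2) path has already parsed on the super_block.
 */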
STATIC struct xfs_mount_args *
xfs_args_allocate(
	struct super_block	*sb,
	int			silent)
{
	struct xfs_mount_args	*args;

	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
	args->logbufs = args->logbufsize = -1;
	strncpy(args->fsname, sb->s_id, MAXNAMELEN);

	/* Copy the already-parsed mount(2) flags we're interested in */
	if (sb->s_flags & MS_DIRSYNC)
		args->flags |= XFSMNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		args->flags |= XFSMNT_WSYNC;
	if (silent)
		args->flags |= XFSMNT_QUIET;
	args->flags |= XFSMNT_32BITINODES;

	return args;
}
__uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
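
/*
 * Worked example (illustrative, not from the original source): on a
 * 32-bit kernel without CONFIG_LBD, with 4K pages (PAGE_CACHE_SHIFT
 * == 12) and a 1K blocksize (blockshift == 10), pagefactor is
 * 4096 >> 2 == 1024 and bitshift is 31, giving (1024 << 31) - 1,
 * i.e. 2TiB - 1.  With CONFIG_LBD, pagefactor is 4096 and bitshift
 * is 32, giving (4096 << 32) - 1 == 16TiB - 1, independent of the
 * filesystem blocksize.
 */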
STATIC __inline__ void
xfs_set_inodeops(
	struct inode		*inode)
{
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		if (inode->i_blocks)
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}
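
/*
 * Bring the generic Linux inode up to date from the XFS incore inode:
 * copy ownership, size, block counts, timestamps and flag bits out of
 * ip->i_d, then clear VMODIFIED on the vnode.
 */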
STATIC __inline__ void
xfs_revalidate_inode(
	xfs_mount_t		*mp,
	bhv_vnode_t		*vp,
	xfs_inode_t		*ip)
{
	struct inode		*inode = vn_to_inode(vp);

	inode->i_mode	= ip->i_d.di_mode;
	inode->i_nlink	= ip->i_d.di_nlink;
	inode->i_uid	= ip->i_d.di_uid;
	inode->i_gid	= ip->i_d.di_gid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_blksize = xfs_preferred_iosize(mp);
	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	vp->v_flag &= ~VMODIFIED;
}
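
/*
 * Hook an XFS inode into the behavior chain of its vnode and, once the
 * inode is fully constructed, install the Linux ops vectors and unlock
 * the new inode.
 */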
void
xfs_initialize_vnode(
	bhv_desc_t		*bdp,
	bhv_vnode_t		*vp,
	bhv_desc_t		*inode_bhv,
	int			unlock)
{
	xfs_inode_t		*ip = XFS_BHVTOI(inode_bhv);
	struct inode		*inode = vn_to_inode(vp);

	if (!inode_bhv->bd_vobj) {
		vp->v_vfsp = bhvtovfs(bdp);
		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
	}

	/*
	 * We need to set the ops vectors, and unlock the inode, but if
	 * we have been called during the new inode create process, it is
	 * too early to fill in the Linux inode.  We will get called a
	 * second time once the inode is properly set up, and then we can
	 * finish the job.
	 */
	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
		xfs_set_inodeops(inode);

		ip->i_flags &= ~XFS_INEW;
		barrier();

		unlock_new_inode(inode);
	}
}
int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = open_bdev_excl(name, 0, mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		close_bdev_excl(bdev);
}
/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	XFS_BUF_UNDONE(sbp);
	XFS_BUF_UNREAD(sbp);
	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_WRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_DONE(sbp);
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int error;

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
					QUEUE_ORDERED_NONE) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported by the underlying device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	error = xfs_barrier_test(mp);
	if (error) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
	}
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
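
/*
 * Inode allocation and freeing for the VFS: XFS embeds the Linux inode
 * inside a larger vnode structure, carved from its own slab zone.
 */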
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	bhv_vnode_t		*vp;

	vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
	if (unlikely(!vp))
		return NULL;
	return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}

STATIC void
xfs_fs_inode_init_once(
	void			*vnode,
	kmem_zone_t		*zonep,
	unsigned long		flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
		      SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}
STATIC int
xfs_init_zones(void)
{
	xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
					KM_ZONE_SPREAD,
					xfs_fs_inode_init_once);
	if (!xfs_vnode_zone)
		goto out;

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out_destroy_vnode_zone;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_free_ioend_zone;
	return 0;

 out_free_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
	kmem_zone_destroy(xfs_vnode_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_vnode_zone);
	kmem_zone_destroy(xfs_ioend_zone);
}
/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	int			sync)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);
	int			error = 0, flags = FLUSH_INODE;

	if (vp) {
		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
		if (sync)
			flags |= FLUSH_SYNC;
		error = bhv_vop_iflush(vp, flags);
		if (error == EAGAIN)
			error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
	}

	return -error;
}
STATIC void
xfs_fs_clear_inode(
	struct inode		*inode)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);

	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_INC(vn_reclaim);
	XFS_STATS_DEC(vn_active);

	/*
	 * This can happen because xfs_iget_core calls xfs_idestroy if we
	 * find an inode with di_mode == 0 but without IGET_CREATE set.
	 */
	if (VNHEAD(vp))
		bhv_vop_inactive(vp, NULL);

	VN_LOCK(vp);
	vp->v_flag &= ~VMODIFIED;
	VN_UNLOCK(vp, 0);

	if (VNHEAD(vp))
		if (bhv_vop_reclaim(vp))
			panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, vp);

	ASSERT(VNHEAD(vp) == NULL);

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
#endif
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct bhv_vfs	*vfs,
	void		*data,
	void		(*syncer)(bhv_vfs_t *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;

	spin_lock(&vfs->vfs_sync_lock);
	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
	spin_unlock(&vfs->vfs_sync_lock);
	wake_up_process(vfs->vfs_sync_task);
}
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	bhv_vfs_t	*vfs,
	void		*inode)
{
	filemap_flush(((struct inode *)inode)->i_mapping);
	iput((struct inode *)inode);
}
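
/*
 * Queue the flush for xfssyncd (taking an extra inode reference so the
 * inode cannot go away underneath it), then give the daemon half a
 * second to make progress before the allocation path retries.
 */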
void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	struct bhv_vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}
/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	bhv_vfs_t	*vfs,
	void		*inode)
{
	sync_blockdev(vfs->vfs_super->s_bdev);
	iput((struct inode *)inode);
}
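
/*
 * As above, but push the whole block device, and then force the log
 * out to disk synchronously as well.
 */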
void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	struct bhv_vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
STATIC void
vfs_sync_worker(
	bhv_vfs_t	*vfsp,
	void		*unused)
{
	int		error;

	if (!(vfsp->vfs_flag & VFS_RDONLY))
		error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
					SYNC_ATTR | SYNC_REFCACHE, NULL);
	vfsp->vfs_sync_seq++;
	wmb();
	wake_up(&vfsp->vfs_wait_single_sync_task);
}
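
/*
 * The per-mount sync daemon: sleep for xfs_syncd_centisecs, then drain
 * the work list.  It is also woken explicitly, e.g. by laptop mode or
 * by the ENOSPC flush helpers above.
 */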
STATIC int
xfssyncd(
	void			*arg)
{
	long			timeleft;
	bhv_vfs_t		*vfsp = (bhv_vfs_t *) arg;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
			break;

		spin_lock(&vfsp->vfs_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
			list_add_tail(&vfsp->vfs_sync_work.w_list,
					&vfsp->vfs_sync_list);
		}
		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&vfsp->vfs_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(vfsp, work->w_data);
			list_del(&work->w_list);
			if (work == &vfsp->vfs_sync_work)
				continue;
			kmem_free(work, sizeof(struct bhv_vfs_sync_work));
		}
	}

	return 0;
}
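
/*
 * Start and stop the xfssyncd kthread for a mount.
 */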
STATIC int
xfs_fs_start_syncd(
	bhv_vfs_t		*vfsp)
{
	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
	vfsp->vfs_sync_work.w_vfs = vfsp;
	vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
	if (IS_ERR(vfsp->vfs_sync_task))
		return -PTR_ERR(vfsp->vfs_sync_task);
	return 0;
}

STATIC void
xfs_fs_stop_syncd(
	bhv_vfs_t		*vfsp)
{
	kthread_stop(vfsp->vfs_sync_task);
}
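
/*
 * Unmount path: stop the sync daemon, flush remaining dirty state, and
 * take the behavior-layer vfs apart.
 */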
STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	int			error;

	xfs_fs_stop_syncd(vfsp);
	bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
	error = bhv_vfs_unmount(vfsp, 0, NULL);
	if (error) {
		printk("XFS: unmount got error=%d\n", error);
		printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
	} else {
		vfs_deallocate(vfsp);
	}
}
STATIC void
xfs_fs_write_super(
	struct super_block	*sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
	sb->s_dirt = 0;
}
STATIC int
xfs_fs_sync_super(
	struct super_block	*sb,
	int			wait)
{
	bhv_vfs_t	*vfsp = vfs_from_sb(sb);
	int		error;
	int		flags;

	if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
		flags = SYNC_QUIESCE;
	else
		flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

	error = bhv_vfs_sync(vfsp, flags, NULL);
	sb->s_dirt = 0;

	if (unlikely(laptop_mode)) {
		int	prev_sync_seq = vfsp->vfs_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(vfsp->vfs_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(vfsp->vfs_wait_single_sync_task,
				vfsp->vfs_sync_seq != prev_sync_seq);
	}

	return -error;
}
STATIC int
xfs_fs_statfs(
	struct super_block	*sb,
	struct kstatfs		*statp)
{
	return -bhv_vfs_statvfs(vfs_from_sb(sb), statp, NULL);
}
STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, 0);
	int			error;

	error = bhv_vfs_parseargs(vfsp, options, args, 1);
	if (!error)
		error = bhv_vfs_mntupdate(vfsp, flags, args);
	kmem_free(args, sizeof(*args));
	return -error;
}
STATIC void
xfs_fs_lockfs(
	struct super_block	*sb)
{
	bhv_vfs_freeze(vfs_from_sb(sb));
}
STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	return -bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
}
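
/*
 * The quotactl hooks below translate the generic Linux quota
 * operations into the corresponding XFS Q_X* quotactl commands.
 */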
STATIC int
xfs_fs_quotasync(
	struct super_block	*sb,
	int			type)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
}
STATIC int
xfs_fs_getxstate(
	struct super_block	*sb,
	struct fs_quota_stat	*fqs)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}
STATIC int
xfs_fs_setxstate(
	struct super_block	*sb,
	unsigned int		flags,
	int			op)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
}
STATIC int
xfs_fs_getxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb),
				 (type == USRQUOTA) ? Q_XGETQUOTA :
				  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
				   Q_XGETPQUOTA), id, (caddr_t)fdq);
}
STATIC int
xfs_fs_setxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	return -bhv_vfs_quotactl(vfs_from_sb(sb),
				 (type == USRQUOTA) ? Q_XSETQLIM :
				  ((type == GRPQUOTA) ? Q_XSETGQLIM :
				   Q_XSETPQLIM), id, (caddr_t)fdq);
}
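
/*
 * Mount-time super_block setup: parse the options, mount the XFS
 * behavior chain, copy the geometry into the super_block, and publish
 * the root dentry; on failure unwind whatever was set up.
 */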
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct bhv_vnode	*rootvp;
	struct bhv_vfs		*vfsp = vfs_allocate(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, silent);
	struct kstatfs		statvfs;
	int			error;

	bhv_insert_all_vfsops(vfsp);

	error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	sb_min_blocksize(sb, BBSIZE);
	sb->s_export_op = &xfs_export_operations;
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_op = &xfs_super_operations;

	error = bhv_vfs_mount(vfsp, args, NULL);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
	if (error)
		goto fail_unmount;

	sb->s_dirt = 1;
	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	set_posix_acl_flag(sb);

	error = bhv_vfs_root(vfsp, &rootvp);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(vn_to_inode(rootvp));
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}
	if (is_bad_inode(sb->s_root->d_inode)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	if ((error = xfs_fs_start_syncd(vfsp)))
		goto fail_vnrele;
	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		VN_RELE(rootvp);
	}

fail_unmount:
	bhv_vfs_unmount(vfsp, 0, NULL);

fail_vfsop:
	vfs_deallocate(vfsp);
	kmem_free(args, sizeof(*args));
	return -error;
}
STATIC struct super_block *
xfs_fs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}
STATIC struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.write_inode		= xfs_fs_write_inode,
	.clear_inode		= xfs_fs_clear_inode,
	.put_super		= xfs_fs_put_super,
	.write_super		= xfs_fs_write_super,
	.sync_fs		= xfs_fs_sync_super,
	.write_super_lockfs	= xfs_fs_lockfs,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};
STATIC struct quotactl_ops xfs_quotactl_operations = {
	.quota_sync		= xfs_fs_quotasync,
	.get_xstate		= xfs_fs_getxstate,
	.set_xstate		= xfs_fs_setxstate,
	.get_xquota		= xfs_fs_getxquota,
	.set_xquota		= xfs_fs_setxquota,
};
STATIC struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= xfs_fs_get_sb,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
STATIC int __init
init_xfs_fs( void )
{
	int			error;
	struct sysinfo		si;
	static char		message[] __initdata = KERN_INFO \
		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

	printk(message);

	si_meminfo(&si);
	xfs_physmem = si.totalram;

	error = xfs_init_zones();
	if (error < 0)
		goto undo_zones;

	error = xfs_buf_init();
	if (error < 0)
		goto undo_buffers;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto undo_register;
	return 0;

undo_register:
	xfs_buf_terminate();
undo_buffers:
	xfs_destroy_zones();
undo_zones:
	return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
	unregister_filesystem(&xfs_fs_type);
	xfs_buf_terminate();
	xfs_destroy_zones();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");