/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;
#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;

				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}
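/*
 * Saved highmem pages are kept on a simple singly-linked list: each node
 * records the highmem page frame that was saved and a lowmem copy of its
 * contents.
 */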
struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;
static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;

	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}
int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	return 0;
}
int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;

		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif /* CONFIG_HIGHMEM */
/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using unsafe_pages.
 */

static unsigned int unsafe_pages;
static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}
/**
 *	free_image_page - free page represented by @addr, allocated with
 *	alloc_image_page() (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	ClearPageNosave(virt_to_page(addr));
	if (clear_nosave_free)
		ClearPageNosaveFree(virt_to_page(addr));
	free_page((unsigned long)addr);
}
/**
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
/**
 *	saveable_page - Determine whether a page should be cloned or not.
 *
 *	We save a page if it isn't Nosave, is not in the range of pages
 *	statically defined as 'unsaveable', and isn't part of a free chunk
 *	of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			n += !!saveable_page(pfn);
	}
	return n;
}
static inline void copy_data_page(long *dst, long *src)
{
	int n;

	/* copy_page and memcpy are not usable for copying task structs. */
	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *pbe = pblist;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = saveable_page(pfn);

			if (page) {
				void *ptr = page_address(page);

				BUG_ON(!pbe);
				copy_data_page((void *)pbe->address, ptr);
				pbe->orig_address = (unsigned long)ptr;
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}
/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		free_image_page(pblist, clear_nosave_free);
		pblist = pbe;
	}
}
/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage, unsigned int n)
{
	struct pbe *p;

	p = pbpage;
	pbpage += n - 1;
	do
		p->next = p + 1;
	while (++p < pbpage);
}
/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 *
 *	This function assumes that pages allocated by alloc_image_page() will
 *	always be zeroed.
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage, PBES_PER_PAGE);
		num += PBES_PER_PAGE;
	}
	num -= PBES_PER_PAGE;
	fill_pb_page(pbpage, nr_pages - num);
}
/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
				 int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
	     pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) {	/* an allocation failed */
		free_pagedir(pblist, 1);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}
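/*
 * Resulting layout: every directory page holds PBES_PER_PAGE struct pbe
 * entries linked in order; the entry at index PB_PAGE_SKIP is the last one
 * on its page and its ->next pointer leads to the first entry of the next
 * directory page, so the whole directory reads as one continuous PBE list.
 * On the final page only as many entries as needed are chained, and the
 * list is NULL-terminated because the pages are zeroed on allocation.
 */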
/**
 *	swsusp_free - free pages allocated for the suspend.
 *
 *	Suspend pages are allocated before the atomic copy is made, so we
 *	need to release them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}
/**
 *	enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
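	/*
	 * We need room for the copied data pages themselves, for
	 * PAGES_FOR_IO pages used while writing the image out, and for
	 * one page of PBEs per PBES_PER_PAGE data pages.
	 */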
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}
static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/*
	 * While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  This especially means we must _not_
	 * touch swap space!  Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}
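/*
 * The swsusp_info header stores the data used to check the image on resume
 * (kernel version, amount of memory, utsname, number of CPUs) together
 * with the number of image and metadata pages that follow it.
 */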
static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}
/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}
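/*
 * The metadata part of the image is a sequence of such pages: each one is
 * an array of original page frame addresses, one unsigned long per data
 * page, with the unused tail of the last page zero-filled.
 */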
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of the data stream, and a
 *	negative number is returned on error.  In such cases the structure
 *	pointed to by @handle is not updated and should not be used any more.
 */
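/*
 * Illustrative sketch of how a caller might drive this interface (the
 * helper write_out() is hypothetical, not part of this file):
 *
 *	struct snapshot_handle handle;
 *	int nbytes;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((nbytes = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		write_out(data_of(handle), nbytes);
 *
 * A negative nbytes after the loop indicates an error.
 */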
int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else
		handle->cur_offset += count;
	handle->offset += count;
	return count;
}
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	unsafe_pages = 0;

	return 0;
}
static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}
static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release, system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version, system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine, system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 *	load_header - check the image header and copy the data from it
 */

static int load_header(struct snapshot_handle *handle,
                       struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}
/**
 *	prepare_image - use the metadata contained in the PBE list
 *	pointed to by pagedir_nosave to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image ("unsafe" pages) and allocate
 *	memory for the image.
 *
 *	The idea is to allocate the PBE list first and then
 *	allocate as many pages as are needed for the image data,
 *	but not to assign these pages to the PBEs initially.
 *	Instead, we just mark them as allocated and create a list
 *	of "safe" pages to be used later.
 */

struct safe_page {
	struct safe_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;
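/*
 * A struct safe_page fills exactly one page frame, so the nodes of the
 * safe_pages list are the spare page frames themselves and no additional
 * memory is needed to keep track of them.
 */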
static int prepare_image(struct snapshot_handle *handle)
{
	int error = 0;
	unsigned int nr_pages = nr_copy_pages;
	struct pbe *p, *pblist = NULL;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p, 0);
		if (!pblist)
			error = -ENOMEM;
	}
	safe_pages = NULL;
	if (!error && nr_pages > unsafe_pages) {
		nr_pages -= unsafe_pages;
		while (nr_pages--) {
			struct safe_page *ptr;

			ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
			if (!ptr) {
				error = -ENOMEM;
				break;
			}
			if (!PageNosaveFree(virt_to_page(ptr))) {
				/* The page is "safe", add it to the list */
				ptr->next = safe_pages;
				safe_pages = ptr;
			}
			/* Mark the page as allocated */
			SetPageNosave(virt_to_page(ptr));
			SetPageNosaveFree(virt_to_page(ptr));
		}
	}
	if (!error) {
		pagedir_nosave = pblist;
	} else {
		handle->pbe = NULL;
		swsusp_free();
	}
	return error;
}
static void *get_buffer(struct snapshot_handle *handle)
{
	struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
	struct page *page = virt_to_page(pbe->orig_address);

	if (PageNosave(page) && PageNosaveFree(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the read page
		 */
		pbe->address = 0;
		if (last && last->next)
			last->next = NULL;
		return (void *)pbe->orig_address;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the read page
	 */
	pbe->address = (unsigned long)safe_pages;
	safe_pages = safe_pages->next;
	if (last)
		last->next = pbe;
	handle->last_pbe = pbe;
	return (void *)pbe->address;
}
/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function on each subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */
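/*
 * Note on handle->sync_read: it is set by default and cleared only when
 * handle->buffer has been pointed directly at the page frame that is the
 * final destination of the data, i.e. when no further copying will follow
 * the write.
 */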
int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (!handle->prev) {
			error = load_header(handle,
					(struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer,
							handle->pbe);
			if (!handle->pbe) {
				error = prepare_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->last_pbe = NULL;
				handle->buffer = get_buffer(handle);
				handle->sync_read = 0;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = get_buffer(handle);
			handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else
		handle->cur_offset += count;
	handle->offset += count;
	return count;
}
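/*
 * snapshot_image_loaded - report whether the entire image (all metadata
 * and data pages) has been fed to snapshot_write_next(), so that the
 * restored memory state is complete.
 */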
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->cur <= nr_meta_pages + nr_copy_pages);
}