 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/lmb.h>

#include <asm/firmware.h>
#include <asm/lv1call.h>
#define DBG udbg_printf

#if defined(CONFIG_PS3_DYNAMIC_DMA)
enum { USE_DYNAMIC_DMA = 1 };
#else
enum { USE_DYNAMIC_DMA = 0 };
#endif
static unsigned long make_page_sizes(unsigned long a, unsigned long b)
	return (a << 56) | (b << 48);
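/*
 * Illustrative example (not from the original source): with
 * PAGE_SHIFT_16M == 24 and PAGE_SHIFT_64K == 16,
 * make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K) packs the two shifts
 * into the top two bytes and yields 0x1810000000000000.
 */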
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,

	/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

 * struct mem_region - memory region structure
 * @size: size in bytes
 * @offset: difference between base and rm.size

 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
	unsigned long vas_id;
	unsigned long htab_size;
	struct mem_region rm;
	struct mem_region r1;

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)

	DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);

static struct map map;
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
	BUG_ON(is_kernel_addr(phys_addr));
	return (phys_addr < map.rm.size || phys_addr >= map.total)
		? phys_addr : phys_addr + map.r1.offset;
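/*
 * Illustrative example (values assumed, not taken from the HV): with
 * map.rm.size = 0x8000000 and map.r1.base = 0x100000000 (so that
 * map.r1.offset = 0xf8000000), the physical address 0x8000000 translates
 * to lpar address 0x100000000, while addresses below map.rm.size or at
 * or above map.total pass through unchanged.
 */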
EXPORT_SYMBOL(ps3_mm_phys_to_lpar);

 * ps3_mm_vas_create - create the virtual address space

void __init ps3_mm_vas_create(unsigned long *htab_size)

	unsigned long start_address;
	unsigned long access_right;
	unsigned long max_page_size;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,

		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
		2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
		&map.vas_id, &map.htab_size);

		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	result = lv1_select_virtual_address_space(map.vas_id);

		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	panic("ps3_mm_vas_create failed");
 * ps3_mm_vas_destroy - destroy the virtual address space

void ps3_mm_vas_destroy(void)

	DBG("%s:%d: map.vas_id = %lu\n", __func__, __LINE__, map.vas_id);

	result = lv1_select_virtual_address_space(0);

	result = lv1_destruct_virtual_address_space(map.vas_id);

/*============================================================================*/
/* memory hotplug routines */
/*============================================================================*/
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
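	/*
	 * Illustrative example (not in the original source): a requested
	 * size of 0x7f00000 bytes is trimmed to 0x7000000, the largest
	 * multiple of the 16MB vas large page size.
	 */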
	DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
		(unsigned long)(size - r->size),
		(size - r->size) / 1024 / 1024);

		DBG("%s:%d: size == 0\n", __func__, __LINE__);

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->offset = r->base - map.rm.size;

	r->size = r->base = r->offset = 0;

 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region

static void ps3_mm_region_destroy(struct mem_region *r)

	DBG("%s:%d: r->base = %lxh\n", __func__, __LINE__, r->base);

	result = lv1_release_memory(r->base);

	r->size = r->base = r->offset = 0;
	map.total = map.rm.size;

 * ps3_mm_add_memory - hot add memory

static int __init ps3_mm_add_memory(void)

	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))

	BUG_ON(!mem_init_done);

	start_addr = map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

		DBG("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);

	lmb_add(start_addr, map.r1.size);

	result = online_pages(start_pfn, nr_pages);

		DBG("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);

core_initcall(ps3_mm_add_memory);

/*============================================================================*/
/* dma routines */
/*============================================================================*/
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
	BUG_ON(lpar_addr < r->offset);
	BUG_ON(lpar_addr >= r->offset + r->len);
	return r->bus_addr + lpar_addr - r->offset;
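/*
 * Illustrative example (values assumed, not from the original source):
 * with map.rm.size = 0x8000000 and map.r1.offset = 0xf8000000, an lpar
 * address of 0x100000000 first folds back to 0x8000000, and the returned
 * bus address is then r->bus_addr + 0x8000000 - r->offset.
 */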
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)

	DBG("%s:%d: dev %lu:%lu\n", func, line, r->dev->bus_id,

	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
	DBG("%s:%d: offset %lxh\n", func, line, r->offset);

 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;

	struct list_head link;
	unsigned int usage_count;

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,

	DBG("%s:%d: r.dev %lu:%lu\n", func, line,
		c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)

	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,

	list_for_each_entry(c, &r->chunk_list.head, link) {
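		/*
		 * Summary of the cases below (descriptive note, not from the
		 * original source): the requested range is either fully
		 * contained in this chunk (reuse it), entirely outside it
		 * (keep searching), or straddles a chunk boundary, which
		 * this simple allocator does not handle.
		 */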
		if (aligned_bus >= c->bus_addr &&
			aligned_bus + aligned_len <= c->bus_addr + c->len)

		if (aligned_bus + aligned_len <= c->bus_addr)

		if (aligned_bus >= c->bus_addr + c->len)

		/* we don't handle the multi-chunk case for now */

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)

	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,

	list_for_each_entry(c, &r->chunk_list.head, link) {
		if (c->lpar_addr <= aligned_lpar &&
			aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)

		if (aligned_lpar + aligned_len <= c->lpar_addr) {

		if (c->lpar_addr + c->len <= aligned_lpar) {

static int dma_sb_free_chunk(struct dma_chunk *c)

	result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
		c->region->dev->dev_id, c->bus_addr, c->len);

static int dma_ioc0_free_chunk(struct dma_chunk *c)

	unsigned long offset;
	struct ps3_dma_region *r = c->region;

	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		result = lv1_put_iopte(0,
			c->bus_addr + offset,
			c->lpar_addr + offset,

		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
			c->bus_addr + offset,
			c->lpar_addr + offset,

			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
				__LINE__, ps3_result(result));

	DBG("%s:end\n", __func__);
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);

	BUG_ON(iopte_flag != 0xf800000000000000UL);
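	/*
	 * Descriptive note (not from the original source): this is the only
	 * flag combination accepted here; it corresponds to
	 * IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M, the flags passed
	 * by the linear-mapping code below.
	 */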
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
		c->region->dev->dev_id, c->lpar_addr,
		c->bus_addr, c->len, iopte_flag);

		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	list_add(&c->link, &r->chunk_list.head);

	DBG(" <- %s:%d\n", __func__, __LINE__);
static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out,

	struct dma_chunk *c, *last;

	unsigned long offset;

	DBG("%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
		phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	/* allocate IO address */
	if (list_empty(&r->chunk_list.head)) {
		c->bus_addr = r->bus_addr;

		/* derive from last bus addr */
		last = list_entry(r->chunk_list.head.next,
			struct dma_chunk, link);
		c->bus_addr = last->bus_addr + last->len;
		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
			last->bus_addr, last->len);

	/* FIXME: check whether length exceeds region size */

	/* build ioptes for the area */
	pages = len >> r->page_size;
	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#lx\n", __func__,
		r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0,
			c->bus_addr + offset,
			c->lpar_addr + offset,

			printk(KERN_WARNING "%s:%d: lv1_put_iopte "
				"failed: %s\n", __func__, __LINE__,

		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
			iopage, c->bus_addr + offset, c->lpar_addr + offset,

	/* be sure that last allocated one is inserted at head */
	list_add(&c->link, &r->chunk_list.head);

	DBG("%s: end\n", __func__);

	for (iopage--; 0 <= iopage; iopage--) {
		c->bus_addr + offset,
		c->lpar_addr + offset,
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.

static int dma_sb_region_create(struct ps3_dma_region *r)

	DBG(" -> %s:%d:\n", __func__, __LINE__);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
		__LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,

		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;

static int dma_ioc0_region_create(struct ps3_dma_region *r)

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,

		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;

	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
		r->len, r->page_size, r->bus_addr);
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.

static int dma_sb_region_free(struct ps3_dma_region *r)

	struct dma_chunk *tmp;

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		dma_sb_free_chunk(c);

	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,

		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
static int dma_ioc0_region_free(struct ps3_dma_region *r)

	struct dma_chunk *c, *n;

	DBG("%s: start\n", __func__);
	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		dma_ioc0_free_chunk(c);

	result = lv1_release_io_segment(0, r->bus_addr);

		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	DBG("%s: end\n", __func__);
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,

	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,

	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__,

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

		DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);

		spin_unlock_irqrestore(&r->chunk_list.lock, flags);

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,

	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,

	DBG("%s: vaddr=%#lx, len=%#lx\n", __func__,
	DBG("%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
		phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

		*bus_addr = c->bus_addr + phys_addr - aligned_phys;

		spin_unlock_irqrestore(&r->chunk_list.lock, flags);

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,

		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);

	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#lx\n", __func__,
		virt_addr, phys_addr, aligned_phys, *bus_addr);

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.

static int dma_sb_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);

	if (!c->usage_count) {
		dma_sb_free_chunk(c);

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)

	DBG("%s: start a=%#lx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr

		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);

	if (!c->usage_count) {
		dma_ioc0_free_chunk(c);

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
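 *
 * Descriptive note (not part of the original comment): because the lpar
 * addresses of the boot (rm) region and the hotplug (r1) region are not
 * contiguous, the linear map below is built in at most two pieces, one
 * for each region.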
static int dma_sb_region_create_linear(struct ps3_dma_region *r)

	unsigned long virt_addr, len, tmp;

	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
			r->page_size = PS3_DMA_16M;
			r->len = _ALIGN_UP(r->len, 1 << r->page_size);

	result = dma_sb_region_create(r);

	if (r->offset < map.rm.size) {
		/* Map (part of) 1st RAM chunk */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;

		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);

	if (r->offset + r->len > map.rm.size) {
		/* Map (part of) 2nd RAM chunk */
		virt_addr = map.rm.size;

		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;

		len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);

 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.

static int dma_sb_region_free_linear(struct ps3_dma_region *r)

	unsigned long bus_addr, len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* Unmap (part of) 1st RAM chunk */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;

		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);

	if (r->offset + r->len > map.rm.size) {
		/* Unmap (part of) 2nd RAM chunk */
		lpar_addr = map.r1.base;

		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;

		len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);

	result = dma_sb_region_free(r);
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_sb_region_create_linear().

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr,

	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};
int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)

	unsigned long lpar_addr;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops = (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;

	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;

EXPORT_SYMBOL(ps3_dma_region_init);
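/*
 * Typical driver-side usage of the region API (illustrative sketch only;
 * the page size, region type, buffer and length below are assumptions for
 * the example and are not taken from this file):
 *
 *	struct ps3_dma_region dma;
 *	unsigned long bus_addr;
 *
 *	ps3_dma_region_init(dev, &dma, PS3_DMA_64K, PS3_DMA_OTHER, NULL, 0);
 *	ps3_dma_region_create(&dma);
 *	ps3_dma_map(&dma, (unsigned long)buf, buf_len, &bus_addr,
 *		IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
 *	...
 *	ps3_dma_unmap(&dma, bus_addr, buf_len);
 *	ps3_dma_region_free(&dma);
 */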
int ps3_dma_region_create(struct ps3_dma_region *r)

	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);

EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)

	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);

EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,

	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);

int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,

	return r->region_ops->unmap(r, bus_addr, len);

/*============================================================================*/
/* system startup routines */
/*============================================================================*/

 * ps3_mm_init - initialize the address space state variables

void __init ps3_mm_init(void)

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,

		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* arrange to do this in ps3_mm_add_memory */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	DBG(" <- %s:%d\n", __func__, __LINE__);

 * ps3_mm_shutdown - final cleanup of address space

void ps3_mm_shutdown(void)
	ps3_mm_region_destroy(&map.r1);