/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/ib_umem.h>

#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))
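/*
 * NUM_CHUNKS rounds up to whole chunks: e.g. a 5000 byte region with
 * chunk_size 4096 yields NUM_CHUNKS(5000, 4096) = (5000 + 4095) / 4096 = 2.
 */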
/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;
static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me) {
		spin_lock_init(&me->mrlock);
	} else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me) {
		spin_lock_init(&me->mwlock);
	} else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exists!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */
/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

	u64 size;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* determine number of MR pages */
	num_pages_mr = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
				  PAGE_SIZE);
	num_pages_4k = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size,
				  EHCA_PAGESIZE);

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_pages = num_pages_mr;
		pginfo.num_4k = num_pages_4k;
		pginfo.num_phys_buf = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
				  EHCA_PAGESIZE);

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */
/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
			       int mr_access_flags, struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	int ret;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%lx "
			 "virt_base=%lx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *) e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_pages_mr = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	num_pages_4k = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
				  EHCA_PAGESIZE);

	/* register MR on HCA */
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.num_pages = num_pages_mr;
	pginfo.num_4k = num_pages_4k;
	pginfo.region = e_mr->umem;
	pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE;
	pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
					       (&e_mr->umem->chunk_list),
					       list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
			 " udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */
/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;

	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_pages_mr = 0;
	u32 num_pages_4k = 0; /* 4k portion "pages" */
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start; /* new == old address */
	new_size  = e_mr->size;  /* new == old length */
	new_acl   = e_mr->acl;   /* new == old access control */
	new_pd    = container_of(mr->pd, struct ehca_pd, ib_pd); /* new == old PD */

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start; /* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_pages_mr = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
					  new_size, PAGE_SIZE);
		num_pages_4k = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
					  new_size, EHCA_PAGESIZE);
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_pages = num_pages_mr;
		pginfo.num_4k = num_pages_4k;
		pginfo.num_phys_buf = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
				  EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */
/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd               = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size             = hipzout.len;
	mr_attr->lkey             = hipzout.lkey;
	mr_attr->rkey             = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */
/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */
/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout = {{0},0};

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
			 "shca=%p hca_hndl=%lx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey    = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */
/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */
/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */
/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */
/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_pages = list_len;
	pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
	pginfo.page_list = page_list;
	pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
			  EHCA_PAGESIZE);

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx",
			 ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */
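/*
 * Worked example for the pginfo setup above: an FMR with fmr_page_size 64K
 * spans 64K / 4K = 16 HCA pages per FMR page, so list_len = 8 gives
 * pginfo.num_4k = 8 * 16 = 128 rpages to register.
 */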
/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check that all FMRs belong to the same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */
/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */
/*----------------------------------------------------------------------*/

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	if (ehca_use_hp_mr == 1) /* high performance MR */
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_pages = pginfo->num_pages;
	e_mr->num_4k    = pginfo->num_4k;
	e_mr->start     = iova_start;
	e_mr->size      = size;
	e_mr->acl       = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr() */
/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max 512 pages per shot */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES); i++) {

		if (i == NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES) - 1) {
			rnum = pginfo->num_4k % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;      /* last shot is full */
		} else
			rnum = MAX_RPAGES;

		if (rnum > 1) {
			ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf "
					 "bad rc, ret=%x rnum=%x kpage=%p",
					 ret, rnum, kpage);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else {  /* rnum==1 */
			ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
					 "bad rc, ret=%x i=%x", ret, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
						 0, /* pagesize 4k */
						 0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lx "
					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
				 "mr_hndl=%lx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr_rpages() */
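/*
 * Worked example for the rpage loop above: with pginfo->num_4k = 1300 the
 * loop runs NUM_CHUNKS(1300, 512) = 3 times with rnum = 512, 512 and
 * 1300 % 512 = 276; only the final hcall may return H_SUCCESS, all earlier
 * ones must return H_PAGE_REGISTERED.
 */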
/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
			 pginfo->num_4k, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_pages = pginfo->num_pages;
		e_mr->num_4k    = pginfo->num_4k;
		e_mr->start     = iova_start;
		e_mr->size      = size;
		e_mr->acl       = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
		ret = 0;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k);
	return ret;
} /* end ehca_rereg_mr_rereg1() */
/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_4k > MAX_RPAGES) || (e_mr->num_4k > MAX_RPAGES) ||
	    (pginfo->num_4k > e_mr->num_4k)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */
/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* first check if reregistration hCall can be used for unmap */
	if (e_fmr->fmr_max_pages > MAX_RPAGES) {
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (rereg_1_hcall) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret != H_SUCCESS) {
			/*
			 * should not happen, because length checked above,
			 * FMRs are not shared and no MW bound to FMRs
			 */
			ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
				 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
				 "mr_hndl=%lx lkey=%x lkey_out=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
			rereg_3_hcall = 1;
		} else {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
		}
	}

	if (rereg_3_hcall) {
		/* first free old FMR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
				 "lkey=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_unmap_one_fmr_exit0;
		}
		/* clean ehca_mr_t, without changing lock */
		save_fmr = *e_fmr;
		ehca_mr_deletenew(e_fmr);

		/* set some MR values */
		e_fmr->flags = save_fmr.flags;
		e_fmr->fmr_page_size = save_fmr.fmr_page_size;
		e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
		e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
		e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
		e_fmr->acl = save_fmr.acl;

		pginfo.type = EHCA_MR_PGI_FMR;
		pginfo.num_pages = 0;
		pginfo.num_4k = 0;
		ret = ehca_reg_mr(shca, e_fmr, NULL,
				  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
				  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
				  &tmp_rkey);
		if (ret) {
			/* restore saved state (was a bug: restored from the
			 * never-initialized save_mr instead of save_fmr) */
			u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
			memcpy(&e_fmr->flags, &(save_fmr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_unmap_one_fmr_exit0;
		}
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_unmap_one_fmr() */
/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_pages     = e_origmr->num_pages;
	e_newmr->num_4k        = e_origmr->num_4k;
	e_newmr->start         = iova_start;
	e_newmr->size          = e_origmr->size;
	e_newmr->acl           = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */
/*----------------------------------------------------------------------*/

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ib_phys_buf ib_pbuf;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64 *)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_pages_mr = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
				  PAGE_SIZE);
	num_pages_4k = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE)
				  + size_maxmr, EHCA_PAGESIZE);

	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_pages = num_pages_mr;
	pginfo.num_4k = num_pages_4k;
	pginfo.num_phys_buf = 1;
	pginfo.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
			 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
			 num_pages_mr, num_pages_4k);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */
/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_pages     = e_origmr->num_pages;
	e_newmr->num_4k        = e_origmr->num_4k;
	e_newmr->start         = iova_start;
	e_newmr->size          = e_origmr->size;
	e_newmr->acl           = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */
/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%x e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */
/*----------------------------------------------------------------------*/

/*
 * check physical buffer array of MR verbs for validity and
 * calculate the MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%lx pbuf->size=%lx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
				     "pbuf->size=%lx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&                    /* not 1st */
		     (i < (num_phys_buf - 1)) &&   /* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */
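/*
 * Illustration of an accepted phys_buf_array layout (4K PAGE_SIZE): the
 * first buffer may start at any offset matching iova_start's page offset
 * but must end on a page boundary when more buffers follow; middle buffers
 * must be page aligned with a size that is a nonzero multiple of PAGE_SIZE;
 * the last buffer must be page aligned but may end anywhere.
 */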
/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validity */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */
/*----------------------------------------------------------------------*/

/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr *e_mr,
		     struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	struct ib_phys_buf *pbuf;
	u64 *fmrlist;
	u64 num4k, pgaddr, offs4k;
	u32 i = 0;
	u32 j = 0;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* loop over desired phys_buf_array entries */
		while (i < number) {
			pbuf   = pginfo->phys_buf_array + pginfo->next_buf;
			num4k  = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE)
					    + pbuf->size, EHCA_PAGESIZE);
			offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
			while (pginfo->next_4k < offs4k + num4k) {
				/* sanity check */
				if ((pginfo->page_cnt >= pginfo->num_pages) ||
				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
					ehca_gen_err("page_cnt >= num_pages, "
						     "page_cnt=%lx "
						     "num_pages=%lx "
						     "page_4k_cnt=%lx "
						     "num_4k=%lx i=%x",
						     pginfo->page_cnt,
						     pginfo->num_pages,
						     pginfo->page_4k_cnt,
						     pginfo->num_4k, i);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				*kpage = phys_to_abs(
					(pbuf->addr & EHCA_PAGEMASK)
					+ (pginfo->next_4k * EHCA_PAGESIZE));
				if (!(*kpage) && pbuf->addr) {
					ehca_gen_err("pbuf->addr=%lx "
						     "pbuf->size=%lx "
						     "next_4k=%lx", pbuf->addr,
						     pbuf->size,
						     pginfo->next_4k);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
					(pginfo->page_cnt)++;
				kpage++;
				i++;
				if (i >= number) break;
			}
			if (pginfo->next_4k >= offs4k + num4k) {
				(pginfo->next_buf)++;
				pginfo->next_4k = 0;
			}
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		/* loop over desired chunk entries */
		chunk      = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
				pgaddr = (page_to_pfn(chunk->page_list[i].page)
					  << PAGE_SHIFT);
				*kpage = phys_to_abs(pgaddr +
						     (pginfo->next_4k *
						      EHCA_PAGESIZE));
				if (!(*kpage)) {
					ehca_gen_err("pgaddr=%lx "
						     "chunk->page_list[i]=%lx "
						     "i=%x next_4k=%lx mr=%p",
						     pgaddr,
						     (u64)sg_dma_address(
							     &chunk->
							     page_list[i]),
						     i, pginfo->next_4k, e_mr);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				kpage++;
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
					(pginfo->page_cnt)++;
					(pginfo->next_nmap)++;
					pginfo->next_4k = 0;
					i++;
				}
				j++;
				if (j >= number) break;
			}
			if ((pginfo->next_nmap >= chunk->nmap) &&
			    (j >= number)) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
				break;
			} else if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			} else if (j >= number)
				break;
			else
				prev_chunk = chunk;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		/* loop over desired page_list entries */
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		for (i = 0; i < number; i++) {
			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
					     pginfo->next_4k * EHCA_PAGESIZE);
			if (!(*kpage)) {
				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
					     "next_listelem=%lx next_4k=%lx",
					     *fmrlist, fmrlist,
					     pginfo->next_listelem,
					     pginfo->next_4k);
				ret = -EFAULT;
				goto ehca_set_pagebuf_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			kpage++;
			if (pginfo->next_4k %
			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_listelem)++;
				fmrlist++;
				pginfo->next_4k = 0;
			}
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_exit0;
	}

ehca_set_pagebuf_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
			     "next_listelem=%lx region=%p next_chunk=%p "
			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
			     pginfo->num_pages, pginfo->num_4k,
			     pginfo->next_buf, pginfo->next_4k, number, kpage,
			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf() */
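/*
 * Note on the counters above: page_4k_cnt advances once per emitted 4K HCA
 * page, while page_cnt advances only after PAGE_SIZE / EHCA_PAGESIZE
 * consecutive 4K pages, i.e. every 16th step on a kernel with 64K pages.
 */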
/*----------------------------------------------------------------------*/

/* setup 1 page from page info page buffer */
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo,
		       u64 *rpage)
{
	int ret = 0;
	struct ib_phys_buf *tmp_pbuf;
	u64 *fmrlist;
	struct ib_umem_chunk *chunk;
	struct ib_umem_chunk *prev_chunk;
	u64 pgaddr, num4k, offs4k;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* sanity check */
		if ((pginfo->page_cnt >= pginfo->num_pages) ||
		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
				     pginfo->page_cnt, pginfo->num_pages,
				     pginfo->page_4k_cnt, pginfo->num_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
		num4k  = NUM_CHUNKS((tmp_pbuf->addr % EHCA_PAGESIZE) +
				    tmp_pbuf->size, EHCA_PAGESIZE);
		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
				     (pginfo->next_4k * EHCA_PAGESIZE));
		if (!(*rpage) && tmp_pbuf->addr) {
			ehca_gen_err("tmp_pbuf->addr=%lx"
				     " tmp_pbuf->size=%lx next_4k=%lx",
				     tmp_pbuf->addr, tmp_pbuf->size,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
			(pginfo->page_cnt)++;
		if (pginfo->next_4k >= offs4k + num4k) {
			(pginfo->next_buf)++;
			pginfo->next_4k = 0;
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		chunk      = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			pgaddr = (page_to_pfn(chunk->page_list[
						      pginfo->next_nmap].page)
				  << PAGE_SHIFT);
			*rpage = phys_to_abs(pgaddr +
					     (pginfo->next_4k * EHCA_PAGESIZE));
			if (!(*rpage)) {
				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
					     " next_nmap=%lx next_4k=%lx mr=%p",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[
							     pginfo->
							     next_nmap]),
					     pginfo->next_nmap, pginfo->next_4k,
					     e_mr);
				ret = -EFAULT;
				goto ehca_set_pagebuf_1_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			if (pginfo->next_4k %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_nmap)++;
				pginfo->next_4k = 0;
			}
			if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			}
			break;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_4k * EHCA_PAGESIZE);
		if (!(*rpage)) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_4k=%lx",
				     *fmrlist, fmrlist, pginfo->next_listelem,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k %
		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
			(pginfo->page_cnt)++;
			(pginfo->next_listelem)++;
			pginfo->next_4k = 0;
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_1_exit0;
	}

ehca_set_pagebuf_1_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
			     pginfo, pginfo->type, pginfo->num_pages,
			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf_1() */
/*----------------------------------------------------------------------*/

/*
 * check if an MR is a max-MR, i.e. one that spans all of memory;
 * returns 1 for a max-MR, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* an MR is treated as a max-MR only if it fits the following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */
/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */
/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
{
	return; /* HCA supports only 4k */
} /* end ehca_mrmw_set_pgsize_hipz_acl() */
/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */
/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags         = 0;
	mr->num_pages     = 0;
	mr->num_4k        = 0;
	mr->acl           = 0;
	mr->start         = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps  = 0;
	mr->fmr_map_cnt   = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
	mr->nr_of_pages   = 0;
	mr->pagearray     = NULL;
} /* end ehca_mr_deletenew() */
int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}