/*
 * Copyright (C) 2005 - 2008 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include <linux/delay.h>
#include "hwlib.h"
/* Initialize a multi-purpose ring over caller-provided memory. */
inline void mp_ring_create(struct mp_ring *ring, u32 num, u32 size, void *va)
{
	ASSERT(ring);
	memset(ring, 0, sizeof(struct mp_ring));
	ring->num = num;
	ring->pages = DIV_ROUND_UP(num * size, PAGE_SIZE);
	ring->itemSize = size;
	ring->va = va;
}
/*
 * -----------------------------------------------------------------------
 * Interface for 2 index rings, i.e. consumer/producer rings
 * -----------------------------------------------------------------------
 */
/* Returns the number of items pending on the ring. */
static inline u32 mp_ring_num_pending(struct mp_ring *ring)
{
	ASSERT(ring->num > 0);
	return be_subc(ring->pidx, ring->cidx, ring->num);
}
/* Returns the number of free items on the ring. */
static inline u32 mp_ring_num_empty(struct mp_ring *ring)
{
	ASSERT(ring->num > 0);
	return ring->num - 1 - mp_ring_num_pending(ring);
}
/* Consume 1 item. */
static inline void mp_ring_consume(struct mp_ring *ring)
{
	ASSERT(ring->num > 0);
	ASSERT(ring->pidx != ring->cidx);

	ring->cidx = be_addc(ring->cidx, 1, ring->num);
}
/* Produce 1 item. */
static inline void mp_ring_produce(struct mp_ring *ring)
{
	ASSERT(ring->num > 0);
	ring->pidx = be_addc(ring->pidx, 1, ring->num);
}
/* Consume 'count' items. */
static inline void mp_ring_consume_multiple(struct mp_ring *ring, u32 count)
{
	ASSERT(ring->num > 0);
	ASSERT(mp_ring_num_pending(ring) >= count);
	ring->cidx = be_addc(ring->cidx, count, ring->num);
}
/* Returns a pointer to the item at the given index. */
static inline void *mp_ring_item(struct mp_ring *ring, u32 index)
{
	ASSERT(index < ring->num);
	ASSERT(ring->itemSize > 0);
	return (u8 *) ring->va + index * ring->itemSize;
}
/* Returns a pointer to the next item to be produced. */
static inline void *mp_ring_producer_ptr(struct mp_ring *ring)
{
	return mp_ring_item(ring, ring->pidx);
}
/*
 * Returns a pointer to the current location in the ring.
 * This is used for rings with 1 index.
 */
static inline void *mp_ring_current(struct mp_ring *ring)
{
	ASSERT(ring->num > 0);
	ASSERT(ring->pidx == 0);	/* not used */

	return mp_ring_item(ring, ring->cidx);
}
/*
 * Increment the index and return a pointer to the new current location.
 * This is used for rings with only 1 index.
 */
static inline void *mp_ring_next(struct mp_ring *ring)
{
	ASSERT(ring->num > 0);
	ASSERT(ring->pidx == 0);	/* not used */

	ring->cidx = be_addc(ring->cidx, 1, ring->num);
	return mp_ring_current(ring);
}
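
/*
 * Illustrative sketch, not part of the original driver: the two-index ring
 * above assumes be_addc()/be_subc() are modular add/subtract helpers over
 * the ring size.  Under that assumption they are equivalent to:
 */
#if 0	/* example only, excluded from the build */
static inline u32 example_addc(u32 index, u32 delta, u32 num)
{
	/* Advance 'index' by 'delta' slots, wrapping at 'num'. */
	return (index + delta) % num;
}

static inline u32 example_subc(u32 pidx, u32 cidx, u32 num)
{
	/* Slots produced but not yet consumed, accounting for wrap. */
	return (pidx + num - cidx) % num;
}

/*
 * E.g. num = 8, pidx = 2, cidx = 6: pending = (2 + 8 - 6) % 8 = 4 and
 * empty = num - 1 - pending = 3; one slot is sacrificed so that
 * pidx == cidx always means "ring empty" rather than "ring full".
 */
#endif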
/*
 * This routine waits for a previously posted mailbox WRB to be completed.
 * Specifically, it waits for the mailbox to say that it is ready to accept
 * more data by setting the LSB of the mailbox pd register to 1.
 *
 * pcontroller - The function object to post this data to
 *
 * IRQL < DISPATCH_LEVEL
 */
static void be_mcc_mailbox_wait(struct be_function_object *pfob)
{
	struct MPU_MAILBOX_DB_AMAP mailbox_db;
	u32 i = 0;
	u32 ready;

	if (pfob->emulate) {
		/* No waiting for mailbox in emulated mode. */
		return;
	}

	mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
	ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);

	while (ready == false) {
		if ((++i & 0x3FFFF) == 0) {
			TRACE(DL_WARN, "Waiting for mailbox ready - %dk polls",
			      i / 1000);
		}
		udelay(5);
		mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
		ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
	}
}
/*
 * This routine tells the MCC mailbox that there is data to be processed
 * in the mailbox.  It does this by setting the physical address for the
 * mailbox location and clearing the LSB.  This routine returns immediately
 * and does not wait for the WRB to be processed.
 *
 * pcontroller - The function object to post this data to
 *
 * IRQL < DISPATCH_LEVEL
 */
static void be_mcc_mailbox_notify(struct be_function_object *pfob)
{
	struct MPU_MAILBOX_DB_AMAP mailbox_db;
	u32 pa;

	ASSERT(pfob->mailbox.pa);
	ASSERT(pfob->mailbox.va);

	/* If emulated, do not ring the mailbox */
	if (pfob->emulate) {
		TRACE(DL_WARN, "MPU disabled. Skipping mailbox notify.");
		return;
	}

	/* form the higher bits in the address */
	mailbox_db.dw[0] = 0;	/* init */
	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 1);
	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);

	/* bits 34 to 63 of the physical address */
	pa = (u32) (pfob->mailbox.pa >> 34);
	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);

	/* Wait for the MPU to be ready */
	be_mcc_mailbox_wait(pfob);

	/* Ring doorbell 1st time */
	PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);

	/* Wait for 1st write to be acknowledged. */
	be_mcc_mailbox_wait(pfob);

	/* lower bits: 30 bits starting at bit 4 (bits 4 to 33) */
	pa = (u32) (pfob->mailbox.pa >> 4) & 0x3FFFFFFF;

	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 0);
	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
	AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);

	/* Ring doorbell 2nd time */
	PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
}
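
/*
 * Worked example, not from the original sources: the doorbell 'address'
 * field is only 30 bits wide, so the 16-byte-aligned mailbox PA is split
 * across the two writes above -- bits 63:34 with hi = 1, then bits 33:4
 * with hi = 0.  The split is just:
 */
#if 0	/* example only, excluded from the build */
static void example_split_mailbox_pa(u64 pa, u32 *hi_bits, u32 *lo_bits)
{
	/* pa is 16-byte aligned, so bits 3:0 are implicitly zero. */
	*hi_bits = (u32) (pa >> 34);			/* bits 63:34 */
	*lo_bits = (u32) (pa >> 4) & 0x3FFFFFFF;	/* bits 33:4  */
}
#endif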
/*
 * This routine tells the MCC mailbox that there is data to be processed
 * in the mailbox.  It does this by setting the physical address for the
 * mailbox location and clearing the LSB.  This routine spins until the
 * MPU writes a 1 into the LSB, indicating that the data has been received
 * and is ready to be processed.
 *
 * pcontroller - The function object to post this data to
 *
 * IRQL < DISPATCH_LEVEL
 */
static void
be_mcc_mailbox_notify_and_wait(struct be_function_object *pfob)
{
	/* Notify the MPU, then wait for completion of the WRB. */
	be_mcc_mailbox_notify(pfob);
	be_mcc_mailbox_wait(pfob);
}
void
be_mcc_process_cqe(struct be_function_object *pfob,
		   struct MCC_CQ_ENTRY_AMAP *cqe)
{
	struct be_mcc_wrb_context *wrb_context = NULL;
	u32 offset;
	u32 status;
	u8 *p;

	ASSERT(cqe);
	/*
	 * A command completed.  Commands complete out-of-order.
	 * Determine which command completed from the TAG.
	 */
	offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8;
	p = (u8 *) cqe + offset;
	wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
	ASSERT(wrb_context);

	/*
	 * Perform a response copy if requested.
	 * Only copy data if the FWCMD is successful.
	 */
	status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, cqe);
	if (status == MGMT_STATUS_SUCCESS && wrb_context->copy.length > 0) {
		ASSERT(wrb_context->wrb);
		ASSERT(wrb_context->copy.va);
		p = (u8 *)wrb_context->wrb +
			offsetof(struct BE_MCC_WRB_AMAP, payload)/8;
		memcpy(wrb_context->copy.va,
		       (u8 *)p + wrb_context->copy.fwcmd_offset,
		       wrb_context->copy.length);
	}

	/* internal callback */
	if (wrb_context->internal_cb) {
		wrb_context->internal_cb(wrb_context->internal_cb_context,
					 status, wrb_context->wrb);
	}

	/* callback */
	if (wrb_context->cb) {
		wrb_context->cb(wrb_context->cb_context,
				status, wrb_context->wrb);
	}
	/* Free the context structure */
	_be_mcc_free_wrb_context(pfob, wrb_context);
}
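
/*
 * Sketch of the posting side implied by the lookup above (an assumption,
 * not code from the original sources): when a WRB is posted, the 64-bit
 * wrb_context pointer is written into the WRB tag, and hardware echoes it
 * back in the CQE's mcc_tag field, which is why a raw 64-bit read at the
 * tag offset recovers the context.  The tag field names below are
 * hypothetical.
 */
#if 0	/* example only, excluded from the build */
static void example_stash_context(struct MCC_WRB_AMAP *wrb,
				  struct be_mcc_wrb_context *ctx)
{
	u64 tag = (u64)(size_t) ctx;

	AMAP_SET_BITS_PTR(MCC_WRB, tag0, wrb, (u32) (tag & 0xFFFFFFFF));
	AMAP_SET_BITS_PTR(MCC_WRB, tag1, wrb, (u32) (tag >> 32));
}
#endif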
void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc)
{
	struct be_function_object *pfob = NULL;
	int status = BE_PENDING;
	struct be_generic_q_ctxt *q_ctxt;
	struct MCC_WRB_AMAP *wrb;
	struct MCC_WRB_AMAP *queue_wrb;
	u32 length, payload_length, sge_count, embedded;
	unsigned long irql;

	BUILD_BUG_ON((sizeof(struct be_generic_q_ctxt) <
			  sizeof(struct be_queue_driver_context) +
					sizeof(struct MCC_WRB_AMAP)));
	pfob = mcc->parent_function;

	spin_lock_irqsave(&pfob->post_lock, irql);

	if (mcc->driving_backlog) {
		spin_unlock_irqrestore(&pfob->post_lock, irql);
		if (pfob->pend_queue_driving && pfob->mcc) {
			pfob->pend_queue_driving = 0;
			be_drive_mcc_wrb_queue(pfob->mcc);
		}
		return;
	}
	/* Acquire the flag to limit 1 thread to redrive posts. */
	mcc->driving_backlog = 1;

	while (!list_empty(&mcc->backlog)) {
		wrb = _be_mpu_peek_ring_wrb(mcc, true);	/* Driving the queue */
		if (!wrb)
			break;	/* No space in the ring yet. */
		/* Get the next queued entry to process. */
		q_ctxt = list_first_entry(&mcc->backlog,
				struct be_generic_q_ctxt, context.list);
		list_del(&q_ctxt->context.list);
		pfob->mcc->backlog_length--;
		/*
		 * Compute the required length of the WRB.
		 * Since the queue element may be smaller than
		 * the complete WRB, copy only the required number of bytes.
		 */
		queue_wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
		embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, queue_wrb);
		if (embedded) {
			payload_length = AMAP_GET_BITS_PTR(MCC_WRB,
						payload_length, queue_wrb);
			length = sizeof(struct be_mcc_wrb_header) +
							payload_length;
		} else {
			sge_count = AMAP_GET_BITS_PTR(MCC_WRB, sge_count,
								queue_wrb);
			ASSERT(sge_count == 1); /* only 1 frag. */
			length = sizeof(struct be_mcc_wrb_header) +
				    sge_count * sizeof(struct MCC_SGE_AMAP);
		}

		/*
		 * Truncate the length based on the size of the
		 * queue element.  Some elements that have output parameters
		 * can be smaller than the payload_length field would
		 * indicate.  We really only need to copy the request
		 * parameters, not the response.
		 */
		length = min(length, (u32) (q_ctxt->context.bytes -
			offsetof(struct be_generic_q_ctxt, wrb_header)));

		/* Copy the queue element WRB into the ring. */
		memcpy(wrb, &q_ctxt->wrb_header, length);

		/* Post the wrb.  This should not fail assuming we have
		 * enough context structs. */
		status = be_function_post_mcc_wrb(pfob, wrb, NULL,
				q_ctxt->context.cb, q_ctxt->context.cb_context,
				q_ctxt->context.internal_cb,
				q_ctxt->context.internal_cb_context,
				q_ctxt->context.optional_fwcmd_va,
				&q_ctxt->context.copy);

		if (status == BE_SUCCESS) {
			/*
			 * Synchronous completion.  Since it was queued,
			 * we will invoke the callback.
			 * To the user, this is an asynchronous request.
			 */
			spin_unlock_irqrestore(&pfob->post_lock, irql);
			if (pfob->pend_queue_driving && pfob->mcc) {
				pfob->pend_queue_driving = 0;
				be_drive_mcc_wrb_queue(pfob->mcc);
			}

			ASSERT(q_ctxt->context.cb);

			q_ctxt->context.cb(
				q_ctxt->context.cb_context,
				BE_SUCCESS, NULL);

			spin_lock_irqsave(&pfob->post_lock, irql);

		} else if (status != BE_PENDING) {
			/*
			 * Another resource failed.  Should never happen
			 * if we have sufficient MCC_WRB_CONTEXT structs.
			 * Return to head of the queue.
			 */
			TRACE(DL_WARN, "Failed to post a queued WRB. 0x%x",
			      status);
			list_add(&q_ctxt->context.list, &mcc->backlog);
			pfob->mcc->backlog_length++;
			break;
		}
	}

	/* Free the flag to limit 1 thread to redrive posts. */
	mcc->driving_backlog = 0;
	spin_unlock_irqrestore(&pfob->post_lock, irql);
}
/* This function asserts that the WRB was consumed in order. */
#ifdef BE_DEBUG
u32 be_mcc_wrb_consumed_in_order(struct be_mcc_object *mcc,
				 struct MCC_CQ_ENTRY_AMAP *cqe)
{
	struct be_mcc_wrb_context *wrb_context = NULL;
	u32 wrb_index;
	u32 wrb_consumed_in_order;
	u32 offset;
	u8 *p;

	ASSERT(cqe);
	/*
	 * A command completed.  Commands complete out-of-order.
	 * Determine which command completed from the TAG.
	 */
	offset = offsetof(struct BE_MCC_CQ_ENTRY_AMAP, mcc_tag)/8;
	p = (u8 *) cqe + offset;
	wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
	ASSERT(wrb_context);

	wrb_index = (u32) (((u64)(size_t)wrb_context->ring_wrb -
		(u64)(size_t)mcc->sq.ring.va) / sizeof(struct MCC_WRB_AMAP));

	ASSERT(wrb_index < mcc->sq.ring.num);

	wrb_consumed_in_order = (u32) (wrb_index == mcc->consumed_index);
	mcc->consumed_index = be_addc(mcc->consumed_index, 1, mcc->sq.ring.num);
	return wrb_consumed_in_order;
}
#endif
int be_mcc_process_cq(struct be_mcc_object *mcc, bool rearm)
{
	struct be_function_object *pfob = NULL;
	struct MCC_CQ_ENTRY_AMAP *cqe;
	struct CQ_DB_AMAP db;
	struct mp_ring *cq_ring = &mcc->cq.ring;
	struct mp_ring *mp_ring = &mcc->sq.ring;
	u32 num_processed = 0;
	u32 consumed = 0, valid, completed, cqe_consumed, async_event;

	pfob = mcc->parent_function;

	spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);

	/*
	 * Verify that only one thread is processing the CQ at once.
	 * We cannot hold the lock while processing the CQ due to
	 * the callbacks into the OS.  Therefore, this flag is used
	 * to control it.  If any of the threads want to
	 * rearm the CQ, we need to honor that.
	 */
	if (mcc->processing != 0) {
		mcc->rearm = mcc->rearm || rearm;
		goto Error;
	} else {
		mcc->processing = 1;	/* lock processing for this thread. */
		mcc->rearm = rearm;	/* set our rearm setting */
	}

	spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);

	cqe = mp_ring_current(cq_ring);
	valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
	while (valid) {

		if (num_processed >= 8) {
			/* coalesce doorbells, but free space in cq
			 * ring while processing. */
			db.dw[0] = 0;	/* clear */
			AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
			AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, false);
			AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
			AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db,
							num_processed);
			num_processed = 0;

			PD_WRITE(pfob, cq_db, db.dw[0]);
		}

		async_event = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, async_event, cqe);
		if (async_event) {
			/* This is an asynchronous event. */
			struct ASYNC_EVENT_TRAILER_AMAP *async_trailer =
				(struct ASYNC_EVENT_TRAILER_AMAP *)
				((u8 *) cqe + sizeof(struct MCC_CQ_ENTRY_AMAP) -
				 sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
			u32 event_code;
			async_event = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
						async_event, async_trailer);
			ASSERT(async_event == 1);

			valid = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
						valid, async_trailer);
			ASSERT(valid == 1);

			/* Call the async event handler if it is installed. */
			if (mcc->async_cb) {
				event_code =
					AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
						event_code, async_trailer);
				mcc->async_cb(mcc->async_context,
					      (u32) event_code, (void *) cqe);
			}

		} else {
			/* This is a completion entry. */

			/* No vm forwarding in this driver. */

			cqe_consumed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
						consumed, cqe);
			if (cqe_consumed) {
				/*
				 * A command on the MCC ring was consumed.
				 * Update the consumer index.
				 * These occur in order.
				 */
				ASSERT(be_mcc_wrb_consumed_in_order(mcc, cqe));
				consumed++;
			}

			completed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
						completed, cqe);
			if (completed) {
				/* A command completed.  Use tag to
				 * determine which command. */
				be_mcc_process_cqe(pfob, cqe);
			}
		}

		/* Reset the CQE */
		AMAP_SET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe, false);
		num_processed++;

		/* Update our tracking for the CQ ring. */
		cqe = mp_ring_next(cq_ring);
		valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
	}

	TRACE(DL_INFO, "num_processed:0x%x, and consumed:0x%x",
	      num_processed, consumed);
	/*
	 * Grab the CQ lock to synchronize the "rearm" setting for
	 * the doorbell, and for clearing the "processing" flag.
	 */
	spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);

	/*
	 * Rearm the cq.  This is done based on the global mcc->rearm
	 * flag which combines the rearm parameter from the current
	 * call to process_cq and any other threads
	 * that tried to process the CQ while this one was active.
	 * This handles the situation where a sync. fwcmd was processing
	 * the CQ while the interrupt/dpc tries to process it.
	 * The sync process gets to continue -- but it is now
	 * responsible for the rearming.
	 */
	if (num_processed > 0 || mcc->rearm == true) {
		db.dw[0] = 0;	/* clear */
		AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
		AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, mcc->rearm);
		AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
		AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, num_processed);

		PD_WRITE(pfob, cq_db, db.dw[0]);
	}
	/*
	 * Update the consumer index after ringing the CQ doorbell.
	 * We don't want another thread to post more WRBs before we
	 * have CQ space available.
	 */
	mp_ring_consume_multiple(mp_ring, consumed);

	/* Clear the processing flag. */
	mcc->processing = 0;

Error:
	spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
	/*
	 * Use the local variable to detect if the current thread
	 * holds the WRB post lock.  If rearm is false, this is
	 * either a synchronous command, or the upper layer driver is polling
	 * from a thread.  We do not drive the queue from that
	 * context since the driver may hold the
	 * wrb post lock already.
	 */
	if (rearm)
		be_drive_mcc_wrb_queue(mcc);
	else
		pfob->pend_queue_driving = 1;

	return BE_SUCCESS;
}
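
/*
 * The CQ doorbell pattern used twice above, pulled out as a sketch (not a
 * helper that exists in the original sources): num_popped returns entries
 * to hardware and rearm requests another interrupt.  Coalescing a write
 * every 8 entries frees CQ space while the callbacks run.
 */
#if 0	/* example only, excluded from the build */
static void example_ring_cq_db(struct be_function_object *pfob, u32 qid,
			       u32 num_popped, bool rearm)
{
	struct CQ_DB_AMAP db;

	db.dw[0] = 0;
	AMAP_SET_BITS_PTR(CQ_DB, qid, &db, qid);
	AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, rearm);
	AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
	AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, num_popped);
	PD_WRITE(pfob, cq_db, db.dw[0]);
}
#endif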
/*
 *============================================================================
 *                  P U B L I C  R O U T I N E S
 *============================================================================
 */
/*
 * This routine creates an MCC object.  This object contains an MCC send queue
 * and a CQ private to the MCC.
 *
 * pcontroller - Handle to a function object
 *
 * EqObject - EQ object that will be used to dispatch this MCC
 *
 * ppMccObject - Pointer to an internal Mcc Object returned.
 *
 * Returns BE_SUCCESS if successful, otherwise a useful error code is returned.
 *
 * IRQL < DISPATCH_LEVEL
 */
int
be_mcc_ring_create(struct be_function_object *pfob,
		   struct ring_desc *rd, u32 length,
		   struct be_mcc_wrb_context *context_array,
		   u32 num_context_entries,
		   struct be_cq_object *cq, struct be_mcc_object *mcc)
{
	int status = 0;

	struct FWCMD_COMMON_MCC_CREATE *fwcmd = NULL;
	struct MCC_WRB_AMAP *wrb = NULL;
	u32 num_entries_encoded, n, i;
	void *va = NULL;
	unsigned long irql;

	if (length < sizeof(struct MCC_WRB_AMAP) * 2) {
		TRACE(DL_ERR, "Invalid MCC ring length:%d", length);
		return BE_STATUS_INVALID_PARAMETER;
	}
	/*
	 * Reduce the actual ring size to be less than the number
	 * of context entries.  This ensures that we run out of
	 * ring WRBs first so the queuing works correctly.  We never
	 * queue based on context structs.
	 */
	if (num_context_entries + 1 <
			length / sizeof(struct MCC_WRB_AMAP) - 1) {

		u32 max_length =
		    (num_context_entries + 2) * sizeof(struct MCC_WRB_AMAP);

		if (is_power_of_2(max_length))
			length = __roundup_pow_of_two(max_length+1) / 2;
		else
			length = __roundup_pow_of_two(max_length) / 2;

		ASSERT(length <= max_length);

		TRACE(DL_WARN,
			"MCC ring length reduced based on context entries."
			" length:%d wrbs:%d context_entries:%d", length,
			(int) (length / sizeof(struct MCC_WRB_AMAP)),
			num_context_entries);
	}

	spin_lock_irqsave(&pfob->post_lock, irql);

	num_entries_encoded =
	    be_ring_length_to_encoding(length, sizeof(struct MCC_WRB_AMAP));

	/* Init MCC object. */
	memset(mcc, 0, sizeof(*mcc));
	mcc->parent_function = pfob;
	mcc->cq_object = cq;

	INIT_LIST_HEAD(&mcc->backlog);

	wrb = be_function_peek_mcc_wrb(pfob);
	if (!wrb) {
		ASSERT(wrb);
		TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
		status = BE_STATUS_NO_MCC_WRB;
		goto error;
	}
	/* Prepares an embedded fwcmd, including request/response sizes. */
	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MCC_CREATE);

	fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	/*
	 * Program MCC ring context
	 */
	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, pdid,
			  &fwcmd->params.request.context, 0);
	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, invalid,
			  &fwcmd->params.request.context, false);
	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, ring_size,
			  &fwcmd->params.request.context, num_entries_encoded);

	n = cq->cq_id;
	AMAP_SET_BITS_PTR(MCC_RING_CONTEXT,
			  cq_id, &fwcmd->params.request.context, n);
	be_rd_to_pa_list(rd, fwcmd->params.request.pages,
			 ARRAY_SIZE(fwcmd->params.request.pages));
	/* Post the f/w command */
	status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
					  NULL, NULL, fwcmd, NULL);
	if (status != BE_SUCCESS) {
		TRACE(DL_ERR, "MCC to create CQ failed.");
		goto error;
	}
	/*
	 * Create a linked list of context structures
	 */
	mcc->wrb_context.base = context_array;
	mcc->wrb_context.num = num_context_entries;
	INIT_LIST_HEAD(&mcc->wrb_context.list_head);
	memset(context_array, 0,
	       sizeof(struct be_mcc_wrb_context) * num_context_entries);
	for (i = 0; i < mcc->wrb_context.num; i++) {
		list_add_tail(&context_array[i].next,
			      &mcc->wrb_context.list_head);
	}

	/*
	 * Create an mcc_ring for tracking WRB hw ring
	 */
	va = rd->va;
	ASSERT(va);
	mp_ring_create(&mcc->sq.ring, length / sizeof(struct MCC_WRB_AMAP),
		       sizeof(struct MCC_WRB_AMAP), va);
	mcc->sq.ring.id = fwcmd->params.response.id;
	/*
	 * Init a mcc_ring for tracking the MCC CQ.
	 */
	ASSERT(cq->va);
	mp_ring_create(&mcc->cq.ring, cq->num_entries,
		       sizeof(struct MCC_CQ_ENTRY_AMAP), cq->va);
	mcc->cq.ring.id = cq->cq_id;

	/* Force zeroing of CQ. */
	memset(cq->va, 0, cq->num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP));

	/* Initialize debug index. */
	mcc->consumed_index = 0;

	atomic_inc(&cq->ref_count);
	pfob->mcc = mcc;

	TRACE(DL_INFO, "MCC ring created. id:%d bytes:%d cq_id:%d cq_entries:%d"
	      " num_context:%d", mcc->sq.ring.id, length,
	      cq->cq_id, cq->num_entries, num_context_entries);

error:
	spin_unlock_irqrestore(&pfob->post_lock, irql);
	if (pfob->pend_queue_driving && pfob->mcc) {
		pfob->pend_queue_driving = 0;
		be_drive_mcc_wrb_queue(pfob->mcc);
	}
	return status;
}
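
/*
 * Worked example of the ring-sizing logic above, assuming a 64-byte
 * MCC_WRB_AMAP (an illustrative size, not taken from the headers): with
 * length = 1024 (16 WRBs) and num_context_entries = 5, the check
 * 5 + 1 < 16 - 1 passes, so the ring is shrunk.  max_length = 7 * 64 = 448
 * is not a power of two, so length = __roundup_pow_of_two(448) / 2 =
 * 512 / 2 = 256, i.e. 4 WRBs -- well under the context count, so posts
 * back up on the software backlog before the WRB contexts can run out.
 */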
/*
 * This routine destroys an MCC send queue.
 *
 * MccObject - Internal Mcc Object to be destroyed.
 *
 * Returns BE_SUCCESS if successful, otherwise an error code is returned.
 *
 * IRQL < DISPATCH_LEVEL
 *
 * The caller of this routine must ensure that no other WRB may be posted
 * until this routine returns.
 */
int be_mcc_ring_destroy(struct be_mcc_object *mcc)
{
	int status = 0;
	struct be_function_object *pfob = mcc->parent_function;

	ASSERT(mcc->processing == 0);

	/*
	 * Remove the ring from the function object.
	 * This transitions back to mailbox mode.
	 */
	pfob->mcc = NULL;

	/* Send fwcmd to destroy the queue.  (Using the mailbox.) */
	status = be_function_ring_destroy(mcc->parent_function, mcc->sq.ring.id,
			FWCMD_RING_TYPE_MCC, NULL, NULL, NULL, NULL);
	ASSERT(status == 0);

	/* Release the SQ reference to the CQ */
	atomic_dec(&mcc->cq_object->ref_count);

	return status;
}
static void
mcc_wrb_sync_cb(void *context, int status, struct MCC_WRB_AMAP *wrb)
{
	struct be_mcc_wrb_context *wrb_context =
				(struct be_mcc_wrb_context *) context;
	ASSERT(wrb_context);

	*wrb_context->users_final_status = status;
}
/*
 * This routine posts a command to the MCC send queue.
 *
 * mcc - Internal Mcc Object the command is posted to.
 *
 * Returns BE_SUCCESS if successful, otherwise an error code is returned.
 *
 * IRQL < DISPATCH_LEVEL if CompletionCallback is not NULL
 * IRQL <= DISPATCH_LEVEL if CompletionCallback is NULL
 *
 * If this routine is called with CompletionCallback != NULL, the
 * call is considered to be asynchronous and will return as soon
 * as the WRB is posted to the MCC with BE_PENDING.
 *
 * If CompletionCallback is NULL, then this routine will not return until
 * a completion for this MCC command has been processed.
 * If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
 *
 * This routine should only be called if the MPU has been bootstrapped past
 * mailbox mode.
 */
int
_be_mpu_post_wrb_ring(struct be_mcc_object *mcc, struct MCC_WRB_AMAP *wrb,
				struct be_mcc_wrb_context *wrb_context)
{

	struct MCC_WRB_AMAP *ring_wrb = NULL;
	int status = BE_PENDING;
	int final_status = BE_PENDING;
	mcc_wrb_cqe_callback cb = NULL;
	struct MCC_DB_AMAP mcc_db;
	u32 embedded;

	ASSERT(mp_ring_num_empty(&mcc->sq.ring) > 0);
	/*
	 * Input wrb is most likely the next wrb in the ring, since the client
	 * can peek at the address.
	 */
	ring_wrb = mp_ring_producer_ptr(&mcc->sq.ring);
	if (wrb != ring_wrb) {
		/* If not equal, copy it into the ring. */
		memcpy(ring_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
	}
#ifdef BE_DEBUG
	wrb_context->ring_wrb = ring_wrb;
#endif
	embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, ring_wrb);
	if (embedded) {
		/* embedded commands will have the response within the WRB. */
		wrb_context->wrb = ring_wrb;
	} else {
		/*
		 * non-embedded commands will not have the response
		 * within the WRB, and they may complete out-of-order.
		 * The WRB will not be valid to inspect
		 * during the completion.
		 */
		wrb_context->wrb = NULL;
	}
	cb = wrb_context->cb;

	if (cb == NULL) {
		/* Assign our internal callback if this is a
		 * synchronous call. */
		wrb_context->cb = mcc_wrb_sync_cb;
		wrb_context->cb_context = wrb_context;
		wrb_context->users_final_status = &final_status;
	}
	/* Increment producer index */

	mcc_db.dw[0] = 0;		/* initialize */
	AMAP_SET_BITS_PTR(MCC_DB, rid, &mcc_db, mcc->sq.ring.id);
	AMAP_SET_BITS_PTR(MCC_DB, numPosted, &mcc_db, 1);

	mp_ring_produce(&mcc->sq.ring);
	PD_WRITE(mcc->parent_function, mpu_mcc_db, mcc_db.dw[0]);
	TRACE(DL_INFO, "pidx: %x and cidx: %x.", mcc->sq.ring.pidx,
	      mcc->sq.ring.cidx);

	if (cb == NULL) {
		int polls = 0;	/* At >= 1 us per poll */
		/* Wait until this command completes, polling the CQ. */
		do {
			TRACE(DL_INFO, "FWCMD submitted in the poll mode.");
			/* Do not rearm CQ in this context. */
			be_mcc_process_cq(mcc, false);

			if (final_status == BE_PENDING) {
				if ((++polls & 0x7FFFF) == 0) {
					TRACE(DL_WARN,
					      "Warning : polling MCC CQ for %d"
					      "ms.", polls / 1000);
				}

				udelay(1);
			}

			/* final_status changed when the command completes */
		} while (final_status == BE_PENDING);

		status = final_status;
	}

	return status;
}
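
/*
 * Cadence check for the warning above: the comment assumes at least 1 us
 * per poll (udelay(1)), so 0x7FFFF == 524287 polls is roughly half a
 * second between warnings, and polls / 1000 approximates the elapsed time
 * in milliseconds.
 */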
struct MCC_WRB_AMAP *
_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue)
{
	/* If we have queued items, do not allow a post to bypass the queue. */
	if (!driving_queue && !list_empty(&mcc->backlog))
		return NULL;

	if (mp_ring_num_empty(&mcc->sq.ring) <= 0)
		return NULL;
	return (struct MCC_WRB_AMAP *) mp_ring_producer_ptr(&mcc->sq.ring);
}
int
be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *mailbox)
{
	ASSERT(mailbox);
	pfob->mailbox.va = mailbox->va;
	pfob->mailbox.pa = cpu_to_le64(mailbox->pa);
	pfob->mailbox.length = mailbox->length;

	ASSERT(((u32)(size_t)pfob->mailbox.va & 0xf) == 0);
	ASSERT(((u32)(size_t)pfob->mailbox.pa & 0xf) == 0);
	/*
	 * Issue the WRB to set MPU endianness
	 */
	{
		u64 *endian_check = (u64 *) (pfob->mailbox.va +
				offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8);
		*endian_check = 0xFF1234FFFF5678FFULL;
	}

	be_mcc_mailbox_notify_and_wait(pfob);

	return BE_SUCCESS;
}
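
/*
 * Why 0xFF1234FFFF5678FF works as an endianness probe (explanation ours,
 * not from the original sources): the MPU sees the WRB as raw bytes, and
 * the position of the 0x1234/0x5678 markers between the 0xFF sentinels
 * reveals the host byte order.
 */
#if 0	/* example only, excluded from the build */
static void example_endian_probe(void)
{
	u64 probe = 0xFF1234FFFF5678FFULL;
	u8 *b = (u8 *) &probe;

	/* Little-endian host: b[] = FF 78 56 FF FF 34 12 FF */
	/* Big-endian host:    b[] = FF 12 34 FF FF 56 78 FF */
	(void) b;
}
#endif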
/*
 * This routine posts a command to the MCC mailbox.
 *
 * FuncObj - Function Object to post the WRB on behalf of.
 *
 * CompletionCallback - Address of a callback routine to invoke once the WRB
 *                      is completed.
 *
 * CompletionCallbackContext - Opaque context to be passed during the call to
 *                             the CompletionCallback.
 *
 * Returns BE_SUCCESS if successful, otherwise an error code is returned.
 *
 * IRQL <= DISPATCH_LEVEL if CompletionCallback is NULL
 *
 * This routine will block until a completion for this MCC command has been
 * processed.  If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
 *
 * This routine should only be called if the MPU has not been bootstrapped past
 * mailbox mode.
 */
int
_be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
	 struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context)
{
	struct MCC_MAILBOX_AMAP *mailbox = NULL;
	struct MCC_WRB_AMAP *mb_wrb;
	struct MCC_CQ_ENTRY_AMAP *mb_cq;
	u32 offset;
	u32 status;

	ASSERT(pfob->mcc == NULL);
	mailbox = pfob->mailbox.va;
	ASSERT(mailbox);

	offset = offsetof(struct BE_MCC_MAILBOX_AMAP, wrb)/8;
	mb_wrb = (struct MCC_WRB_AMAP *) ((u8 *)mailbox + offset);

	memset(mailbox, 0, sizeof(*mailbox));
	memcpy(mb_wrb, wrb, sizeof(struct MCC_WRB_AMAP));

	/* The callback can inspect the final WRB to get output parameters. */
	wrb_context->wrb = mb_wrb;

	be_mcc_mailbox_notify_and_wait(pfob);

	/* A command completed.  Use tag to determine which command. */
	offset = offsetof(struct BE_MCC_MAILBOX_AMAP, cq)/8;
	mb_cq = (struct MCC_CQ_ENTRY_AMAP *) ((u8 *)mailbox + offset);
	be_mcc_process_cqe(pfob, mb_cq);

	status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, mb_cq);

	return status;
}
struct be_mcc_wrb_context *
_be_mcc_allocate_wrb_context(struct be_function_object *pfob)
{
	struct be_mcc_wrb_context *context = NULL;
	unsigned long irq;

	spin_lock_irqsave(&pfob->mcc_context_lock, irq);

	if (!pfob->mailbox.default_context_allocated) {
		/* Use the single default context that we
		 * always have allocated. */
		pfob->mailbox.default_context_allocated = true;
		context = &pfob->mailbox.default_context;
	} else if (pfob->mcc) {
		/* Get a context from the free list.  If any are available. */
		if (!list_empty(&pfob->mcc->wrb_context.list_head)) {
			context = list_first_entry(
				&pfob->mcc->wrb_context.list_head,
				struct be_mcc_wrb_context, next);
			list_del(&context->next);
		}
	}

	spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);

	return context;
}
void
_be_mcc_free_wrb_context(struct be_function_object *pfob,
			 struct be_mcc_wrb_context *context)
{
	unsigned long irq;

	ASSERT(context);
	/*
	 * Zero during free to try and catch any bugs where the context
	 * is accessed after a free.
	 */
	memset(context, 0, sizeof(*context));

	spin_lock_irqsave(&pfob->mcc_context_lock, irq);

	if (context == &pfob->mailbox.default_context) {
		/* Free the default context. */
		ASSERT(pfob->mailbox.default_context_allocated);
		pfob->mailbox.default_context_allocated = false;
	} else {
		/* Add to free list. */
		ASSERT(pfob->mcc);
		list_add_tail(&context->next,
			      &pfob->mcc->wrb_context.list_head);
	}

	spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
}
void
be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
		mcc_async_event_callback cb, void *cb_context)
{
	/* Lock against anyone trying to change the callback/context pointers
	 * while being used. */
	spin_lock_irqsave(&mcc_object->parent_function->cq_lock,
			  mcc_object->parent_function->cq_irq);

	/* Assign the async callback. */
	mcc_object->async_context = cb_context;
	mcc_object->async_cb = cb;

	spin_unlock_irqrestore(&mcc_object->parent_function->cq_lock,
			       mcc_object->parent_function->cq_irq);
}
#define MPU_EP_CONTROL		0
#define MPU_EP_SEMAPHORE	0xac
/*
 *-------------------------------------------------------------------
 * Function: be_wait_for_POST_complete
 *   Waits until the BladeEngine POST completes (either in error or success).
 * return status - BE_SUCCESS (0) on success.  Negative error code on failure.
 *-------------------------------------------------------------------
 */
static int be_wait_for_POST_complete(struct be_function_object *pfob)
{
	struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
	int s;
	u32 post_error, post_stage;

	const u32 us_per_loop = 1000;	/* 1000us */
	const u32 print_frequency_loops = 1000000 / us_per_loop;
	const u32 max_loops = 60 * print_frequency_loops;
	u32 loops = 0;

	/*
	 * Wait for arm fw indicating it is done or a fatal error happened.
	 * Note: POST can take some time to complete depending on configuration
	 * settings (consider ARM attempts to acquire an IP address
	 * over DHCP).
	 */
	do {
		status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
		post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
						error, &status);
		post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
						stage, &status);
		if (0 == (loops % print_frequency_loops)) {
			/* Print current status */
			TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
			      status.dw[0], post_stage);
		}
		udelay(us_per_loop);
	} while ((post_error != 1) &&
		 (post_stage != POST_STAGE_ARMFW_READY) &&
		 (++loops < max_loops));

	if (post_error == 1) {
		TRACE(DL_ERR, "POST error! Status = 0x%x (stage = 0x%x)",
		      status.dw[0], post_stage);
		s = BE_NOT_OK;
	} else if (post_stage != POST_STAGE_ARMFW_READY) {
		TRACE(DL_ERR, "POST time-out! Status = 0x%x (stage = 0x%x)",
		      status.dw[0], post_stage);
		s = BE_NOT_OK;
	} else {
		s = BE_SUCCESS;
	}
	return s;
}
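
/*
 * Timeout arithmetic for the loop above: us_per_loop = 1000 is one poll
 * per millisecond, print_frequency_loops = 1000000 / 1000 = 1000 polls
 * (one status print per second), and max_loops = 60 * 1000 = 60000 polls,
 * i.e. an overall POST timeout of about 60 seconds.
 */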
/*
 *-------------------------------------------------------------------
 * Function: be_kickoff_and_wait_for_POST
 *   Interacts with the BladeEngine management processor to initiate POST, and
 *   subsequently waits until POST completes (either in error or success).
 *   The caller must acquire the reset semaphore before initiating POST
 *   to prevent multiple drivers interacting with the management processor.
 *   Once POST is complete the caller must release the reset semaphore.
 *   Callers who only want to wait for POST complete may call
 *   be_wait_for_POST_complete.
 * return status - BE_SUCCESS (0) on success.  Negative error code on failure.
 *-------------------------------------------------------------------
 */
static int
be_kickoff_and_wait_for_POST(struct be_function_object *pfob)
{
	struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
	int s;
	u32 loops = 0;

	const u32 us_per_loop = 1000;	/* 1000us */
	const u32 print_frequency_loops = 1000000 / us_per_loop;
	const u32 max_loops = 5 * print_frequency_loops;
	u32 post_error, post_stage;

	/* Wait for arm fw awaiting host ready or a fatal error happened. */
	TRACE(DL_INFO, "Wait for BladeEngine ready to POST");
	do {
		status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
		post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
						error, &status);
		post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
						stage, &status);
		if (0 == (loops % print_frequency_loops)) {
			/* Print current status */
			TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
			      status.dw[0], post_stage);
		}
		udelay(us_per_loop);
	} while ((post_error != 1) &&
		 (post_stage < POST_STAGE_AWAITING_HOST_RDY) &&
		 (++loops < max_loops));

	if (post_error == 1) {
		TRACE(DL_ERR, "Pre-POST error! Status = 0x%x (stage = 0x%x)",
		      status.dw[0], post_stage);
		s = BE_NOT_OK;
	} else if (post_stage == POST_STAGE_AWAITING_HOST_RDY) {
		iowrite32(POST_STAGE_HOST_RDY, pfob->csr_va + MPU_EP_SEMAPHORE);

		/* Wait for POST to complete */
		s = be_wait_for_POST_complete(pfob);
	} else {
		/*
		 * Either a timeout waiting for host ready signal or POST has
		 * moved ahead without requiring a host ready signal.
		 * Might as well give POST a chance to complete
		 * (or timeout again).
		 */
		s = be_wait_for_POST_complete(pfob);
	}
	return s;
}
/*
 *-------------------------------------------------------------------
 * Function: be_pci_soft_reset
 *   This function is called to issue a BladeEngine soft reset.
 *   Callers should acquire the soft reset semaphore before calling this
 *   function.  Additionally, callers should ensure they cannot be pre-empted
 *   while the routine executes.  Upon completion of this routine, callers
 *   should release the reset semaphore.  This routine implicitly waits
 *   for BladeEngine POST to complete.
 * return status - BE_SUCCESS (0) on success.  Negative error code on failure.
 *-------------------------------------------------------------------
 */
int be_pci_soft_reset(struct be_function_object *pfob)
{
	struct PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
	struct PCICFG_ONLINE0_CSR_AMAP pciOnline0;
	struct PCICFG_ONLINE1_CSR_AMAP pciOnline1;
	struct EP_CONTROL_CSR_AMAP epControlCsr;
	int status = BE_SUCCESS;
	u32 i, soft_reset_bit;

	TRACE(DL_NOTE, "PCI reset...");

	/* Issue soft reset #1 to get BladeEngine into a known state. */
	soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
	AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
	PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]);
	/*
	 * wait til soft reset is deasserted - hardware
	 * deasserts after some time.
	 */
	i = 0;
	do {
		udelay(50);
		soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
		soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
					softreset, soft_reset.dw);
	} while (soft_reset_bit && (i++ < 1024));
	if (soft_reset_bit != 0) {
		TRACE(DL_ERR, "Soft-reset #1 did not deassert as expected.");
		status = BE_NOT_OK;
		goto Error_label;
	}
	/* Mask everything */
	PCICFG0_WRITE(pfob, ue_status_low_mask, 0xFFFFFFFF);
	PCICFG0_WRITE(pfob, ue_status_hi_mask, 0xFFFFFFFF);
	/*
	 * Set everything offline except MPU IRAM (it is offline with
	 * the soft-reset, but soft-reset does not reset the PCICFG registers!)
	 */
	pciOnline0.dw[0] = 0;
	pciOnline1.dw[0] = 0;
	AMAP_SET_BITS_PTR(PCICFG_ONLINE1_CSR, mpu_iram_online,
				pciOnline1.dw, 1);
	PCICFG0_WRITE(pfob, online0, pciOnline0.dw[0]);
	PCICFG0_WRITE(pfob, online1, pciOnline1.dw[0]);

	udelay(20000);

	/* Issue soft reset #2. */
	AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
	PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]);
	/*
	 * wait til soft reset is deasserted - hardware
	 * deasserts after some time.
	 */
	i = 0;
	do {
		udelay(50);
		soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
		soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
					softreset, soft_reset.dw);
	} while (soft_reset_bit && (i++ < 1024));
	if (soft_reset_bit != 0) {
		TRACE(DL_ERR, "Soft-reset #2 did not deassert as expected.");
		status = BE_NOT_OK;
		goto Error_label;
	}

	udelay(20000);

	/* Take MPU out of reset. */

	epControlCsr.dw[0] = ioread32(pfob->csr_va + MPU_EP_CONTROL);
	AMAP_SET_BITS_PTR(EP_CONTROL_CSR, CPU_reset, &epControlCsr, 0);
	iowrite32((u32)epControlCsr.dw[0], pfob->csr_va + MPU_EP_CONTROL);

	/* Kickoff BE POST and wait for completion */
	status = be_kickoff_and_wait_for_POST(pfob);

Error_label:
	return status;
}
/*
 *-------------------------------------------------------------------
 * Function: be_pci_reset_required
 *   This private function is called to detect if a host entity is
 *   required to issue a PCI soft reset and subsequently drive
 *   BladeEngine POST.  Scenarios where this is required:
 *	1) BIOS-less configuration
 *	2) Hot-swap/plug/power-on
 * return status - true if a reset is required, false otherwise
 *-------------------------------------------------------------------
 */
static bool be_pci_reset_required(struct be_function_object *pfob)
{
	struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
	bool do_reset = false;
	u32 post_error, post_stage;

	/*
	 * Read the POST status register
	 */
	status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
	post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, error,
						&status);
	post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, stage,
						&status);
	if (post_stage <= POST_STAGE_AWAITING_HOST_RDY) {
		/*
		 * If BladeEngine is waiting for host ready indication,
		 * we want to do a PCI reset.
		 */
		do_reset = true;
	}

	return do_reset;
}
/*
 *-------------------------------------------------------------------
 * Function: be_drive_POST
 *   This function is called to drive BladeEngine POST.  The
 *   caller should ensure they cannot be pre-empted while this routine
 *   executes.
 * return status - BE_SUCCESS (0) on success.  Negative error code on failure.
 *-------------------------------------------------------------------
 */
int be_drive_POST(struct be_function_object *pfob)
{
	int status;

	if (false != be_pci_reset_required(pfob)) {
		/* PCI reset is needed (implicitly starts and waits for POST) */
		status = be_pci_soft_reset(pfob);
	} else {
		/* No PCI reset is needed, start POST */
		status = be_kickoff_and_wait_for_POST(pfob);
	}

	return status;
}