/*
 * drivers/s390/char/sclp.c
 *	core function to access sclp interface
 *
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "
/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;
/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
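
/* The servc instruction (opcode 0xb220, emitted via .insn below) passes a
 * command word and the real address of an SCCB to the service-call logical
 * processor. Completion is signalled asynchronously by a service-signal
 * external interrupt; the condition code only reports whether the request
 * was accepted (cc 0), the SCLP is busy (cc 2) or not operational (cc 3).
 * This summary is an interpretation based on how the return value is used
 * by the callers below. */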
/* Perform service call. Return 0 on success, non-zero otherwise. */
static int
service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	__asm__ __volatile__(
		"	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}
static inline void __sclp_make_read_req(void);

/* Queue read event data request if none is pending yet. Called while
 * sclp_lock is locked. */
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
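		/* Draining pending event buffers first brings the interface
		 * back to a defined state before any queued commands are
		 * started, which is why this request jumps the queue. */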
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}
/* Set up request retry timer. Called while sclp_lock is locked. */
static void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}
/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}
/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
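		/* The timer armed above doubles as a watchdog: if the
		 * completion interrupt never arrives, sclp_request_timeout()
		 * runs with data=1 and forces the interface back to a
		 * defined state. */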
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
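		/* Event type n corresponds to bit n of the 32-bit mask when
		 * bits are numbered from 1 starting at the most-significant
		 * bit, hence the 1 << (32 - type) test below. */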
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		/* Dispatch event */
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
					     sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Prepare read event data request. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READDATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}
/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(__u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
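	/* The interrupt parameter word carries the address of the finished
	 * SCCB in its upper bits (SCCBs are 8-byte aligned, so the low three
	 * bits are free) and flags pending event buffers in the low bits. */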
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}
/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
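	/* Bit 51 of the s390 TOD clock ticks once per microsecond, so 2^32
	 * TOD units come to roughly 1.05 seconds; shifting whole seconds
	 * left by 32 is therefore a slightly conservative approximation,
	 * which is good enough for a timeout. */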
	return (u64) (jiffies / HZ) << 32;
}
/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
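	/* 0x200 sets CR0 bit 9, the service-signal external interrupt
	 * subclass mask (the same bit that ctl_set_bit(0, 9) manipulates
	 * elsewhere in this file). Clearing the bits masked out by
	 * 0xFFFFF3AC disables, among others, the clock-comparator and
	 * CPU-timer subclasses so that no timer interrupts are delivered
	 * while we poll - an interpretation of the mask value, not a
	 * comment from the original source. */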
	__ctl_load(cr0_sync, 0, 0);
	__raw_local_irq_stosm(0x01);
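	/* stosm with mask 0x01 sets PSW system-mask bit 7, opening the
	 * window for external interrupts so the service signal can be
	 * delivered while we spin below. */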
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg != NULL);
}
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));
/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}
static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change_cb
};
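
/* Note: this listener is the driver's own and is added to sclp_reg_list
 * directly in sclp_init() below rather than via sclp_register(). */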
/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);
/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
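			/* Flag 0x80 marks a buffer as processed by its
			 * listener. Squeeze it out by copying the remaining
			 * buffers down over it; evbuf then already points at
			 * the next unprocessed buffer. */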
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
				((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));
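
/* The write event mask command is a two-way exchange: the driver proposes
 * receive_mask/send_mask, and the SCLP reports the masks it actually
 * supports in the sclp_* fields, which sclp_init_mask() copies back into
 * the global sclp_receive_mask and sclp_send_mask. */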
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITEMASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Deactivate SCLP interface. On success, new requests will be rejected and
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);
/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(__u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}
/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	return 0;
}