pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/acpi/osl.c
ACPI: misc cleanups
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *
8  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License as published by
12  *  the Free Software Foundation; either version 2 of the License, or
13  *  (at your option) any later version.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to the Free Software
22  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25  *
26  */
27
28 #include <linux/module.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/mm.h>
32 #include <linux/pci.h>
33 #include <linux/interrupt.h>
34 #include <linux/kmod.h>
35 #include <linux/delay.h>
36 #include <linux/dmi.h>
37 #include <linux/workqueue.h>
38 #include <linux/nmi.h>
39 #include <linux/acpi.h>
40 #include <acpi/acpi.h>
41 #include <asm/io.h>
42 #include <acpi/acpi_bus.h>
43 #include <acpi/processor.h>
44 #include <asm/uaccess.h>
45
46 #include <linux/efi.h>
47
48 #define _COMPONENT              ACPI_OS_SERVICES
49 ACPI_MODULE_NAME("osl");
50 #define PREFIX          "ACPI: "
51 struct acpi_os_dpc {
52         acpi_osd_exec_callback function;
53         void *context;
54         struct work_struct work;
55 };
56
57 #ifdef CONFIG_ACPI_CUSTOM_DSDT
58 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
59 #endif
60
61 #ifdef ENABLE_DEBUGGER
62 #include <linux/kdb.h>
63
64 /* stuff for debugger support */
65 int acpi_in_debugger;
66 EXPORT_SYMBOL(acpi_in_debugger);
67
68 extern char line_buf[80];
69 #endif                          /*ENABLE_DEBUGGER */
70
71 static unsigned int acpi_irq_irq;
72 static acpi_osd_handler acpi_irq_handler;
73 static void *acpi_irq_context;
74 static struct workqueue_struct *kacpid_wq;
75 static struct workqueue_struct *kacpi_notify_wq;
76
77 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
78 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
79
80 /*
81  * "Ode to _OSI(Linux)"
82  *
83  * osi_linux -- Control response to BIOS _OSI(Linux) query.
84  *
85  * As Linux evolves, the features that it supports change.
86  * So an OSI string such as "Linux" is not specific enough
87  * to be useful across multiple versions of Linux.  It
88  * doesn't identify any particular feature, interface,
89  * or even any particular version of Linux...
90  *
91  * Unfortunately, Linux-2.6.22 and earlier responded "yes"
92  * to a BIOS _OSI(Linux) query.  When
93  * a reference mobile BIOS started using it, its use
94  * started to spread to many vendor platforms.
95  * As it is not supportable, we need to halt that spread.
96  *
97  * Today, most BIOS references to _OSI(Linux) are noise --
98  * they have no functional effect and are just dead code
99  * carried over from the reference BIOS.
100  *
101  * The next most common case is that _OSI(Linux) harms Linux,
102  * usually by causing the BIOS to follow paths that are
103  * not tested during Windows validation.
104  *
105  * Finally, there is a short list of platforms
106  * where OSI(Linux) benefits Linux.
107  *
108  * Starting with Linux-2.6.23, OSI(Linux) is disabled by default.
109  * DMI is used to disable the dmesg warning about OSI(Linux)
110  * on platforms where it is known to have no effect.
111  * But a dmesg warning remains for systems where
112  * we do not know if OSI(Linux) is good or bad for the system.
113  * DMI is also used to enable OSI(Linux) for the machines
114  * that are known to need it.
115  *
116  * BIOS writers should NOT query _OSI(Linux) on future systems.
117  * It will be ignored by default; getting Linux to honor it
118  * requires either a kernel source update to add a DMI entry
119  * or a boot-time "acpi_osi=Linux" invocation.
120  */
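/*
 * For illustration, a sketch of how a DMI quirk elsewhere in the tree
 * (e.g. drivers/acpi/blacklist.c) would hook into acpi_dmi_osi_linux(),
 * defined later in this file.  The callback name and match strings are
 * hypothetical:
 *
 *	static int dmi_enable_osi_linux(const struct dmi_system_id *d)
 *	{
 *		acpi_dmi_osi_linux(1, d);
 *		return 0;
 *	}
 *
 *	static struct dmi_system_id example_osi_dmi_table[] __initdata = {
 *		{
 *		 .callback = dmi_enable_osi_linux,
 *		 .ident = "Example Vendor Laptop",
 *		 .matches = {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
 *			},
 *		},
 *		{}
 *	};
 *
 * The boot-time equivalent is "acpi_osi=Linux" (or "acpi_osi=!Linux" to
 * force it off), handled by acpi_osi_setup() below.
 */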
121 #define OSI_LINUX_ENABLE 0
122
123 struct osi_linux {
124         unsigned int    enable:1;
125         unsigned int    dmi:1;
126         unsigned int    cmdline:1;
127         unsigned int    known:1;
128 } osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
129
130 static void __init acpi_request_region (struct acpi_generic_address *addr,
131         unsigned int length, char *desc)
132 {
133         struct resource *res;
134
135         if (!addr->address || !length)
136                 return;
137
138         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
139                 res = request_region(addr->address, length, desc);
140         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
141                 res = request_mem_region(addr->address, length, desc);
142 }
143
144 static int __init acpi_reserve_resources(void)
145 {
146         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
147                 "ACPI PM1a_EVT_BLK");
148
149         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
150                 "ACPI PM1b_EVT_BLK");
151
152         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
153                 "ACPI PM1a_CNT_BLK");
154
155         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
156                 "ACPI PM1b_CNT_BLK");
157
158         if (acpi_gbl_FADT.pm_timer_length == 4)
159                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
160
161         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
162                 "ACPI PM2_CNT_BLK");
163
164         /* Length of GPE blocks must be a multiple of 2 */
165
166         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
167                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
168                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
169
170         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
171                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
172                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
173
174         return 0;
175 }
176 device_initcall(acpi_reserve_resources);
177
178 acpi_status __init acpi_os_initialize(void)
179 {
180         return AE_OK;
181 }
182
183 acpi_status acpi_os_initialize1(void)
184 {
185         /*
186          * Initialize PCI configuration space access, as we'll need to access
187          * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
188          */
189         if (!raw_pci_ops) {
190                 printk(KERN_ERR PREFIX
191                        "Access to PCI configuration space unavailable\n");
192                 return AE_NULL_ENTRY;
193         }
194         kacpid_wq = create_singlethread_workqueue("kacpid");
195         kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
196         BUG_ON(!kacpid_wq);
197         BUG_ON(!kacpi_notify_wq);
198         return AE_OK;
199 }
200
201 acpi_status acpi_os_terminate(void)
202 {
203         if (acpi_irq_handler) {
204                 acpi_os_remove_interrupt_handler(acpi_irq_irq,
205                                                  acpi_irq_handler);
206         }
207
208         destroy_workqueue(kacpid_wq);
209         destroy_workqueue(kacpi_notify_wq);
210
211         return AE_OK;
212 }
213
214 void acpi_os_printf(const char *fmt, ...)
215 {
216         va_list args;
217         va_start(args, fmt);
218         acpi_os_vprintf(fmt, args);
219         va_end(args);
220 }
221
222 void acpi_os_vprintf(const char *fmt, va_list args)
223 {
224         static char buffer[512];
225
226         vsprintf(buffer, fmt, args);
227
228 #ifdef ENABLE_DEBUGGER
229         if (acpi_in_debugger) {
230                 kdb_printf("%s", buffer);
231         } else {
232                 printk("%s", buffer);
233         }
234 #else
235         printk("%s", buffer);
236 #endif
237 }
238
239 acpi_physical_address __init acpi_os_get_root_pointer(void)
240 {
241         if (efi_enabled) {
242                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
243                         return efi.acpi20;
244                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
245                         return efi.acpi;
246                 else {
247                         printk(KERN_ERR PREFIX
248                                "System description tables not found\n");
249                         return 0;
250                 }
251         } else
252                 return acpi_find_rsdp();
253 }
254
255 void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
256 {
257         if (phys > ULONG_MAX) {
258                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
259                 return NULL;
260         }
261         if (acpi_gbl_permanent_mmap)
262                 /*
263                 * ioremap checks to ensure this is in reserved space
264                 */
265                 return ioremap((unsigned long)phys, size);
266         else
267                 return __acpi_map_table((unsigned long)phys, size);
268 }
269 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
270
271 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
272 {
273         if (acpi_gbl_permanent_mmap) {
274                 iounmap(virt);
275         }
276 }
277 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
278
279 #ifdef ACPI_FUTURE_USAGE
280 acpi_status
281 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
282 {
283         if (!phys || !virt)
284                 return AE_BAD_PARAMETER;
285
286         *phys = virt_to_phys(virt);
287
288         return AE_OK;
289 }
290 #endif
291
292 #define ACPI_MAX_OVERRIDE_LEN 100
293
294 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
295
296 acpi_status
297 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
298                             acpi_string * new_val)
299 {
300         if (!init_val || !new_val)
301                 return AE_BAD_PARAMETER;
302
303         *new_val = NULL;
304         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
305                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
306                        acpi_os_name);
307                 *new_val = acpi_os_name;
308         }
309
310         return AE_OK;
311 }
312
313 acpi_status
314 acpi_os_table_override(struct acpi_table_header * existing_table,
315                        struct acpi_table_header ** new_table)
316 {
317         if (!existing_table || !new_table)
318                 return AE_BAD_PARAMETER;
319
320 #ifdef CONFIG_ACPI_CUSTOM_DSDT
321         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
322                 *new_table = (struct acpi_table_header *)AmlCode;
323         else
324                 *new_table = NULL;
325 #else
326         *new_table = NULL;
327 #endif
328         return AE_OK;
329 }
330
331 static irqreturn_t acpi_irq(int irq, void *dev_id)
332 {
333         return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE;
334 }
335
336 acpi_status
337 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
338                                   void *context)
339 {
340         unsigned int irq;
341
342         /*
343          * Ignore the GSI from the core, and use the value in our copy of the
344          * FADT. It may not be the same if an interrupt source override exists
345          * for the SCI.
346          */
347         gsi = acpi_gbl_FADT.sci_interrupt;
348         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
349                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
350                        gsi);
351                 return AE_OK;
352         }
353
354         acpi_irq_handler = handler;
355         acpi_irq_context = context;
356         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
357                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
358                 return AE_NOT_ACQUIRED;
359         }
360         acpi_irq_irq = irq;
361
362         return AE_OK;
363 }
364
365 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
366 {
367         if (irq) {
368                 free_irq(irq, acpi_irq);
369                 acpi_irq_handler = NULL;
370                 acpi_irq_irq = 0;
371         }
372
373         return AE_OK;
374 }
375
376 /*
377  * Running in interpreter thread context, safe to sleep
378  */
379
380 void acpi_os_sleep(acpi_integer ms)
381 {
382         schedule_timeout_interruptible(msecs_to_jiffies(ms));
383 }
384
385 void acpi_os_stall(u32 us)
386 {
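        /*
         * Busy-wait in chunks of no more than 1000us so that each udelay()
         * call stays small, touching the NMI watchdog between chunks so
         * that long stalls do not trip it.
         */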
387         while (us) {
388                 u32 delay = 1000;
389
390                 if (delay > us)
391                         delay = us;
392                 udelay(delay);
393                 touch_nmi_watchdog();
394                 us -= delay;
395         }
396 }
397
398 /*
399  * Support ACPI 3.0 AML Timer operand
400  * Returns 64-bit free-running, monotonically increasing timer
401  * with 100ns granularity
402  */
403 u64 acpi_os_get_timer(void)
404 {
405         static u64 t;
406
407 #ifdef  CONFIG_HPET
408         /* TBD: use HPET if available */
409 #endif
410
411 #ifdef  CONFIG_X86_PM_TIMER
412         /* TBD: default to PM timer if HPET was not available */
413 #endif
414         if (!t)
415                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
416
417         return ++t;
418 }
419
420 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
421 {
422         u32 dummy;
423
424         if (!value)
425                 value = &dummy;
426
427         *value = 0;
428         if (width <= 8) {
429                 *(u8 *) value = inb(port);
430         } else if (width <= 16) {
431                 *(u16 *) value = inw(port);
432         } else if (width <= 32) {
433                 *(u32 *) value = inl(port);
434         } else {
435                 BUG();
436         }
437
438         return AE_OK;
439 }
440
441 EXPORT_SYMBOL(acpi_os_read_port);
442
443 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
444 {
445         if (width <= 8) {
446                 outb(value, port);
447         } else if (width <= 16) {
448                 outw(value, port);
449         } else if (width <= 32) {
450                 outl(value, port);
451         } else {
452                 BUG();
453         }
454
455         return AE_OK;
456 }
457
458 EXPORT_SYMBOL(acpi_os_write_port);
459
460 acpi_status
461 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
462 {
463         u32 dummy;
464         void __iomem *virt_addr;
465
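        /*
         * Note: "width" is the access size in bits; using it as the ioremap
         * length maps a little more than strictly needed, which is harmless.
         */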
466         virt_addr = ioremap(phys_addr, width);
467         if (!value)
468                 value = &dummy;
469
470         switch (width) {
471         case 8:
472                 *(u8 *) value = readb(virt_addr);
473                 break;
474         case 16:
475                 *(u16 *) value = readw(virt_addr);
476                 break;
477         case 32:
478                 *(u32 *) value = readl(virt_addr);
479                 break;
480         default:
481                 BUG();
482         }
483
484         iounmap(virt_addr);
485
486         return AE_OK;
487 }
488
489 acpi_status
490 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
491 {
492         void __iomem *virt_addr;
493
494         virt_addr = ioremap(phys_addr, width);
495
496         switch (width) {
497         case 8:
498                 writeb(value, virt_addr);
499                 break;
500         case 16:
501                 writew(value, virt_addr);
502                 break;
503         case 32:
504                 writel(value, virt_addr);
505                 break;
506         default:
507                 BUG();
508         }
509
510         iounmap(virt_addr);
511
512         return AE_OK;
513 }
514
515 acpi_status
516 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
517                                void *value, u32 width)
518 {
519         int result, size;
520
521         if (!value)
522                 return AE_BAD_PARAMETER;
523
524         switch (width) {
525         case 8:
526                 size = 1;
527                 break;
528         case 16:
529                 size = 2;
530                 break;
531         case 32:
532                 size = 4;
533                 break;
534         default:
535                 return AE_ERROR;
536         }
537
538         BUG_ON(!raw_pci_ops);
539
540         result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
541                                    PCI_DEVFN(pci_id->device, pci_id->function),
542                                    reg, size, value);
543
544         return (result ? AE_ERROR : AE_OK);
545 }
546
547 acpi_status
548 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
549                                 acpi_integer value, u32 width)
550 {
551         int result, size;
552
553         switch (width) {
554         case 8:
555                 size = 1;
556                 break;
557         case 16:
558                 size = 2;
559                 break;
560         case 32:
561                 size = 4;
562                 break;
563         default:
564                 return AE_ERROR;
565         }
566
567         BUG_ON(!raw_pci_ops);
568
569         result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
570                                     PCI_DEVFN(pci_id->device, pci_id->function),
571                                     reg, size, value);
572
573         return (result ? AE_ERROR : AE_OK);
574 }
575
576 /* TODO: Change code to take advantage of driver model more */
577 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
578                                     acpi_handle chandle,        /* current node */
579                                     struct acpi_pci_id **id,
580                                     int *is_bridge, u8 * bus_number)
581 {
582         acpi_handle handle;
583         struct acpi_pci_id *pci_id = *id;
584         acpi_status status;
585         unsigned long temp;
586         acpi_object_type type;
587         u8 tu8;
588
589         acpi_get_parent(chandle, &handle);
590         if (handle != rhandle) {
591                 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
592                                         bus_number);
593
594                 status = acpi_get_type(handle, &type);
595                 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
596                         return;
597
598                 status =
599                     acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
600                                           &temp);
601                 if (ACPI_SUCCESS(status)) {
602                         pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
603                         pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
604
605                         if (*is_bridge)
606                                 pci_id->bus = *bus_number;
607
608                         /* any nicer way to get bus number of bridge ? */
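                        /*
                         * Config offset 0x0e is PCI_HEADER_TYPE: type 1
                         * (PCI-PCI bridge) and type 2 (CardBus bridge) carry
                         * their primary/secondary bus numbers at offsets
                         * 0x18 and 0x19; bit 7 is the multi-function flag,
                         * hence the 0x7f mask below.
                         */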
609                         status =
610                             acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
611                                                            8);
612                         if (ACPI_SUCCESS(status)
613                             && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
614                                 status =
615                                     acpi_os_read_pci_configuration(pci_id, 0x18,
616                                                                    &tu8, 8);
617                                 if (!ACPI_SUCCESS(status)) {
618                                         /* Certainly broken...  FIX ME */
619                                         return;
620                                 }
621                                 *is_bridge = 1;
622                                 pci_id->bus = tu8;
623                                 status =
624                                     acpi_os_read_pci_configuration(pci_id, 0x19,
625                                                                    &tu8, 8);
626                                 if (ACPI_SUCCESS(status)) {
627                                         *bus_number = tu8;
628                                 }
629                         } else
630                                 *is_bridge = 0;
631                 }
632         }
633 }
634
635 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
636                            acpi_handle chandle, /* current node */
637                            struct acpi_pci_id **id)
638 {
639         int is_bridge = 1;
640         u8 bus_number = (*id)->bus;
641
642         acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
643 }
644
645 static void acpi_os_execute_deferred(struct work_struct *work)
646 {
647         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
648         if (!dpc) {
649                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
650                 return;
651         }
652
653         dpc->function(dpc->context);
654         kfree(dpc);
655
656         /* Yield cpu to notify thread */
657         cond_resched();
658
659         return;
660 }
661
662 static void acpi_os_execute_notify(struct work_struct *work)
663 {
664         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
665
666         if (!dpc) {
667                 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
668                 return;
669         }
670
671         dpc->function(dpc->context);
672
673         kfree(dpc);
674
675         return;
676 }
677
678 /*******************************************************************************
679  *
680  * FUNCTION:    acpi_os_execute
681  *
682  * PARAMETERS:  Type               - Type of the callback
683  *              Function           - Function to be executed
684  *              Context            - Function parameters
685  *
686  * RETURN:      Status
687  *
688  * DESCRIPTION: Depending on type, queues the function on either the notify
689  *              workqueue or the general ACPI workqueue for deferred execution.
690  *
691  ******************************************************************************/
692
693 acpi_status acpi_os_execute(acpi_execute_type type,
694                             acpi_osd_exec_callback function, void *context)
695 {
696         acpi_status status = AE_OK;
697         struct acpi_os_dpc *dpc;
698
699         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
700                           "Scheduling function [%p(%p)] for deferred execution.\n",
701                           function, context));
702
703         if (!function)
704                 return AE_BAD_PARAMETER;
705
706         /*
707          * Allocate/initialize DPC structure.  Note that this memory will be
708          * freed by the callee.  The kernel handles the work_struct list  in a
709          * way that allows us to also free its memory inside the callee.
710          * Because we may want to schedule several tasks with different
711          * parameters we can't use the approach some kernel code uses of
712          * having a static work_struct.
713          */
714
715         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
716         if (!dpc)
717                 return_ACPI_STATUS(AE_NO_MEMORY);
718
719         dpc->function = function;
720         dpc->context = context;
721
722         if (type == OSL_NOTIFY_HANDLER) {
723                 INIT_WORK(&dpc->work, acpi_os_execute_notify);
724                 if (!queue_work(kacpi_notify_wq, &dpc->work)) {
725                         status = AE_ERROR;
726                         kfree(dpc);
727                 }
728         } else {
729                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
730                 if (!queue_work(kacpid_wq, &dpc->work)) {
731                         ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
732                                   "Call to queue_work() failed.\n"));
733                         status = AE_ERROR;
734                         kfree(dpc);
735                 }
736         }
737         return_ACPI_STATUS(status);
738 }
739
740 EXPORT_SYMBOL(acpi_os_execute);
741
742 void acpi_os_wait_events_complete(void *context)
743 {
744         flush_workqueue(kacpid_wq);
745 }
746
747 EXPORT_SYMBOL(acpi_os_wait_events_complete);
748
749 /*
750  * Allocate the memory for a spinlock and initialize it.
751  */
752 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
753 {
754         spin_lock_init(*handle);
755
756         return AE_OK;
757 }
758
759 /*
760  * Deallocate the memory for a spinlock.
761  */
762 void acpi_os_delete_lock(acpi_spinlock handle)
763 {
764         return;
765 }
766
767 acpi_status
768 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
769 {
770         struct semaphore *sem = NULL;
771
772
773         sem = acpi_os_allocate(sizeof(struct semaphore));
774         if (!sem)
775                 return AE_NO_MEMORY;
776         memset(sem, 0, sizeof(struct semaphore));
777
778         sema_init(sem, initial_units);
779
780         *handle = (acpi_handle) sem;
781
782         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
783                           *handle, initial_units));
784
785         return AE_OK;
786 }
787
788 /*
789  * TODO: A better way to delete semaphores?  Linux doesn't have a
790  * 'delete_semaphore()' function -- may result in an invalid
791  * pointer dereference for non-synchronized consumers.  Should
792  * we at least check for blocked threads and signal/cancel them?
793  */
794
795 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
796 {
797         struct semaphore *sem = (struct semaphore *)handle;
798
799
800         if (!sem)
801                 return AE_BAD_PARAMETER;
802
803         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
804
805         kfree(sem);
806         sem = NULL;
807
808         return AE_OK;
809 }
810
811 /*
812  * TODO: The kernel doesn't have a 'down_timeout' function -- had to
813  * improvise.  The process is to sleep for one scheduler quantum
814  * until the semaphore becomes available.  Downside is that this
815  * may result in starvation for timeout-based waits when there's
816  * lots of semaphore activity.
817  *
818  * TODO: Support for units > 1?
819  */
820 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
821 {
822         acpi_status status = AE_OK;
823         struct semaphore *sem = (struct semaphore *)handle;
824         int ret = 0;
825
826
827         if (!sem || (units < 1))
828                 return AE_BAD_PARAMETER;
829
830         if (units > 1)
831                 return AE_SUPPORT;
832
833         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
834                           handle, units, timeout));
835
836         /*
837          * This can be called during resume with interrupts off.
838          * Like boot-time, we should be single threaded and will
839          * always get the lock if we try -- timeout or not.
840          * If this doesn't succeed, then we will oops courtesy of
841          * might_sleep() in down().
842          */
843         if (!down_trylock(sem))
844                 return AE_OK;
845
846         switch (timeout) {
847                 /*
848                  * No Wait:
849                  * --------
850                  * A zero timeout value indicates that we shouldn't wait - just
851                  * acquire the semaphore if available otherwise return AE_TIME
852                  * (a.k.a. 'would block').
853                  */
854         case 0:
855                 if (down_trylock(sem))
856                         status = AE_TIME;
857                 break;
858
859                 /*
860                  * Wait Indefinitely:
861                  * ------------------
862                  */
863         case ACPI_WAIT_FOREVER:
864                 down(sem);
865                 break;
866
867                 /*
868                  * Wait w/ Timeout:
869                  * ----------------
870                  */
871         default:
872                 // TODO: A better timeout algorithm?
873                 {
874                         int i = 0;
875                         static const int quantum_ms = 1000 / HZ;
876
877                         ret = down_trylock(sem);
878                         for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
879                                 schedule_timeout_interruptible(1);
880                                 ret = down_trylock(sem);
881                         }
882
883                         if (ret != 0)
884                                 status = AE_TIME;
885                 }
886                 break;
887         }
888
889         if (ACPI_FAILURE(status)) {
890                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
891                                   "Failed to acquire semaphore[%p|%d|%d], %s",
892                                   handle, units, timeout,
893                                   acpi_format_exception(status)));
894         } else {
895                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
896                                   "Acquired semaphore[%p|%d|%d]", handle,
897                                   units, timeout));
898         }
899
900         return status;
901 }
902
903 /*
904  * TODO: Support for units > 1?
905  */
906 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
907 {
908         struct semaphore *sem = (struct semaphore *)handle;
909
910
911         if (!sem || (units < 1))
912                 return AE_BAD_PARAMETER;
913
914         if (units > 1)
915                 return AE_SUPPORT;
916
917         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
918                           units));
919
920         up(sem);
921
922         return AE_OK;
923 }
924
925 #ifdef ACPI_FUTURE_USAGE
926 u32 acpi_os_get_line(char *buffer)
927 {
928
929 #ifdef ENABLE_DEBUGGER
930         if (acpi_in_debugger) {
931                 u32 chars;
932
933                 kdb_read(buffer, sizeof(line_buf));
934
935                 /* remove the CR kdb includes */
936                 chars = strlen(buffer) - 1;
937                 buffer[chars] = '\0';
938         }
939 #endif
940
941         return 0;
942 }
943 #endif                          /*  ACPI_FUTURE_USAGE  */
944
945 acpi_status acpi_os_signal(u32 function, void *info)
946 {
947         switch (function) {
948         case ACPI_SIGNAL_FATAL:
949                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
950                 break;
951         case ACPI_SIGNAL_BREAKPOINT:
952                 /*
953                  * AML Breakpoint
954                  * ACPI spec. says to treat it as a NOP unless
955                  * you are debugging.  So if/when we integrate
956                  * AML debugger into the kernel debugger its
957                  * hook will go here.  But until then it is
958                  * not useful to print anything on breakpoints.
959                  */
960                 break;
961         default:
962                 break;
963         }
964
965         return AE_OK;
966 }
967
968 static int __init acpi_os_name_setup(char *str)
969 {
970         char *p = acpi_os_name;
971         int count = ACPI_MAX_OVERRIDE_LEN - 1;
972
973         if (!str || !*str)
974                 return 0;
975
976         for (; count-- && str && *str; str++) {
977                 if (isalnum(*str) || *str == ' ' || *str == ':')
978                         *p++ = *str;
979                 else if (*str == '\'' || *str == '"')
980                         continue;
981                 else
982                         break;
983         }
984         *p = 0;
985
986         return 1;
987
988 }
989
990 __setup("acpi_os_name=", acpi_os_name_setup);
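/*
 * Example (filtering as in acpi_os_name_setup() above):
 *
 *	acpi_os_name="Microsoft Windows"
 *
 * makes acpi_os_predefined_override() report that string for _OS_.  Only
 * alphanumerics, spaces and ':' are copied; quote characters are skipped
 * and any other character ends the string.
 */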
991
992 static void __init set_osi_linux(unsigned int enable)
993 {
994         if (osi_linux.enable != enable) {
995                 osi_linux.enable = enable;
996                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
997                         enable ? "Add": "Delet");
998         }
999         return;
1000 }
1001
1002 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1003 {
1004         osi_linux.cmdline = 1;  /* cmdline set the default */
1005         set_osi_linux(enable);
1006
1007         return;
1008 }
1009
1010 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1011 {
1012         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1013
1014         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1015
1016         if (enable == -1)
1017                 return;
1018
1019         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1020
1021         set_osi_linux(enable);
1022
1023         return;
1024 }
1025
1026 /*
1027  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1028  *
1029  * empty string disables _OSI
1030  * string starting with '!' disables that string
1031  * otherwise string is added to list, augmenting built-in strings
1032  */
1033 static int __init acpi_osi_setup(char *str)
1034 {
1035         if (str == NULL || *str == '\0') {
1036                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1037                 acpi_gbl_create_osi_method = FALSE;
1038         } else if (!strcmp("!Linux", str)) {
1039                 acpi_cmdline_osi_linux(0);      /* !enable */
1040         } else if (*str == '!') {
1041                 if (acpi_osi_invalidate(++str) == AE_OK)
1042                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1043         } else if (!strcmp("Linux", str)) {
1044                 acpi_cmdline_osi_linux(1);      /* enable */
1045         } else if (*osi_additional_string == '\0') {
1046                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1047                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1048         }
1049
1050         return 1;
1051 }
1052
1053 __setup("acpi_osi=", acpi_osi_setup);
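/*
 * Examples, following the cases handled in acpi_osi_setup() above (the
 * added/removed interface strings are illustrative):
 *
 *	acpi_osi=			disable the _OSI method entirely
 *	acpi_osi=!Linux			never answer "yes" to _OSI("Linux")
 *	acpi_osi=Linux			answer "yes" to _OSI("Linux")
 *	acpi_osi=!ExampleString		remove ExampleString from the list
 *	acpi_osi=ExampleString		add ExampleString to the list
 *
 * Only one additional string can be added this way, as it is stored in
 * the single osi_additional_string buffer.
 */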
1054
1055 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1056 static int __init acpi_serialize_setup(char *str)
1057 {
1058         printk(KERN_INFO PREFIX "serialize enabled\n");
1059
1060         acpi_gbl_all_methods_serialized = TRUE;
1061
1062         return 1;
1063 }
1064
1065 __setup("acpi_serialize", acpi_serialize_setup);
1066
1067 /*
1068  * Wake and Run-Time GPES are expected to be separate.
1069  * We disable wake-GPEs at run-time to prevent spurious
1070  * interrupts.
1071  *
1072  * However, if a system exists that shares Wake and
1073  * Run-time events on the same GPE this flag is available
1074  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1075  */
1076 static int __init acpi_wake_gpes_always_on_setup(char *str)
1077 {
1078         printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1079
1080         acpi_gbl_leave_wake_gpes_disabled = FALSE;
1081
1082         return 1;
1083 }
1084
1085 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1086
1087 /*
1088  * Acquire a spinlock.
1089  *
1090  * handle is a pointer to the spinlock_t.
1091  */
1092
1093 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1094 {
1095         acpi_cpu_flags flags;
1096         spin_lock_irqsave(lockp, flags);
1097         return flags;
1098 }
1099
1100 /*
1101  * Release a spinlock. See above.
1102  */
1103
1104 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1105 {
1106         spin_unlock_irqrestore(lockp, flags);
1107 }
1108
1109 #ifndef ACPI_USE_LOCAL_CACHE
1110
1111 /*******************************************************************************
1112  *
1113  * FUNCTION:    acpi_os_create_cache
1114  *
1115  * PARAMETERS:  name      - Ascii name for the cache
1116  *              size      - Size of each cached object
1117  *              depth     - Maximum depth of the cache (in objects) <ignored>
1118  *              cache     - Where the new cache object is returned
1119  *
1120  * RETURN:      status
1121  *
1122  * DESCRIPTION: Create a cache object
1123  *
1124  ******************************************************************************/
1125
1126 acpi_status
1127 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1128 {
1129         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1130         if (*cache == NULL)
1131                 return AE_ERROR;
1132         else
1133                 return AE_OK;
1134 }
1135
1136 /*******************************************************************************
1137  *
1138  * FUNCTION:    acpi_os_purge_cache
1139  *
1140  * PARAMETERS:  Cache           - Handle to cache object
1141  *
1142  * RETURN:      Status
1143  *
1144  * DESCRIPTION: Free all objects within the requested cache.
1145  *
1146  ******************************************************************************/
1147
1148 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1149 {
1150         kmem_cache_shrink(cache);
1151         return (AE_OK);
1152 }
1153
1154 /*******************************************************************************
1155  *
1156  * FUNCTION:    acpi_os_delete_cache
1157  *
1158  * PARAMETERS:  Cache           - Handle to cache object
1159  *
1160  * RETURN:      Status
1161  *
1162  * DESCRIPTION: Free all objects within the requested cache and delete the
1163  *              cache object.
1164  *
1165  ******************************************************************************/
1166
1167 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1168 {
1169         kmem_cache_destroy(cache);
1170         return (AE_OK);
1171 }
1172
1173 /*******************************************************************************
1174  *
1175  * FUNCTION:    acpi_os_release_object
1176  *
1177  * PARAMETERS:  Cache       - Handle to cache object
1178  *              Object      - The object to be released
1179  *
1180  * RETURN:      None
1181  *
1182  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1183  *              the object is deleted.
1184  *
1185  ******************************************************************************/
1186
1187 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1188 {
1189         kmem_cache_free(cache, object);
1190         return (AE_OK);
1191 }
1192
1193 /**
1194  *      acpi_dmi_dump - dump DMI slots needed for blacklist entry
1195  *
1196  *      Returns 0 on success
1197  */
1198 int acpi_dmi_dump(void)
1199 {
1200
1201         if (!dmi_available)
1202                 return -1;
1203
1204         printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1205                 dmi_get_slot(DMI_SYS_VENDOR));
1206         printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1207                 dmi_get_slot(DMI_PRODUCT_NAME));
1208         printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1209                 dmi_get_slot(DMI_PRODUCT_VERSION));
1210         printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1211                 dmi_get_slot(DMI_BOARD_NAME));
1212         printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1213                 dmi_get_slot(DMI_BIOS_VENDOR));
1214         printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1215                 dmi_get_slot(DMI_BIOS_DATE));
1216
1217         return 0;
1218 }
1219
1220
1221 /******************************************************************************
1222  *
1223  * FUNCTION:    acpi_os_validate_interface
1224  *
1225  * PARAMETERS:  interface           - Requested interface to be validated
1226  *
1227  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1228  *
1229  * DESCRIPTION: Match an interface string to the interfaces supported by the
1230  *              host. Strings originate from an AML call to the _OSI method.
1231  *
1232  *****************************************************************************/
1233
1234 acpi_status
1235 acpi_os_validate_interface(char *interface)
1236 {
1237         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1238                 return AE_OK;
1239         if (!strcmp("Linux", interface)) {
1240
1241                 printk(KERN_NOTICE PREFIX
1242                         "BIOS _OSI(Linux) query %s%s\n",
1243                         osi_linux.enable ? "honored" : "ignored",
1244                         osi_linux.cmdline ? " via cmdline" :
1245                         osi_linux.dmi ? " via DMI" : "");
1246
1247                 if (!osi_linux.dmi) {
1248                         if (acpi_dmi_dump())
1249                                 printk(KERN_NOTICE PREFIX
1250                                         "[please extract dmidecode output]\n");
1251                         printk(KERN_NOTICE PREFIX
1252                                 "Please send DMI info above to "
1253                                 "linux-acpi@vger.kernel.org\n");
1254                 }
1255                 if (!osi_linux.known && !osi_linux.cmdline) {
1256                         printk(KERN_NOTICE PREFIX
1257                                 "If \"acpi_osi=%sLinux\" works better, "
1258                                 "please notify linux-acpi@vger.kernel.org\n",
1259                                 osi_linux.enable ? "!" : "");
1260                 }
1261
1262                 if (osi_linux.enable)
1263                         return AE_OK;
1264         }
1265         return AE_SUPPORT;
1266 }
1267
1268 /******************************************************************************
1269  *
1270  * FUNCTION:    acpi_os_validate_address
1271  *
1272  * PARAMETERS:  space_id             - ACPI space ID
1273  *              address             - Physical address
1274  *              length              - Address length
1275  *
1276  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1277  *              should return AE_AML_ILLEGAL_ADDRESS.
1278  *
1279  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1280  *              the addresses accessed by AML operation regions.
1281  *
1282  *****************************************************************************/
1283
1284 acpi_status
1285 acpi_os_validate_address(u8 space_id,
1286                          acpi_physical_address address,
1287                          acpi_size length)
1288 {
1289 
1290         return AE_OK;
1291 }
1292 
1293
1294 #endif