2 * drivers/w1/masters/omap_hdq.c
4 * Copyright (C) 2007 Texas Instruments, Inc.
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/interrupt.h>
15 #include <linux/err.h>
16 #include <linux/clk.h>
19 #include <mach/hardware.h>
22 #include "../w1_int.h"
24 #define MOD_NAME "OMAP_HDQ:"
26 #define OMAP_HDQ_REVISION 0x00
27 #define OMAP_HDQ_TX_DATA 0x04
28 #define OMAP_HDQ_RX_DATA 0x08
29 #define OMAP_HDQ_CTRL_STATUS 0x0c
30 #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6)
31 #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5)
32 #define OMAP_HDQ_CTRL_STATUS_GO (1<<4)
33 #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2)
34 #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1)
35 #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0)
36 #define OMAP_HDQ_INT_STATUS 0x10
37 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2)
38 #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1)
39 #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0)
40 #define OMAP_HDQ_SYSCONFIG 0x14
41 #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1)
42 #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0)
43 #define OMAP_HDQ_SYSSTATUS 0x18
44 #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0)
46 #define OMAP_HDQ_FLAG_CLEAR 0
47 #define OMAP_HDQ_FLAG_SET 1
48 #define OMAP_HDQ_TIMEOUT (HZ/5)
50 #define OMAP_HDQ_MAX_USER 4
/* Wait queue the ISR uses to wake threads blocked on an HDQ transfer. */
52 DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
/*
 * NOTE(review): the fields below belong to struct hdq_data; the struct
 * declaration itself is not visible in this excerpt.
 */
/* Base address of the HDQ register block; OMAP_HDQ_* offsets are added to it. */
57 resource_size_t hdq_base;
/* Serializes use-count updates and whole HDQ transactions. */
58 struct semaphore hdq_semlock;
/* Protects hdq_irqstatus, which is written by the ISR. */
63 spinlock_t hdq_spinlock;
66 static int omap_hdq_get(struct hdq_data *hdq_data);
67 static int omap_hdq_put(struct hdq_data *hdq_data);
68 static int omap_hdq_break(struct hdq_data *hdq_data);
70 static int __init omap_hdq_probe(struct platform_device *pdev);
71 static int omap_hdq_remove(struct platform_device *pdev);
/* Platform driver glue: binds probe/remove to the HDQ platform device. */
73 static struct platform_driver omap_hdq_driver = {
74 .probe = omap_hdq_probe,
75 .remove = omap_hdq_remove,
83 static u8 omap_w1_read_byte(void *_hdq);
84 static void omap_w1_write_byte(void *_hdq, u8 byte);
85 static u8 omap_w1_reset_bus(void *_hdq);
86 static void omap_w1_search_bus(void *_hdq, u8 search_type,
87 w1_slave_found_callback slave_found);
/* 1-Wire bus-master operations exported to the w1 core. */
90 static struct w1_bus_master omap_w1_master = {
91 .read_byte = omap_w1_read_byte,
92 .write_byte = omap_w1_write_byte,
93 .reset_bus = omap_w1_reset_bus,
94 .search = omap_w1_search_bus,
98 * HDQ register I/O routines
/* Read one byte from the HDQ register at @offset. */
100 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
102 return omap_readb(hdq_data->hdq_base + offset);
/* Write @val to the HDQ register at @offset. */
105 static inline u8 hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
107 omap_writeb(val, hdq_data->hdq_base + offset);
/*
 * Read-modify-write the register at @offset: clear the bits in the mask,
 * then OR in the new value bits.  (The mask/val parameter line is not
 * visible in this excerpt.)
 */
112 static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
115 u8 new_val = (omap_readb(hdq_data->hdq_base + offset) & ~mask)
117 omap_writeb(new_val, hdq_data->hdq_base + offset);
123 * Wait for one or more bits in the flag to change state.
124 * HDQ_FLAG_SET: wait until any bit in the flag is set.
125 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
126 * Returns 0 on success and -ETIMEDOUT in the case of a timeout.
/*
 * Poll the register at @offset until the bits in @flag are all cleared
 * (OMAP_HDQ_FLAG_CLEAR) or any is set (OMAP_HDQ_FLAG_SET), bounded by
 * OMAP_HDQ_TIMEOUT jiffies.  The last value read is stored in *status.
 * Per the header comment above: returns 0 on success, -ETIMEDOUT on
 * timeout (the return statements fall outside this visible excerpt).
 */
128 static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
129 u8 flag, u8 flag_set, u8 *status)
132 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
134 if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
135 /* wait for the flag clear */
136 while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
137 && time_before(jiffies, timeout)) {
/* presumably followed by schedule_timeout() — not visible here */
138 set_current_state(TASK_UNINTERRUPTIBLE);
/* loop ended: flag still set means we timed out */
141 if (unlikely(*status & flag))
143 } else if (flag_set == OMAP_HDQ_FLAG_SET) {
144 /* wait for the flag set */
145 while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
146 && time_before(jiffies, timeout)) {
147 set_current_state(TASK_UNINTERRUPTIBLE);
/* loop ended: flag still clear means we timed out */
150 if (unlikely(!(*status & flag)))
159 * write out a byte and fill *status with HDQ_INT_STATUS
/*
 * Transmit @val on the HDQ bus.  Caller is expected to hold hdq_semlock
 * and to have the clocks enabled (the callers visible in this file do).
 * *status receives the interrupt status captured by the ISR.
 */
162 hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
166 unsigned long irqflags;
/* Reset irqstatus under the lock so the ISR's next write is unambiguous. */
170 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
171 /* clear interrupt flags via a dummy read */
172 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
173 /* ISR loads it with new INT_STATUS */
174 hdq_data->hdq_irqstatus = 0;
175 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
/* Load the TX register, then kick off the write (DIR cleared, GO set). */
177 hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
180 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
181 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
182 /* wait for the TXCOMPLETE bit */
183 ret = wait_event_interruptible_timeout(hdq_wait_queue,
184 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
185 if (unlikely(ret < 0)) {
186 dev_dbg(hdq_data->dev, "wait interrupted");
/* Snapshot the ISR-written status under the lock. */
190 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
191 *status = hdq_data->hdq_irqstatus;
192 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
193 /* check irqstatus */
194 if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
/*
 * NOTE(review): the adjacent string literals below concatenate without
 * a space ("forTXCOMPLETE"); left untouched here as it is runtime text.
 */
195 dev_dbg(hdq_data->dev, "timeout waiting for"
196 "TXCOMPLETE/RXCOMPLETE, %x", *status);
200 /* wait for the GO bit return to zero */
201 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
202 OMAP_HDQ_CTRL_STATUS_GO,
203 OMAP_HDQ_FLAG_CLEAR, &tmp_status);
205 dev_dbg(hdq_data->dev, "timeout waiting GO bit"
206 "return to zero, %x", tmp_status);
214 * HDQ Interrupt service routine.
/*
 * Latch INT_STATUS into hdq_data->hdq_irqstatus (reading the register
 * also clears it in hardware, per the comments elsewhere in this file)
 * and wake any thread sleeping on hdq_wait_queue when a completion or
 * timeout bit is present.
 */
216 static irqreturn_t hdq_isr(int irq, void *_hdq)
218 struct hdq_data *hdq_data = _hdq;
219 unsigned long irqflags;
221 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
222 hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
223 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
224 dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
226 if (hdq_data->hdq_irqstatus &
227 (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
228 | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
229 /* wake up sleeping process */
230 wake_up_interruptible(&hdq_wait_queue);
237 * HDQ Mode: always return success.
/* w1 reset_bus callback; body (a bare "return 0") is outside this excerpt. */
239 static u8 omap_w1_reset_bus(void *_hdq)
245 * W1 search callback function.
/*
 * Report a single fabricated slave ID to the w1 core.  module_id is
 * presumably derived from the W1_ID module parameter declared at the
 * bottom of this file — TODO confirm against the elided lines.
 */
247 static void omap_w1_search_bus(void *_hdq, u8 search_type,
248 w1_slave_found_callback slave_found)
250 u64 module_id, rn_le, cs, id;
257 rn_le = cpu_to_le64(module_id);
/*
 * HDQ may not strictly follow the 1-Wire spec, so the CRC is computed
 * over the module-parameter-derived ID rather than read from a device.
 */
262 cs = w1_calc_crc8((u8 *)&rn_le, 7);
/* Pack: CRC in the top byte, 56-bit ID below it. */
263 id = (cs << 56) | module_id;
265 slave_found(_hdq, id);
/*
 * Soft-reset the HDQ controller and restore mode/clock/idle settings.
 * Caller must hold hdq_semlock (leading underscore marks the unlocked
 * internal helper).
 */
268 static int _omap_hdq_reset(struct hdq_data *hdq_data)
273 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
275 * Select HDQ mode & enable clocks.
276 * It is observed that INT flags can't be cleared via a read and GO/INIT
277 * won't return to zero if interrupt is disabled. So we always enable
280 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
281 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
282 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
284 /* wait for reset to complete */
285 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
286 OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
288 dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
/* Reset succeeded: re-select HDQ mode with interrupts, then allow autoidle. */
291 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
292 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
293 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
294 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
295 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
302 * Issue break pulse to the device.
/*
 * Generate the HDQ break (initialization) pulse and wait for the
 * controller to signal TIMEOUT, which marks break completion.
 * Takes hdq_semlock for the duration; fails if no user holds the
 * module (hdq_usecount == 0, i.e. clocks may be off).
 */
305 omap_hdq_break(struct hdq_data *hdq_data)
309 unsigned long irqflags;
311 ret = down_interruptible(&hdq_data->hdq_semlock);
/* Refuse to touch the hardware unless omap_hdq_get() has been called. */
315 if (!hdq_data->hdq_usecount) {
316 up(&hdq_data->hdq_semlock);
/* Arm the ISR: clear pending flags and zero the shared status word. */
320 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
321 /* clear interrupt flags via a dummy read */
322 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
323 /* ISR loads it with new INT_STATUS */
324 hdq_data->hdq_irqstatus = 0;
325 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
327 /* set the INIT and GO bit */
328 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
329 OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
330 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
331 OMAP_HDQ_CTRL_STATUS_GO);
333 /* wait for the TIMEOUT bit */
334 ret = wait_event_interruptible_timeout(hdq_wait_queue,
335 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
336 if (unlikely(ret < 0)) {
337 dev_dbg(hdq_data->dev, "wait interrupted");
338 up(&hdq_data->hdq_semlock);
/* Snapshot the ISR-written status under the lock. */
342 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
343 tmp_status = hdq_data->hdq_irqstatus;
344 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
345 /* check irqstatus */
346 if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
347 dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
349 up(&hdq_data->hdq_semlock);
353 * wait for both INIT and GO bits to return to zero.
354 * zero wait time expected for interrupt mode.
356 ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
357 OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
358 OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
361 dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
362 "return to zero, %x", tmp_status);
364 up(&hdq_data->hdq_semlock);
/*
 * Read one byte from the HDQ bus into *val.  Requires an active user
 * (hdq_usecount != 0).  If the RX completion raced ahead of us it is
 * already latched in hdq_irqstatus; otherwise start a read (DIR|GO)
 * and poll for RXCOMPLETE.
 */
368 static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
372 unsigned long irqflags;
374 ret = down_interruptible(&hdq_data->hdq_semlock);
378 if (!hdq_data->hdq_usecount) {
379 up(&hdq_data->hdq_semlock);
/*
 * NOTE(review): hdq_irqstatus is read here without taking hdq_spinlock,
 * unlike the locked snapshot further below — racy read, worth confirming
 * intent against the ISR.
 */
383 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
384 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
385 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
386 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
388 * The RX comes immediately after TX. It
389 * triggers another interrupt before we
390 * sleep. So we have to wait for RXCOMPLETE bit.
393 unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;
394 while (!(hdq_data->hdq_irqstatus
395 & OMAP_HDQ_INT_STATUS_RXCOMPLETE)
396 && time_before(jiffies, timeout)) {
/* presumably followed by schedule_timeout() — not visible here */
397 set_current_state(TASK_UNINTERRUPTIBLE);
/* Clear DIR so the controller is back in write orientation. */
401 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
402 OMAP_HDQ_CTRL_STATUS_DIR);
403 spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
404 status = hdq_data->hdq_irqstatus;
405 spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
406 /* check irqstatus */
407 if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
408 dev_dbg(hdq_data->dev, "timeout waiting for"
409 "RXCOMPLETE, %x", status);
410 up(&hdq_data->hdq_semlock);
414 /* the data is ready. Read it in! */
415 *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
416 up(&hdq_data->hdq_semlock);
423 * Enable clocks and set the controller to HDQ mode.
/*
 * Acquire a use-count reference on the controller (max OMAP_HDQ_MAX_USER).
 * The first user enables ick/fck and programs mode/idle registers.
 * Paired with omap_hdq_put().
 */
426 omap_hdq_get(struct hdq_data *hdq_data)
430 ret = down_interruptible(&hdq_data->hdq_semlock);
434 if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
435 dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
436 up(&hdq_data->hdq_semlock);
439 hdq_data->hdq_usecount++;
440 try_module_get(THIS_MODULE);
441 if (1 == hdq_data->hdq_usecount) {
/*
 * NOTE(review): on clk_enable failure both clocks are clk_put() while
 * hdq_usecount stays incremented in the visible lines — the decrement,
 * if any, is in the elided error path; verify against the full file.
 */
442 if (clk_enable(hdq_data->hdq_ick)) {
443 dev_dbg(hdq_data->dev, "Can not enable ick\n");
444 clk_put(hdq_data->hdq_ick);
445 clk_put(hdq_data->hdq_fck);
446 up(&hdq_data->hdq_semlock);
449 if (clk_enable(hdq_data->hdq_fck)) {
450 dev_dbg(hdq_data->dev, "Can not enable fck\n");
451 clk_put(hdq_data->hdq_ick);
452 clk_put(hdq_data->hdq_fck);
453 up(&hdq_data->hdq_semlock);
457 /* make sure HDQ is out of reset */
458 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
459 OMAP_HDQ_SYSSTATUS_RESETDONE)) {
460 ret = _omap_hdq_reset(hdq_data);
462 /* back up the count */
463 hdq_data->hdq_usecount--;
465 /* select HDQ mode & enable clocks */
466 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
467 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
468 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
469 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
470 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
/* dummy read clears any stale interrupt flags */
471 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
475 up(&hdq_data->hdq_semlock);
480 * Disable clocks to the module.
/*
 * Drop a use-count reference taken by omap_hdq_get(); the last user
 * disables both clocks.  Logs (but does not crash on) an underflow
 * attempt when the count is already zero.
 */
483 omap_hdq_put(struct hdq_data *hdq_data)
487 ret = down_interruptible(&hdq_data->hdq_semlock);
491 if (0 == hdq_data->hdq_usecount) {
492 dev_dbg(hdq_data->dev, "attempt to decrement use count"
496 hdq_data->hdq_usecount--;
497 module_put(THIS_MODULE);
498 if (0 == hdq_data->hdq_usecount) {
499 clk_disable(hdq_data->hdq_ick);
500 clk_disable(hdq_data->hdq_fck);
503 up(&hdq_data->hdq_semlock);
508 * Used to control the calls to omap_hdq_get and omap_hdq_put.
509 * HDQ Protocol: Write the CMD|REG_address first, followed by
510 * the data write or read.
512 static int init_trans;
515 * Read a byte of data from the device.
/*
 * w1 read_byte callback: wraps hdq_read_byte() and releases the module
 * reference both on failure and when the write-then-read transaction
 * completes (init_trans bookkeeping; some lines elided).
 */
517 static u8 omap_w1_read_byte(void *_hdq)
519 struct hdq_data *hdq_data = _hdq;
523 ret = hdq_read_byte(hdq_data, &val);
/* read failed: drop the reference taken at transaction start */
526 omap_hdq_put(hdq_data);
530 /* Write followed by a read, release the module */
533 omap_hdq_put(hdq_data);
540 * Write a byte of data to the device.
/*
 * w1 write_byte callback.  The first write of a transaction grabs a
 * module reference (omap_hdq_get); the second write ends the transaction
 * and drops it.  init_trans increment lines are elided from this excerpt.
 */
542 static void omap_w1_write_byte(void *_hdq, u8 byte)
544 struct hdq_data *hdq_data = _hdq;
547 /* First write to initialize the transfer */
549 omap_hdq_get(hdq_data);
553 hdq_write_byte(hdq_data, byte, &status);
554 dev_dbg(hdq_data->dev, "Ctrl status %x\n", status);
556 /* Second write, data transferred. Release the module */
557 if (init_trans > 1) {
558 omap_hdq_put(hdq_data);
/*
 * Probe: allocate driver state, map resources, grab and briefly enable
 * clocks to read the hardware revision, issue an initial break pulse,
 * hook the IRQ, then register as a w1 bus master.  Error-path labels
 * (the goto targets near the end) are partially elided from this excerpt.
 */
565 static int __init omap_hdq_probe(struct platform_device *pdev)
567 struct hdq_data *hdq_data;
568 struct resource *res;
575 hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
577 dev_dbg(&pdev->dev, "unable to allocate memory\n");
582 hdq_data->dev = &pdev->dev;
583 platform_set_drvdata(pdev, hdq_data);
585 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
587 dev_dbg(&pdev->dev, "unable to get resource\n");
/* register base taken straight from the MEM resource */
592 hdq_data->hdq_base = res->start;
594 /* get interface & functional clock objects */
595 hdq_data->hdq_ick = clk_get(&pdev->dev, "hdq_ick");
596 hdq_data->hdq_fck = clk_get(&pdev->dev, "hdq_fck");
598 if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
599 dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
600 if (IS_ERR(hdq_data->hdq_ick)) {
601 ret = PTR_ERR(hdq_data->hdq_ick);
/* ick was valid but fck failed: release the ick reference */
604 if (IS_ERR(hdq_data->hdq_fck)) {
605 ret = PTR_ERR(hdq_data->hdq_fck);
606 clk_put(hdq_data->hdq_ick);
611 hdq_data->hdq_usecount = 0;
612 sema_init(&hdq_data->hdq_semlock, 1);
/* clocks on temporarily, just to read the revision and do the break */
614 if (clk_enable(hdq_data->hdq_ick)) {
615 dev_dbg(&pdev->dev, "Can not enable ick\n");
620 if (clk_enable(hdq_data->hdq_fck)) {
621 dev_dbg(&pdev->dev, "Can not enable fck\n");
626 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
627 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
628 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");
630 spin_lock_init(&hdq_data->hdq_spinlock);
/* initial break pulse to settle the bus */
631 omap_hdq_break(hdq_data);
633 irq = platform_get_irq(pdev, 0);
639 ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
641 dev_dbg(&pdev->dev, "could not request irq\n");
645 /* don't clock the HDQ until it is needed */
646 clk_disable(hdq_data->hdq_ick);
647 clk_disable(hdq_data->hdq_fck);
649 omap_w1_master.data = hdq_data;
651 ret = w1_add_master_device(&omap_w1_master);
653 dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
/* --- error unwind (goto targets; labels elided from this excerpt) --- */
661 clk_disable(hdq_data->hdq_fck);
664 clk_disable(hdq_data->hdq_ick);
667 clk_put(hdq_data->hdq_ick);
668 clk_put(hdq_data->hdq_fck);
671 hdq_data->hdq_base = NULL;
674 platform_set_drvdata(pdev, NULL);
/*
 * Remove: refuse (log) if still in use, then release clocks, the IRQ,
 * and the drvdata pointer.
 */
682 static int omap_hdq_remove(struct platform_device *pdev)
684 struct hdq_data *hdq_data = platform_get_drvdata(pdev);
/* NOTE(review): down_interruptible() return value is ignored here. */
686 down_interruptible(&hdq_data->hdq_semlock);
687 if (0 != hdq_data->hdq_usecount) {
688 dev_dbg(&pdev->dev, "removed when use count is not zero\n");
691 up(&hdq_data->hdq_semlock);
693 /* remove module dependency */
694 clk_put(hdq_data->hdq_ick);
695 clk_put(hdq_data->hdq_fck);
/*
 * NOTE(review): frees a hard-coded INT_24XX_HDQ_IRQ while probe used
 * platform_get_irq() — inconsistent; should arguably free the probed IRQ.
 */
696 free_irq(INT_24XX_HDQ_IRQ, hdq_data);
697 platform_set_drvdata(pdev, NULL);
/* Module init/exit bodies (their signature lines are elided from this excerpt). */
706 return platform_driver_register(&omap_hdq_driver);
712 platform_driver_unregister(&omap_hdq_driver);
715 module_init(omap_hdq_init);
716 module_exit(omap_hdq_exit);
/* Fabricated slave ID used by omap_w1_search_bus(); readable by owner only. */
718 module_param(W1_ID, int, S_IRUSR);
720 MODULE_AUTHOR("Texas Instruments");
721 MODULE_DESCRIPTION("HDQ driver Library");
722 MODULE_LICENSE("GPL");