diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.txt
index 3f60db41b2f0fdce0f6944cf527b3594f2ff8e8e..4202ad0931300fe776697a081494a433b54e3b61 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.txt
@@ -1,7 +1,7 @@
-Refcounter design for elements of lists/arrays protected by RCU.
+Reference-count design for elements of lists/arrays protected by RCU.
 
-Refcounting on elements of  lists which are protected by traditional
-reader/writer spinlocks or semaphores are straight forward as in:
+Reference counting on elements of lists that are protected by traditional
+reader/writer spinlocks or semaphores is straightforward:
 
 1.                             2.
 add()                          search_and_reference()
@@ -28,40 +28,39 @@ release_referenced()                        delete()
                                            ...
                                        }
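+
+For concreteness, the search_and_reference() side of the first example
+might look as follows in code.  This is only a sketch: "struct el", its
+"rc" count, the reader/writer lock "list_lock", and search_for_element()
+are hypothetical names standing in for the pseudocode above.
+
+    struct el *search_and_reference(int key)
+    {
+        struct el *p;
+
+        read_lock(&list_lock);
+        p = search_for_element(key);    /* list cannot change under us */
+        if (p)
+            atomic_inc(&p->rc);         /* delete() is excluded by the lock */
+        read_unlock(&list_lock);
+        return p;
+    }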
 
-If this list/array is made lock free using rcu as in changing the
-write_lock in add() and delete() to spin_lock and changing read_lock
-in search_and_reference to rcu_read_lock(), the atomic_get in
-search_and_reference could potentially hold reference to an element which
-has already been deleted from the list/array.  atomic_inc_not_zero takes
-care of this scenario. search_and_reference should look as;
+If this list/array is made lock-free using RCU, by changing the
+write_lock() in add() and delete() to spin_lock() and the read_lock()
+in search_and_reference() to rcu_read_lock(), the atomic_inc() in
+search_and_reference() could obtain a reference to an element that
+has already been deleted from the list/array.  Use atomic_inc_not_zero()
+in this scenario as follows:
 
 1.                                     2.
 add()                                  search_and_reference()
 {                                      {
     alloc_object                           rcu_read_lock();
    ...                                    search_for_element
-    atomic_set(&el->rc, 1);                if (atomic_inc_not_zero(&el->rc)) {
-    write_lock(&list_lock);                    rcu_read_unlock();
+    atomic_set(&el->rc, 1);                if (!atomic_inc_not_zero(&el->rc)) {
+    spin_lock(&list_lock);                     rcu_read_unlock();
                                                return FAIL;
    add_element                            }
    ...                                    ...
-    write_unlock(&list_lock);              rcu_read_unlock();
+    spin_unlock(&list_lock);               rcu_read_unlock();
 }                                      }
 3.                                     4.
 release_referenced()                   delete()
 {                                      {
-    ...                                            write_lock(&list_lock);
-    atomic_dec(&el->rc, relfunc)           ...
-    ...                                            delete_element
-}                                          write_unlock(&list_lock);
-                                           ...
+    ...                                    spin_lock(&list_lock);
+    if (atomic_dec_and_test(&el->rc))      ...
+        call_rcu(&el->head, el_free);      delete_element
+    ...                                    spin_unlock(&list_lock);
+}                                          ...
                                            if (atomic_dec_and_test(&el->rc))
                                                call_rcu(&el->head, el_free);
                                            ...
                                        }
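+
+For concreteness, the RCU-based lookup and deletion above might be
+written as follows.  This is only a sketch: "struct el", "list_lock",
+search_for_element(), and el_free() are hypothetical, while
+rcu_read_lock(), atomic_inc_not_zero(), list_del_rcu(), and call_rcu()
+are the kernel primitives named in the example.
+
+    struct el {
+        struct list_head list;
+        atomic_t rc;            /* 1 == the list's own reference */
+        struct rcu_head head;   /* for deferred freeing via call_rcu() */
+        int key;
+    };
+
+    static void el_free(struct rcu_head *head)
+    {
+        kfree(container_of(head, struct el, head));
+    }
+
+    struct el *search_and_reference(int key)
+    {
+        struct el *p;
+
+        rcu_read_lock();
+        p = search_for_element(key);
+        /* A concurrent delete() may already have dropped the last
+           reference; atomic_inc_not_zero() detects this and fails. */
+        if (p && !atomic_inc_not_zero(&p->rc))
+            p = NULL;
+        rcu_read_unlock();
+        return p;
+    }
+
+    void delete(struct el *p)
+    {
+        spin_lock(&list_lock);
+        list_del_rcu(&p->list);         /* unpublish from the list */
+        spin_unlock(&list_lock);
+        /* Drop the list's reference; free only after a grace period,
+           so concurrent readers' element pointers remain valid. */
+        if (atomic_dec_and_test(&p->rc))
+            call_rcu(&p->head, el_free);
+    }
+
+Note that the final atomic_dec_and_test() runs after list_lock is
+dropped, matching column 4 above: the element is already unpublished,
+so only its reference count keeps it alive.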
 
-Sometimes, reference to the element need to be obtained in the
-update (write) stream.  In such cases, atomic_inc_not_zero might be an
-overkill since the spinlock serialising list updates are held. atomic_inc
-is to be used in such cases.
-
+Sometimes, a reference to the element needs to be obtained in the
+update (write) stream.  In such cases, atomic_inc_not_zero() might be
+overkill, since we hold the update-side spinlock.  One might instead
+use atomic_inc().
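+
+For concreteness, such an update-side lookup might look as follows,
+again assuming the hypothetical names above and that the caller already
+holds list_lock:
+
+    struct el *search_and_reference_locked(int key)
+    {
+        struct el *p;
+
+        /* The caller holds list_lock, so delete() cannot run and
+           el->rc cannot fall to zero under us; plain atomic_inc()
+           is therefore sufficient. */
+        p = search_for_element(key);
+        if (p)
+            atomic_inc(&p->rc);
+        return p;
+    }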