pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - drivers/net/wireless/rt2x00/rt2x00queue.c
Merge branch 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-omap-h63xx.git] / drivers / net / wireless / rt2x00 / rt2x00queue.c
index e9f4261054bc905daf474357519cb0be5ea78361..eaec6bd93ed5e0adc3f5a0911a8d9dac72356db1 100644 (file)
@@ -55,14 +55,12 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
        /*
         * For IV/EIV/ICV assembly we must make sure there is
         * at least 8 bytes available in headroom for IV/EIV
-        * and 4 bytes for ICV data as tailroon.
+        * and 8 bytes for ICV data as tailroom.
         */
-#ifdef CONFIG_RT2X00_LIB_CRYPTO
        if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
                head_size += 8;
-               tail_size += 4;
+               tail_size += 8;
        }
-#endif /* CONFIG_RT2X00_LIB_CRYPTO */
 
        /*
         * Allocate skbuffer.
@@ -174,7 +172,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
        txdesc->cw_max = entry->queue->cw_max;
        txdesc->aifs = entry->queue->aifs;
 
-       /* Data length + CRC + IV/EIV/ICV/MMIC (when using encryption) */
+       /* Data length + CRC */
        data_length = entry->skb->len + 4;
 
        /*
@@ -183,34 +181,17 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
 
-#ifdef CONFIG_RT2X00_LIB_CRYPTO
        if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) &&
            !entry->skb->do_not_encrypt) {
-               struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
-
-               __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
-
-               txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
-
-               if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
-                       __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
-
-               txdesc->key_idx = hw_key->hw_key_idx;
-               txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
+               /* Apply crypto specific descriptor information */
+               rt2x00crypto_create_tx_descriptor(entry, txdesc);
 
                /*
                 * Extend frame length to include all encryption overhead
                 * that will be added by the hardware.
                 */
                data_length += rt2x00crypto_tx_overhead(tx_info);
-
-               if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
-                       __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
-
-               if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
-                       __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
        }
-#endif /* CONFIG_RT2X00_LIB_CRYPTO */
 
        /*
         * Check if this is a RTS/CTS frame
@@ -231,14 +212,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.rates[0].count - 1;
-       /*
-        * XXX: If at this point we knew whether the HW is going to use
-        *      the RETRY_MODE bit or the retry_limit (currently all
-        *      use the RETRY_MODE bit) we could do something like b43
-        *      does, set the RETRY_MODE bit when the RC algorithm is
-        *      requesting more than the long retry limit.
-        */
-       if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+       if (txdesc->retry_limit >= rt2x00dev->long_retry)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
 
        /*
@@ -319,8 +293,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
                /*
                 * Convert length to microseconds.
                 */
-               residual = get_duration_res(data_length, hwrate->bitrate);
-               duration = get_duration(data_length, hwrate->bitrate);
+               residual = GET_DURATION_RES(data_length, hwrate->bitrate);
+               duration = GET_DURATION(data_length, hwrate->bitrate);
 
                if (residual != 0) {
                        duration++;
@@ -386,7 +360,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
        u8 rate_idx, rate_flags;
 
        if (unlikely(rt2x00queue_full(queue)))
-               return -EINVAL;
+               return -ENOBUFS;
 
        if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
                ERROR(queue->rt2x00dev,
@@ -415,7 +389,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
        tx_info = IEEE80211_SKB_CB(skb);
        rate_idx = tx_info->control.rates[0].idx;
        rate_flags = tx_info->control.rates[0].flags;
-       skbdesc = get_skb_frame_desc(entry->skb);
+       skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;
        skbdesc->tx_rate_idx = rate_idx;
@@ -428,19 +402,21 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
         */
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-               rt2x00crypto_tx_remove_iv(skb, iv_len);
+               if (test_bit(CONFIG_CRYPTO_COPY_IV, &queue->rt2x00dev->flags))
+                       rt2x00crypto_tx_copy_iv(skb, iv_len);
+               else
+                       rt2x00crypto_tx_remove_iv(skb, iv_len);
        }
 
        /*
         * It could be possible that the queue was corrupted and this
-        * call failed. Just drop the frame, we cannot rollback and pass
-        * the frame to mac80211 because the skb->cb has now been tainted.
+        * call failed. Since we always return NETDEV_TX_OK to mac80211,
+        * this frame will simply be dropped.
         */
        if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
                clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
-               dev_kfree_skb_any(entry->skb);
                entry->skb = NULL;
-               return 0;
+               return -EIO;
        }
 
        if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
@@ -589,40 +565,18 @@ static void rt2x00queue_reset(struct data_queue *queue)
        spin_unlock_irqrestore(&queue->lock, irqflags);
 }
 
-void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue = rt2x00dev->rx;
-       unsigned int i;
-
-       rt2x00queue_reset(queue);
-
-       if (!rt2x00dev->ops->lib->init_rxentry)
-               return;
-
-       for (i = 0; i < queue->limit; i++) {
-               queue->entries[i].flags = 0;
-
-               rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
-                                                 &queue->entries[i]);
-       }
-}
-
-void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
+void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
 {
        struct data_queue *queue;
        unsigned int i;
 
-       txall_queue_for_each(rt2x00dev, queue) {
+       queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);
 
-               if (!rt2x00dev->ops->lib->init_txentry)
-                       continue;
-
                for (i = 0; i < queue->limit; i++) {
                        queue->entries[i].flags = 0;
 
-                       rt2x00dev->ops->lib->init_txentry(rt2x00dev,
-                                                         &queue->entries[i]);
+                       rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
                }
        }
 }