diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0a60f55f32a..3c72557df45 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,6 +36,7 @@ struct mpath_node {
 };
 
 static struct mesh_table *mesh_paths;
+static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 /* This lock will have the grow table function as writer and add / delete nodes
  * as readers. When reading the table (i.e. doing lookups) we are well protected
@@ -94,6 +95,34 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return NULL;
 }
 
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath;
+	struct hlist_node *n;
+	struct hlist_head *bucket;
+	struct mesh_table *tbl;
+	struct mpath_node *node;
+
+	tbl = rcu_dereference(mpp_paths);
+
+	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
+	hlist_for_each_entry_rcu(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata &&
+		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+			if (MPATH_EXPIRED(mpath)) {
+				spin_lock_bh(&mpath->state_lock);
+				if (MPATH_EXPIRED(mpath))
+					mpath->flags &= ~MESH_PATH_ACTIVE;
+				spin_unlock_bh(&mpath->state_lock);
+			}
+			return mpath;
+		}
+	}
+	return NULL;
+}
+
+
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
  * @idx: index
@@ -153,7 +182,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	if (is_multicast_ether_addr(dst))
 		return -ENOTSUPP;
 
-	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
+	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return -ENOSPC;
 
 	err = -ENOMEM;
@@ -221,7 +250,92 @@ err_exists:
 err_node_alloc:
 	kfree(new_mpath);
 err_path_alloc:
-	atomic_dec(&sdata->u.sta.mpaths);
+	atomic_dec(&sdata->u.mesh.mpaths);
+	return err;
+}
+
+
+int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath, *new_mpath;
+	struct mpath_node *node, *new_node;
+	struct hlist_head *bucket;
+	struct hlist_node *n;
+	int grow = 0;
+	int err = 0;
+	u32 hash_idx;
+
+
+	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+		/* never add ourselves as neighbours */
+		return -ENOTSUPP;
+
+	if (is_multicast_ether_addr(dst))
+		return -ENOTSUPP;
+
+	err = -ENOMEM;
+	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+	if (!new_mpath)
+		goto err_path_alloc;
+
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	if (!new_node)
+		goto err_node_alloc;
+
+	read_lock(&pathtbl_resize_lock);
+	memcpy(new_mpath->dst, dst, ETH_ALEN);
+	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
+	new_mpath->sdata = sdata;
+	new_mpath->flags = 0;
+	skb_queue_head_init(&new_mpath->frame_queue);
+	new_node->mpath = new_mpath;
+	new_mpath->exp_time = jiffies;
+	spin_lock_init(&new_mpath->state_lock);
+
+	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
+	bucket = &mpp_paths->hash_buckets[hash_idx];
+
+	spin_lock(&mpp_paths->hashwlock[hash_idx]);
+
+	err = -EEXIST;
+	hlist_for_each_entry(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+			goto err_exists;
+	}
+
+	hlist_add_head_rcu(&new_node->list, bucket);
+	if (atomic_inc_return(&mpp_paths->entries) >=
+		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
+		grow = 1;
+
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	if (grow) {
+		struct mesh_table *oldtbl, *newtbl;
+
+		write_lock(&pathtbl_resize_lock);
+		oldtbl = mpp_paths;
+		newtbl = mesh_table_grow(mpp_paths);
+		if (!newtbl) {
+			write_unlock(&pathtbl_resize_lock);
+			return 0;
+		}
+		rcu_assign_pointer(mpp_paths, newtbl);
+		write_unlock(&pathtbl_resize_lock);
+
+		synchronize_rcu();
+		mesh_table_free(oldtbl, false);
+	}
+	return 0;
+
+err_exists:
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	kfree(new_node);
+err_node_alloc:
+	kfree(new_mpath);
+err_path_alloc:
 	return err;
 }
 
@@ -306,7 +420,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
 	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
 
 	del_timer_sync(&node->mpath->timer);
-	atomic_dec(&sdata->u.sta.mpaths);
+	atomic_dec(&sdata->u.mesh.mpaths);
 	kfree(node->mpath);
 	kfree(node);
 }
@@ -401,7 +515,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
 	}
 
 	kfree_skb(skb);
-	sdata->u.sta.mshstats.dropped_frames_no_route++;
+	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 }
 
 /**
@@ -475,11 +589,21 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 int mesh_pathtbl_init(void)
 {
 	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mesh_paths)
+		return -ENOMEM;
 	mesh_paths->free_node = &mesh_path_node_free;
 	mesh_paths->copy_node = &mesh_path_node_copy;
 	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
-	if (!mesh_paths)
+
+	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mpp_paths) {
+		mesh_table_free(mesh_paths, true);
 		return -ENOMEM;
+	}
+	mpp_paths->free_node = &mesh_path_node_free;
+	mpp_paths->copy_node = &mesh_path_node_copy;
+	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;
+
	return 0;
 }
 
@@ -511,4 +635,5 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 void mesh_pathtbl_unregister(void)
 {
 	mesh_table_free(mesh_paths, true);
+	mesh_table_free(mpp_paths, true);
 }
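
Usage note (not part of the patch): mpp_path_lookup() returns an RCU-protected pointer, so a caller must hold rcu_read_lock() across the lookup and must not touch the returned mesh_path after dropping the lock. A minimal caller sketch, written as an extra static helper inside net/mac80211/mesh_pathtbl.c (so the existing includes apply); the name mesh_get_proxy is hypothetical, not part of this patch:

	static bool mesh_get_proxy(u8 *dst, u8 *mpp_addr,
				   struct ieee80211_sub_if_data *sdata)
	{
		struct mesh_path *mppath;
		bool found = false;

		rcu_read_lock();
		/* look up the mesh portal (MPP) proxying this external
		 * destination, if any */
		mppath = mpp_path_lookup(dst, sdata);
		if (mppath) {
			/* copy the portal address out while still under the
			 * RCU read lock; the entry may go away once the lock
			 * is dropped */
			memcpy(mpp_addr, mppath->mpp, ETH_ALEN);
			found = true;
		}
		rcu_read_unlock();
		return found;
	}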
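On the write side, a receive path that discovers which portal proxies an external address could record the mapping with mpp_path_add(). A sketch under the same assumptions (helper inside mesh_pathtbl.c; the names mesh_learn_proxy, ext_src and portal_addr are illustrative); note that mpp_path_add() does not update an existing entry, so -EEXIST simply means the mapping is already known:

	static void mesh_learn_proxy(u8 *ext_src, u8 *portal_addr,
				     struct ieee80211_sub_if_data *sdata)
	{
		int err;

		/* mpp_path_add() rejects multicast destinations and our own
		 * address with -ENOTSUPP and an already-known destination
		 * with -EEXIST; neither is fatal here */
		err = mpp_path_add(ext_src, portal_addr, sdata);
		if (err && err != -EEXIST)
			printk(KERN_DEBUG "mesh: proxy path add failed: %d\n",
			       err);
	}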