The following commit has been merged in the master branch:
commit 8a90300f7dbdecb7c15d8de332aec08eaa7e3e59
Author: Linus Lüssing <linus.luessing(a)ascom.ch>
Date: Sat Oct 23 01:48:02 2010 +0000
batman-adv: Adding sysfs ABI documentation for hop_penalty
Signed-off-by: Linus Lüssing <linus.luessing(a)ascom.ch>
diff --git a/sysfs-class-net-mesh b/sysfs-class-net-mesh
index b4cdb60..bd20e14 100644
--- a/sysfs-class-net-mesh
+++ b/sysfs-class-net-mesh
@@ -29,6 +29,13 @@ Description:
Defines the interval in milliseconds in which batman
sends its protocol messages.
+What: /sys/class/net/<mesh_iface>/mesh/hop_penalty
+Date: Oct 2010
+Contact: Linus Lüssing <linus.luessing(a)web.de>
+Description:
+ Defines the penalty which will be applied to an
+ originator message's tq-field on every hop.
+
What: /sys/class/net/<mesh_iface>/mesh/vis_mode
Date: May 2010
Contact: Marek Lindner <lindner_marek(a)yahoo.de>
--
batman-adv
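The attribute documented above can be exercised from userspace like any
other batman-adv sysfs tunable. A minimal sketch, not part of the patch,
assuming a mesh interface named "bat0" and the 0-255 value range that
the sysfs handler in the following commit enforces:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/class/net/bat0/mesh/hop_penalty";
        unsigned int penalty;
        FILE *f;

        /* read the current per-hop penalty */
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%u", &penalty) == 1)
                printf("current hop_penalty: %u\n", penalty);
        fclose(f);

        /* raise the penalty, e.g. on a mobile node (requires root) */
        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return 1;
        }
        fprintf(f, "%u\n", 30);
        fclose(f);
        return 0;
}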
The following commit has been merged in the master branch:
commit cb62435257e0b1aab836ae161d6c0a143c77ecf0
Author: Linus Lüssing <linus.luessing(a)ascom.ch>
Date: Sat Oct 23 01:46:15 2010 +0000
batman-adv: Make hop_penalty configurable via sysfs
When having a mixed topology of both very mobile and rather static
nodes, you are usually best advised to set the originator interval on
all nodes to a level best suited for the most mobile node.
However, if most of the nodes are rather static, this can create a lot
of undesired overhead as a trade-off. If the interval on the static
nodes is instead set too high, a mobile node might be kept as a router
for too long: the other nodes do not switch away from it fast enough,
because of its mobility and the low frequency of OGMs from the static
nodes.
Exposing the hop_penalty is especially useful for the stated scenario: a
static node can keep the default originator interval, while a mobile
node can select a shorter one, resulting in faster route updates towards
this mobile node. Additionally, such a mobile node could select a higher
hop penalty (or even set it to 255 to disable acting as a router for
other nodes) to make itself less desirable, so that other nodes avoid
selecting it as a router.
Signed-off-by: Linus Lüssing <linus.luessing(a)ascom.ch>
diff --git a/bat_sysfs.c b/bat_sysfs.c
index 5ea5c11..e85a922 100644
--- a/bat_sysfs.c
+++ b/bat_sysfs.c
@@ -298,6 +298,7 @@ BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
+BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
#ifdef CONFIG_BATMAN_ADV_DEBUG
BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 3, NULL);
#endif
@@ -309,6 +310,7 @@ static struct bat_attribute *mesh_attrs[] = {
&bat_attr_vis_mode,
&bat_attr_gw_mode,
&bat_attr_orig_interval,
+ &bat_attr_hop_penalty,
#ifdef CONFIG_BATMAN_ADV_DEBUG
&bat_attr_log_level,
#endif
diff --git a/main.h b/main.h
index 05e4ac8..b305f55 100644
--- a/main.h
+++ b/main.h
@@ -52,8 +52,6 @@
#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
#define TQ_TOTAL_BIDRECT_LIMIT 1
-#define TQ_HOP_PENALTY 10
-
#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
#define PACKBUFF_SIZE 2000
diff --git a/send.c b/send.c
index 7097ff0..a6cb644 100644
--- a/send.c
+++ b/send.c
@@ -36,9 +36,10 @@
static void send_outstanding_bcast_packet(struct work_struct *work);
/* apply hop penalty for a normal link */
-static uint8_t hop_penalty(const uint8_t tq)
+static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
{
- return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
+ int hop_penalty = atomic_read(&bat_priv->hop_penalty);
+ return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}
/* when do we schedule our own packet to be sent */
@@ -340,7 +341,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
}
/* apply hop penalty */
- batman_packet->tq = hop_penalty(batman_packet->tq);
+ batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: tq_orig: %i, tq_avg: %i, "
diff --git a/soft-interface.c b/soft-interface.c
index 7a899b5..69c283c 100644
--- a/soft-interface.c
+++ b/soft-interface.c
@@ -593,6 +593,7 @@ struct net_device *softif_create(char *name)
atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
atomic_set(&bat_priv->gw_class, 0);
atomic_set(&bat_priv->orig_interval, 1000);
+ atomic_set(&bat_priv->hop_penalty, 10);
atomic_set(&bat_priv->log_level, 0);
atomic_set(&bat_priv->fragmentation, 1);
atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
diff --git a/types.h b/types.h
index 44b3c07..299769e 100644
--- a/types.h
+++ b/types.h
@@ -131,6 +131,7 @@ struct bat_priv {
atomic_t gw_mode; /* GW_MODE_* */
atomic_t gw_class; /* uint */
atomic_t orig_interval; /* uint */
+ atomic_t hop_penalty; /* uint */
atomic_t log_level; /* uint */
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
--
batman-adv
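The arithmetic behind the new tunable is unchanged; only the former
TQ_HOP_PENALTY constant became configurable. A standalone sketch, not
kernel code, replicating the patched hop_penalty() to show the effect
of different settings: the default of 10 costs roughly 3.9% of the TQ
per hop, while 255 zeroes the TQ immediately, so such a node is never
chosen as a router.

#include <stdio.h>
#include <stdint.h>

#define TQ_MAX_VALUE 255

/* same formula as hop_penalty() in send.c */
static uint8_t apply_hop_penalty(uint8_t tq, int penalty)
{
        return (tq * (TQ_MAX_VALUE - penalty)) / TQ_MAX_VALUE;
}

int main(void)
{
        const int penalties[] = { 10, 50, 255 };
        unsigned int i;
        int hop;

        for (i = 0; i < sizeof(penalties) / sizeof(*penalties); i++) {
                uint8_t tq = TQ_MAX_VALUE;

                printf("penalty %3d:", penalties[i]);
                for (hop = 1; hop <= 4; hop++) {
                        tq = apply_hop_penalty(tq, penalties[i]);
                        printf(" hop%d=%3u", hop, tq);
                }
                printf("\n");
        }
        return 0;
}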
The following commit has been merged in the master branch:
commit 9951febd1503775edb82978f97c636c223b00d4f
Author: Sven Eckelmann <sven.eckelmann(a)gmx.de>
Date: Sat Oct 23 01:42:12 2010 +0000
batman-adv: Move hash callback related function to header
To enable inlining of the function pointers hashdata_compare_cb,
hashdata_choose_cb and hashdata_free_cb, the hash functions which use
them must also be available for inlining in the calling function.
This should increase performance, but also slightly increases the size
of the generated machine code.
Reported-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Sven Eckelmann <sven.eckelmann(a)gmx.de>
diff --git a/hash.c b/hash.c
index 6361a31..7d04987 100644
--- a/hash.c
+++ b/hash.c
@@ -33,30 +33,6 @@ static void hash_init(struct hashtable_t *hash)
hash->table[i] = NULL;
}
-/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
- * called to remove the elements inside of the hash. if you don't remove the
- * elements, memory might be leaked. */
-void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg)
-{
- struct element_t *bucket, *last_bucket;
- int i;
-
- for (i = 0; i < hash->size; i++) {
- bucket = hash->table[i];
-
- while (bucket != NULL) {
- if (free_cb != NULL)
- free_cb(bucket->data, arg);
-
- last_bucket = bucket;
- bucket = bucket->next;
- kfree(last_bucket);
- }
- }
-
- hash_destroy(hash);
-}
-
/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash)
{
@@ -159,70 +135,6 @@ struct hashtable_t *hash_new(int size)
return hash;
}
-/* adds data to the hashtable. returns 0 on success, -1 on error */
-int hash_add(struct hashtable_t *hash, hashdata_compare_cb compare,
- hashdata_choose_cb choose, void *data)
-{
- int index;
- struct element_t *bucket, *prev_bucket = NULL;
-
- if (!hash)
- return -1;
-
- index = choose(data, hash->size);
- bucket = hash->table[index];
-
- while (bucket != NULL) {
- if (compare(bucket->data, data))
- return -1;
-
- prev_bucket = bucket;
- bucket = bucket->next;
- }
-
- /* found the tail of the list, add new element */
- bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
-
- if (bucket == NULL)
- return -1;
-
- bucket->data = data;
- bucket->next = NULL;
-
- /* and link it */
- if (prev_bucket == NULL)
- hash->table[index] = bucket;
- else
- prev_bucket->next = bucket;
-
- hash->elements++;
- return 0;
-}
-
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
-void *hash_find(struct hashtable_t *hash, hashdata_compare_cb compare,
- hashdata_choose_cb choose, void *keydata)
-{
- int index;
- struct element_t *bucket;
-
- if (!hash)
- return NULL;
-
- index = choose(keydata , hash->size);
- bucket = hash->table[index];
-
- while (bucket != NULL) {
- if (compare(bucket->data, keydata))
- return bucket->data;
-
- bucket = bucket->next;
- }
-
- return NULL;
-}
-
/* remove bucket (this might be used in hash_iterate() if you already found the
* bucket you want to delete and don't need the overhead to find it again with
* hash_remove(). But usually, you don't want to use this function, as it
@@ -243,65 +155,3 @@ void *hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t)
return data_save;
}
-
-/* removes data from hash, if found. returns pointer do data on success, so you
- * can remove the used structure yourself, or NULL on error . data could be the
- * structure you use with just the key filled, we just need the key for
- * comparing. */
-void *hash_remove(struct hashtable_t *hash, hashdata_compare_cb compare,
- hashdata_choose_cb choose, void *data)
-{
- struct hash_it_t hash_it_t;
-
- hash_it_t.index = choose(data, hash->size);
- hash_it_t.bucket = hash->table[hash_it_t.index];
- hash_it_t.prev_bucket = NULL;
-
- while (hash_it_t.bucket != NULL) {
- if (compare(hash_it_t.bucket->data, data)) {
- hash_it_t.first_bucket =
- (hash_it_t.bucket ==
- hash->table[hash_it_t.index] ?
- &hash->table[hash_it_t.index] : NULL);
- return hash_remove_bucket(hash, &hash_it_t);
- }
-
- hash_it_t.prev_bucket = hash_it_t.bucket;
- hash_it_t.bucket = hash_it_t.bucket->next;
- }
-
- return NULL;
-}
-
-/* resize the hash, returns the pointer to the new hash or NULL on
- * error. removes the old hash on success. */
-struct hashtable_t *hash_resize(struct hashtable_t *hash,
- hashdata_compare_cb compare,
- hashdata_choose_cb choose, int size)
-{
- struct hashtable_t *new_hash;
- struct element_t *bucket;
- int i;
-
- /* initialize a new hash with the new size */
- new_hash = hash_new(size);
-
- if (new_hash == NULL)
- return NULL;
-
- /* copy the elements */
- for (i = 0; i < hash->size; i++) {
- bucket = hash->table[i];
-
- while (bucket != NULL) {
- hash_add(new_hash, compare, choose, bucket->data);
- bucket = bucket->next;
- }
- }
-
- /* remove hash and eventual overflow buckets but not the content
- * itself. */
- hash_delete(hash, NULL, NULL);
-
- return new_hash;
-}
diff --git a/hash.h b/hash.h
index 85ee12b..efc4c28 100644
--- a/hash.h
+++ b/hash.h
@@ -66,35 +66,163 @@ struct hashtable_t *hash_new(int size);
* fiddles with hash-internals. */
void *hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t);
+/* free only the hashtable and the hash itself. */
+void hash_destroy(struct hashtable_t *hash);
+
/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
* called to remove the elements inside of the hash. if you don't remove the
* elements, memory might be leaked. */
-void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg);
+static inline void hash_delete(struct hashtable_t *hash,
+ hashdata_free_cb free_cb, void *arg)
+{
+ struct element_t *bucket, *last_bucket;
+ int i;
-/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash);
+ for (i = 0; i < hash->size; i++) {
+ bucket = hash->table[i];
+
+ while (bucket != NULL) {
+ if (free_cb != NULL)
+ free_cb(bucket->data, arg);
+
+ last_bucket = bucket;
+ bucket = bucket->next;
+ kfree(last_bucket);
+ }
+ }
+
+ hash_destroy(hash);
+}
/* adds data to the hashtable. returns 0 on success, -1 on error */
-int hash_add(struct hashtable_t *hash, hashdata_compare_cb compare,
- hashdata_choose_cb choose, void *data);
+static inline int hash_add(struct hashtable_t *hash,
+ hashdata_compare_cb compare,
+ hashdata_choose_cb choose, void *data)
+{
+ int index;
+ struct element_t *bucket, *prev_bucket = NULL;
+
+ if (!hash)
+ return -1;
+
+ index = choose(data, hash->size);
+ bucket = hash->table[index];
+
+ while (bucket != NULL) {
+ if (compare(bucket->data, data))
+ return -1;
+
+ prev_bucket = bucket;
+ bucket = bucket->next;
+ }
+
+ /* found the tail of the list, add new element */
+ bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
+
+ if (bucket == NULL)
+ return -1;
+
+ bucket->data = data;
+ bucket->next = NULL;
+
+ /* and link it */
+ if (prev_bucket == NULL)
+ hash->table[index] = bucket;
+ else
+ prev_bucket->next = bucket;
+
+ hash->elements++;
+ return 0;
+}
/* removes data from hash, if found. returns pointer do data on success, so you
* can remove the used structure yourself, or NULL on error . data could be the
* structure you use with just the key filled, we just need the key for
* comparing. */
-void *hash_remove(struct hashtable_t *hash, hashdata_compare_cb compare,
- hashdata_choose_cb choose, void *data);
+static inline void *hash_remove(struct hashtable_t *hash,
+ hashdata_compare_cb compare,
+ hashdata_choose_cb choose, void *data)
+{
+ struct hash_it_t hash_it_t;
+
+ hash_it_t.index = choose(data, hash->size);
+ hash_it_t.bucket = hash->table[hash_it_t.index];
+ hash_it_t.prev_bucket = NULL;
+
+ while (hash_it_t.bucket != NULL) {
+ if (compare(hash_it_t.bucket->data, data)) {
+ hash_it_t.first_bucket =
+ (hash_it_t.bucket ==
+ hash->table[hash_it_t.index] ?
+ &hash->table[hash_it_t.index] : NULL);
+ return hash_remove_bucket(hash, &hash_it_t);
+ }
+
+ hash_it_t.prev_bucket = hash_it_t.bucket;
+ hash_it_t.bucket = hash_it_t.bucket->next;
+ }
+
+ return NULL;
+}
/* finds data, based on the key in keydata. returns the found data on success,
* or NULL on error */
-void *hash_find(struct hashtable_t *hash, hashdata_compare_cb compare,
- hashdata_choose_cb choose, void *keydata);
+static inline void *hash_find(struct hashtable_t *hash,
+ hashdata_compare_cb compare,
+ hashdata_choose_cb choose, void *keydata)
+{
+ int index;
+ struct element_t *bucket;
+
+ if (!hash)
+ return NULL;
+
+ index = choose(keydata , hash->size);
+ bucket = hash->table[index];
+
+ while (bucket != NULL) {
+ if (compare(bucket->data, keydata))
+ return bucket->data;
+
+ bucket = bucket->next;
+ }
+
+ return NULL;
+}
/* resize the hash, returns the pointer to the new hash or NULL on
* error. removes the old hash on success */
-struct hashtable_t *hash_resize(struct hashtable_t *hash,
- hashdata_compare_cb compare,
- hashdata_choose_cb choose, int size);
+static inline struct hashtable_t *hash_resize(struct hashtable_t *hash,
+ hashdata_compare_cb compare,
+ hashdata_choose_cb choose,
+ int size)
+{
+ struct hashtable_t *new_hash;
+ struct element_t *bucket;
+ int i;
+
+ /* initialize a new hash with the new size */
+ new_hash = hash_new(size);
+
+ if (new_hash == NULL)
+ return NULL;
+
+ /* copy the elements */
+ for (i = 0; i < hash->size; i++) {
+ bucket = hash->table[i];
+
+ while (bucket != NULL) {
+ hash_add(new_hash, compare, choose, bucket->data);
+ bucket = bucket->next;
+ }
+ }
+
+ /* remove hash and eventual overflow buckets but not the content
+ * itself. */
+ hash_delete(hash, NULL, NULL);
+
+ return new_hash;
+}
/* iterate though the hash. first element is selected with iter_in NULL. use
* the returned iterator to access the elements until hash_it_t returns NULL. */
--
batman-adv
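To illustrate why moving the bodies into hash.h helps: once the generic
lookup is a static inline in the same translation unit as its callback,
the function pointer becomes a compile-time constant at the call site
and the compiler can inline both levels. A simplified userspace sketch
of the idea (the names below are invented for the example, not taken
from the driver):

#include <string.h>

struct element {
        struct element *next;
        void *data;
};

typedef int (*compare_cb)(const void *a, const void *b);

/* generic bucket walk, visible to the compiler at every call site */
static inline void *bucket_find(struct element *bucket, compare_cb cmp,
                                const void *key)
{
        for (; bucket; bucket = bucket->next)
                if (cmp(bucket->data, key))
                        return bucket->data;
        return NULL;
}

/* nonzero means "equal", matching the hashdata_compare_cb convention */
static int compare_mac(const void *a, const void *b)
{
        return memcmp(a, b, 6) == 0;
}

void *find_mac(struct element *bucket, const unsigned char *mac)
{
        /* cmp is the constant compare_mac here, so both the lookup and
         * the callback can collapse into one direct comparison loop */
        return bucket_find(bucket, compare_mac, mac);
}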
The following commit has been merged in the master branch:
commit 17f3fe54fcc2e24e77e0cf87873f784d2244cd6d
Author: Sven Eckelmann <sven.eckelmann(a)gmx.de>
Date: Sat Oct 23 01:42:14 2010 +0000
batman-adv: Make hash_iterate inlineable
Next to the function pointers, hash_iterate is the most frequently
called hash-related function; it benefits from inlining as it is used
in loops.
Reported-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Sven Eckelmann <sven.eckelmann(a)gmx.de>
diff --git a/hash.c b/hash.c
index 7d04987..bfe943c 100644
--- a/hash.c
+++ b/hash.c
@@ -40,78 +40,6 @@ void hash_destroy(struct hashtable_t *hash)
kfree(hash);
}
-/* iterate though the hash. First element is selected if an iterator
- * initialized with HASHIT() is supplied as iter. Use the returned
- * (or supplied) iterator to access the elements until hash_iterate returns
- * NULL. */
-
-struct hash_it_t *hash_iterate(struct hashtable_t *hash,
- struct hash_it_t *iter)
-{
- if (!hash)
- return NULL;
- if (!iter)
- return NULL;
-
- /* sanity checks first (if our bucket got deleted in the last
- * iteration): */
- if (iter->bucket != NULL) {
- if (iter->first_bucket != NULL) {
- /* we're on the first element and it got removed after
- * the last iteration. */
- if ((*iter->first_bucket) != iter->bucket) {
- /* there are still other elements in the list */
- if ((*iter->first_bucket) != NULL) {
- iter->prev_bucket = NULL;
- iter->bucket = (*iter->first_bucket);
- iter->first_bucket =
- &hash->table[iter->index];
- return iter;
- } else {
- iter->bucket = NULL;
- }
- }
- } else if (iter->prev_bucket != NULL) {
- /*
- * we're not on the first element, and the bucket got
- * removed after the last iteration. the last bucket's
- * next pointer is not pointing to our actual bucket
- * anymore. select the next.
- */
- if (iter->prev_bucket->next != iter->bucket)
- iter->bucket = iter->prev_bucket;
- }
- }
-
- /* now as we are sane, select the next one if there is some */
- if (iter->bucket != NULL) {
- if (iter->bucket->next != NULL) {
- iter->prev_bucket = iter->bucket;
- iter->bucket = iter->bucket->next;
- iter->first_bucket = NULL;
- return iter;
- }
- }
-
- /* if not returned yet, we've reached the last one on the index and have
- * to search forward */
- iter->index++;
- /* go through the entries of the hash table */
- while (iter->index < hash->size) {
- if ((hash->table[iter->index]) != NULL) {
- iter->prev_bucket = NULL;
- iter->bucket = hash->table[iter->index];
- iter->first_bucket = &hash->table[iter->index];
- return iter;
- } else {
- iter->index++;
- }
- }
-
- /* nothing to iterate over anymore */
- return NULL;
-}
-
/* allocates and clears the hash */
struct hashtable_t *hash_new(int size)
{
diff --git a/hash.h b/hash.h
index efc4c28..a8e4dd1 100644
--- a/hash.h
+++ b/hash.h
@@ -224,9 +224,75 @@ static inline struct hashtable_t *hash_resize(struct hashtable_t *hash,
return new_hash;
}
-/* iterate though the hash. first element is selected with iter_in NULL. use
- * the returned iterator to access the elements until hash_it_t returns NULL. */
-struct hash_it_t *hash_iterate(struct hashtable_t *hash,
- struct hash_it_t *iter_in);
+/* iterate though the hash. First element is selected if an iterator
+ * initialized with HASHIT() is supplied as iter. Use the returned
+ * (or supplied) iterator to access the elements until hash_iterate returns
+ * NULL. */
+static inline struct hash_it_t *hash_iterate(struct hashtable_t *hash,
+ struct hash_it_t *iter)
+{
+ if (!hash)
+ return NULL;
+ if (!iter)
+ return NULL;
+
+ /* sanity checks first (if our bucket got deleted in the last
+ * iteration): */
+ if (iter->bucket != NULL) {
+ if (iter->first_bucket != NULL) {
+ /* we're on the first element and it got removed after
+ * the last iteration. */
+ if ((*iter->first_bucket) != iter->bucket) {
+ /* there are still other elements in the list */
+ if ((*iter->first_bucket) != NULL) {
+ iter->prev_bucket = NULL;
+ iter->bucket = (*iter->first_bucket);
+ iter->first_bucket =
+ &hash->table[iter->index];
+ return iter;
+ } else {
+ iter->bucket = NULL;
+ }
+ }
+ } else if (iter->prev_bucket != NULL) {
+ /*
+ * we're not on the first element, and the bucket got
+ * removed after the last iteration. the last bucket's
+ * next pointer is not pointing to our actual bucket
+ * anymore. select the next.
+ */
+ if (iter->prev_bucket->next != iter->bucket)
+ iter->bucket = iter->prev_bucket;
+ }
+ }
+
+ /* now as we are sane, select the next one if there is some */
+ if (iter->bucket != NULL) {
+ if (iter->bucket->next != NULL) {
+ iter->prev_bucket = iter->bucket;
+ iter->bucket = iter->bucket->next;
+ iter->first_bucket = NULL;
+ return iter;
+ }
+ }
+
+ /* if not returned yet, we've reached the last one on the index and have
+ * to search forward */
+ iter->index++;
+ /* go through the entries of the hash table */
+ while (iter->index < hash->size) {
+ if ((hash->table[iter->index]) != NULL) {
+ iter->prev_bucket = NULL;
+ iter->bucket = hash->table[iter->index];
+ iter->first_bucket = &hash->table[iter->index];
+ return iter;
+ } else {
+ iter->index++;
+ }
+ }
+
+ /* nothing to iterate over anymore */
+ return NULL;
+}
#endif /* _NET_BATMAN_ADV_HASH_H_ */
--
batman-adv
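For context, the caller pattern that this change speeds up looks
roughly as follows; a kernel-context fragment, not a standalone
program, where process() merely stands in for per-element work. Per the
comment moved here from hash.c, HASHIT() initializes an iterator so
that the first hash_iterate() call selects the first element:

static void walk_hash(struct hashtable_t *hash)
{
        HASHIT(hashit);

        while (hash_iterate(hash, &hashit)) {
                process(hashit.bucket->data);   /* hypothetical work */
        }
}

With hash_iterate() now static inline, the compiler can flatten this
per-element call in such loops instead of paying a function call for
every stored element.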