From: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
This patch removes the (ugly and racy) packet receiving thread and the kernel socket usage. Instead, packets are received directly by registering the ethernet type and handling skbs instead of self-allocated buffers.
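As a rough illustration of that mechanism (a condensed sketch of what the main.c and hard-interface.c hunks below actually do, not additional code):

    /* register a handler for the batman ethertype; the networking core
     * then delivers every matching skb directly to batman_skb_recv() */
    static struct packet_type batman_adv_packet_type __read_mostly = {
            .type = cpu_to_be16(ETH_P_BATMAN),
            .func = batman_skb_recv,
    };

    dev_add_pack(&batman_adv_packet_type);      /* in init_module() */
    dev_remove_pack(&batman_adv_packet_type);   /* in cleanup_module() */

batman_skb_recv() then dispatches on the batman packet type and hands the skb to the matching recv_*_packet() handler without copying the payload.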
Some consequences and comments:
* we don't copy the payload data when forwarding/sending/receiving data anymore. This should boost performance.
* packets from/to different interfaces can be (theoretically) processed simultaneously. Only the big originator hash lock might be in the way.
* no more polling or sleeping/wakeup/scheduling issues when receiving packets
* this might introduce new race conditions.
* aggregation and vis code still use packet buffers and are not (yet) converted.
* all spinlocks were converted to irqsave/restore versions to solve some livelock issues when preempted. This might be overkill; some of these locks might be reverted later.
* skb copies are only done if necessary to avoid overhead (see the sketch after this list)
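The "copy only if necessary" point follows this pattern in the new receive handlers (sketch taken from the ICMP handler below; the unicast handler uses the same idea):

    /* only duplicate the skb when its data cannot be modified in place */
    if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) {
            struct sk_buff *skb_old = skb;

            skb = skb_copy(skb, GFP_ATOMIC);
            if (!skb)
                    return NET_RX_DROP;
            icmp_packet = (struct icmp_packet *)skb->data;
            kfree_skb(skb_old);
    }
    /* ... now it is safe to rewrite header fields and send the skb ... */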
performance differences:
* we made some "benchmarks" with Intel laptops.
* bandwidth on Gigabit Ethernet increased from ~500 MBit/s to ~920 MBit/s
* ping latency decreased from ~2 ms to ~0.2 ms
I did some tests in my 9-node qemu environment and could confirm that the usual sending/receiving, forwarding, vis, batctl ping etc. work.
Signed-off-by: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
Acked-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Acked-by: Marek Lindner <lindner_marek@yahoo.de>
Acked-by: Linus Lüssing <linus.luessing@web.de>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
---
 drivers/staging/batman-adv/aggregation.c    |   13 +-
 drivers/staging/batman-adv/device.c         |   24 +-
 drivers/staging/batman-adv/hard-interface.c |  154 ++++---
 drivers/staging/batman-adv/hard-interface.h |    4 +
 drivers/staging/batman-adv/main.c           |   29 +-
 drivers/staging/batman-adv/originator.c     |   18 +-
 drivers/staging/batman-adv/proc.c           |   10 +-
 drivers/staging/batman-adv/routing.c        |  561 ++++++++++++---------
 drivers/staging/batman-adv/routing.h        |    8 +-
 drivers/staging/batman-adv/send.c           |   93 +++--
 drivers/staging/batman-adv/send.h           |    5 +-
 drivers/staging/batman-adv/soft-interface.c |   76 ++--
 drivers/staging/batman-adv/soft-interface.h |    3 +-
 drivers/staging/batman-adv/types.h          |    2 +-
 drivers/staging/batman-adv/vis.c            |   55 ++--
 15 files changed, 548 insertions(+), 507 deletions(-)
diff --git a/drivers/staging/batman-adv/aggregation.c b/drivers/staging/batman-adv/aggregation.c index 9c6e681..7917322 100644 --- a/drivers/staging/batman-adv/aggregation.c +++ b/drivers/staging/batman-adv/aggregation.c @@ -96,6 +96,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int own_packet) { struct forw_packet *forw_packet_aggr; + unsigned long flags;
forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC); if (!forw_packet_aggr) @@ -115,6 +116,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, packet_buff, forw_packet_aggr->packet_len);
+ forw_packet_aggr->skb = NULL; forw_packet_aggr->own = own_packet; forw_packet_aggr->if_incoming = if_incoming; forw_packet_aggr->num_packets = 0; @@ -126,9 +128,9 @@ static void new_aggregated_packet(unsigned char *packet_buff, forw_packet_aggr->direct_link_flags |= 1;
/* add new packet to packet list */ - spin_lock(&forw_bat_list_lock); + spin_lock_irqsave(&forw_bat_list_lock, flags); hlist_add_head(&forw_packet_aggr->list, &forw_bat_list); - spin_unlock(&forw_bat_list_lock); + spin_unlock_irqrestore(&forw_bat_list_lock, flags);
/* start timer for this packet */ INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, @@ -168,9 +170,10 @@ void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len, struct batman_packet *batman_packet = (struct batman_packet *)packet_buff; bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0; + unsigned long flags;
/* find position for the packet in the forward queue */ - spin_lock(&forw_bat_list_lock); + spin_lock_irqsave(&forw_bat_list_lock, flags); /* own packets are not to be aggregated */ if ((atomic_read(&aggregation_enabled)) && (!own_packet)) { hlist_for_each_entry(forw_packet_pos, tmp_node, &forw_bat_list, @@ -191,7 +194,7 @@ void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len, * suitable aggregation packet found */ if (forw_packet_aggr == NULL) { /* the following section can run without the lock */ - spin_unlock(&forw_bat_list_lock); + spin_unlock_irqrestore(&forw_bat_list_lock, flags); new_aggregated_packet(packet_buff, packet_len, send_time, direct_link, if_incoming, own_packet); @@ -199,7 +202,7 @@ void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len, aggregate(forw_packet_aggr, packet_buff, packet_len, direct_link); - spin_unlock(&forw_bat_list_lock); + spin_unlock_irqrestore(&forw_bat_list_lock, flags); } }
diff --git a/drivers/staging/batman-adv/device.c b/drivers/staging/batman-adv/device.c index 92cf8d5..ab2b0b1 100644 --- a/drivers/staging/batman-adv/device.c +++ b/drivers/staging/batman-adv/device.c @@ -133,8 +133,9 @@ int bat_device_release(struct inode *inode, struct file *file) (struct device_client *)file->private_data; struct device_packet *device_packet; struct list_head *list_pos, *list_pos_tmp; + unsigned long flags;
- spin_lock(&device_client->lock); + spin_lock_irqsave(&device_client->lock, flags);
/* for all packets in the queue ... */ list_for_each_safe(list_pos, list_pos_tmp, &device_client->queue_list) { @@ -146,7 +147,7 @@ int bat_device_release(struct inode *inode, struct file *file) }
device_client_hash[device_client->index] = NULL; - spin_unlock(&device_client->lock); + spin_unlock_irqrestore(&device_client->lock, flags);
kfree(device_client); dec_module_count(); @@ -161,6 +162,7 @@ ssize_t bat_device_read(struct file *file, char __user *buf, size_t count, (struct device_client *)file->private_data; struct device_packet *device_packet; int error; + unsigned long flags;
if ((file->f_flags & O_NONBLOCK) && (device_client->queue_len == 0)) return -EAGAIN; @@ -177,14 +179,14 @@ ssize_t bat_device_read(struct file *file, char __user *buf, size_t count, if (error) return error;
- spin_lock(&device_client->lock); + spin_lock_irqsave(&device_client->lock, flags);
device_packet = list_first_entry(&device_client->queue_list, struct device_packet, list); list_del(&device_packet->list); device_client->queue_len--;
- spin_unlock(&device_client->lock); + spin_unlock_irqrestore(&device_client->lock, flags);
error = __copy_to_user(buf, &device_packet->icmp_packet, sizeof(struct icmp_packet)); @@ -205,6 +207,7 @@ ssize_t bat_device_write(struct file *file, const char __user *buff, struct icmp_packet icmp_packet; struct orig_node *orig_node; struct batman_if *batman_if; + unsigned long flags;
if (len < sizeof(struct icmp_packet)) { bat_dbg(DBG_BATMAN, "batman-adv:Error - can't send packet from char device: invalid packet size\n"); @@ -239,7 +242,7 @@ ssize_t bat_device_write(struct file *file, const char __user *buff, if (atomic_read(&module_state) != MODULE_ACTIVE) goto dst_unreach;
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet.dst));
if (!orig_node) @@ -261,11 +264,11 @@ ssize_t bat_device_write(struct file *file, const char __user *buff, sizeof(struct icmp_packet), batman_if, orig_node->router->addr);
- spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); goto out;
unlock: - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); dst_unreach: icmp_packet.msg_type = DESTINATION_UNREACHABLE; bat_device_add_packet(device_client, &icmp_packet); @@ -290,6 +293,7 @@ void bat_device_add_packet(struct device_client *device_client, struct icmp_packet *icmp_packet) { struct device_packet *device_packet; + unsigned long flags;
device_packet = kmalloc(sizeof(struct device_packet), GFP_KERNEL);
@@ -300,12 +304,12 @@ void bat_device_add_packet(struct device_client *device_client, memcpy(&device_packet->icmp_packet, icmp_packet, sizeof(struct icmp_packet));
- spin_lock(&device_client->lock); + spin_lock_irqsave(&device_client->lock, flags);
/* while waiting for the lock the device_client could have been * deleted */ if (!device_client_hash[icmp_packet->uid]) { - spin_unlock(&device_client->lock); + spin_unlock_irqrestore(&device_client->lock, flags); kfree(device_packet); return; } @@ -322,7 +326,7 @@ void bat_device_add_packet(struct device_client *device_client, device_client->queue_len--; }
- spin_unlock(&device_client->lock); + spin_unlock_irqrestore(&device_client->lock, flags);
wake_up(&device_client->queue_wait); } diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index 7c88592..cc59c30 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c @@ -153,9 +153,6 @@ void hardif_deactivate_interface(struct batman_if *batman_if) if (batman_if->if_active != IF_ACTIVE) return;
- if (batman_if->raw_sock) - sock_release(batman_if->raw_sock); - /** * batman_if->net_dev has been acquired by dev_get_by_name() in * proc_interfaces_write() and has to be unreferenced. @@ -164,9 +161,6 @@ void hardif_deactivate_interface(struct batman_if *batman_if) if (batman_if->net_dev) dev_put(batman_if->net_dev);
- batman_if->raw_sock = NULL; - batman_if->net_dev = NULL; - batman_if->if_active = IF_INACTIVE; active_ifs--;
@@ -177,9 +171,6 @@ void hardif_deactivate_interface(struct batman_if *batman_if) /* (re)activate given interface. */ static void hardif_activate_interface(struct batman_if *batman_if) { - struct sockaddr_ll bind_addr; - int retval; - if (batman_if->if_active != IF_INACTIVE) return;
@@ -191,35 +182,8 @@ static void hardif_activate_interface(struct batman_if *batman_if) if (!batman_if->net_dev) goto dev_err;
- retval = sock_create_kern(PF_PACKET, SOCK_RAW, - __constant_htons(ETH_P_BATMAN), - &batman_if->raw_sock); - - if (retval < 0) { - printk(KERN_ERR "batman-adv:Can't create raw socket: %i\n", - retval); - goto sock_err; - } - - bind_addr.sll_family = AF_PACKET; - bind_addr.sll_ifindex = batman_if->net_dev->ifindex; - bind_addr.sll_protocol = 0; /* is set by the kernel */ - - retval = kernel_bind(batman_if->raw_sock, - (struct sockaddr *)&bind_addr, sizeof(bind_addr)); - - if (retval < 0) { - printk(KERN_ERR "batman-adv:Can't create bind raw socket: %i\n", - retval); - goto bind_err; - } - check_known_mac_addr(batman_if->net_dev->dev_addr);
- batman_if->raw_sock->sk->sk_user_data = - batman_if->raw_sock->sk->sk_data_ready; - batman_if->raw_sock->sk->sk_data_ready = batman_data_ready; - addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, @@ -239,12 +203,7 @@ static void hardif_activate_interface(struct batman_if *batman_if)
return;
-bind_err: - sock_release(batman_if->raw_sock); -sock_err: - dev_put(batman_if->net_dev); dev_err: - batman_if->raw_sock = NULL; batman_if->net_dev = NULL; }
@@ -318,6 +277,7 @@ int hardif_add_interface(char *dev, int if_num) struct batman_if *batman_if; struct batman_packet *batman_packet; struct orig_node *orig_node; + unsigned long flags; HASHIT(hashit);
batman_if = kmalloc(sizeof(struct batman_if), GFP_KERNEL); @@ -327,7 +287,6 @@ int hardif_add_interface(char *dev, int if_num) return -1; }
- batman_if->raw_sock = NULL; batman_if->net_dev = NULL;
if ((if_num == 0) && (num_hna > 0)) @@ -375,17 +334,17 @@ int hardif_add_interface(char *dev, int if_num)
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) { orig_node = hashit.bucket->data; if (resize_orig(orig_node, if_num) == -1) { - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); goto out; } }
- spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags);
if (!hardif_is_interface_up(batman_if->dev)) printk(KERN_ERR "batman-adv:Not using interface %s (retrying later): interface not active\n", batman_if->dev); @@ -443,6 +402,111 @@ out: return NOTIFY_DONE; }
+/* find batman interface by netdev. assumes rcu_read_lock on */ +static struct batman_if *find_batman_if(struct net_device *dev) +{ + struct batman_if *batman_if; + + rcu_read_lock(); + list_for_each_entry_rcu(batman_if, &if_list, list) { + if (batman_if->net_dev == dev) { + rcu_read_unlock(); + return batman_if; + } + } + rcu_read_unlock(); + return NULL; +} + + +/* receive a packet with the batman ethertype coming on a hard + * interface */ +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev) +{ + struct batman_packet *batman_packet; + struct batman_if *batman_if; + struct net_device_stats *stats; + int ret; + + skb = skb_share_check(skb, GFP_ATOMIC); + + if (skb == NULL) + goto err_free; + + /* packet should hold at least type and version */ + if (unlikely(skb_headlen(skb) < 2)) + goto err_free; + + /* expect a valid ethernet header here. */ + if (unlikely(skb->mac_len != sizeof(struct ethhdr) + || !skb_mac_header(skb))) + goto err_free; + + batman_if = find_batman_if(skb->dev); + if (!batman_if) + goto err_free; + + stats = &skb->dev->stats; + stats->rx_packets++; + stats->rx_bytes += skb->len; + + batman_packet = (struct batman_packet *)skb->data; + + if (batman_packet->version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, + "Drop packet: incompatible batman version (%i)\n", + batman_packet->version); + goto err_free; + } + + /* all receive handlers return whether they received or reused + * the supplied skb. if not, we have to free the skb. */ + + switch (batman_packet->packet_type) { + /* batman originator packet */ + case BAT_PACKET: + ret = recv_bat_packet(skb, batman_if); + break; + + /* batman icmp packet */ + case BAT_ICMP: + ret = recv_icmp_packet(skb); + break; + + /* unicast packet */ + case BAT_UNICAST: + ret = recv_unicast_packet(skb); + break; + + /* broadcast packet */ + case BAT_BCAST: + ret = recv_bcast_packet(skb); + break; + + /* vis packet */ + case BAT_VIS: + ret = recv_vis_packet(skb); + break; + default: + ret = NET_RX_DROP; + } + if (ret == NET_RX_DROP) + kfree_skb(skb); + + /* return NET_RX_SUCCESS in any case as we + * most probably dropped the packet for + * routing-logical reasons. */ + + return NET_RX_SUCCESS; + +err_free: + kfree_skb(skb); + return NET_RX_DROP; + +} + + struct notifier_block hard_if_notifier = { .notifier_call = hard_if_event, }; diff --git a/drivers/staging/batman-adv/hard-interface.h b/drivers/staging/batman-adv/hard-interface.h index 742358c..97c6ecb 100644 --- a/drivers/staging/batman-adv/hard-interface.h +++ b/drivers/staging/batman-adv/hard-interface.h @@ -32,5 +32,9 @@ void hardif_deactivate_interface(struct batman_if *batman_if); char hardif_get_active_if_num(void); void hardif_check_interfaces_status(void); void hardif_check_interfaces_status_wq(struct work_struct *work); +int batman_skb_recv(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *ptype, + struct net_device *orig_dev); int hardif_min_mtu(void); void update_min_mtu(void); diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c index 434c600..c733504 100644 --- a/drivers/staging/batman-adv/main.c +++ b/drivers/staging/batman-adv/main.c @@ -50,11 +50,14 @@ int16_t num_ifs;
struct net_device *soft_device;
-static struct task_struct *kthread_task; - unsigned char broadcastAddr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; atomic_t module_state;
+static struct packet_type batman_adv_packet_type __read_mostly = { + .type = cpu_to_be16(ETH_P_BATMAN), + .func = batman_skb_recv, +}; + struct workqueue_struct *bat_event_workqueue;
#ifdef CONFIG_BATMAN_ADV_DEBUG @@ -113,6 +116,7 @@ int init_module(void) }
register_netdevice_notifier(&hard_if_notifier); + dev_add_pack(&batman_adv_packet_type);
printk(KERN_INFO "batman-adv:B.A.T.M.A.N. advanced %s%s (compatibility version %i) loaded \n", SOURCE_VERSION, REVISION_VERSION_STR, COMPAT_VERSION); @@ -135,6 +139,8 @@ void cleanup_module(void) soft_device = NULL; }
+ dev_remove_pack(&batman_adv_packet_type); + unregister_netdevice_notifier(&hard_if_notifier); cleanup_procfs();
@@ -162,16 +168,6 @@ void activate_module(void) if (vis_init() < 1) goto err;
- /* (re)start kernel thread for packet processing */ - if (!kthread_task) { - kthread_task = kthread_run(packet_recv_thread, NULL, "batman-adv"); - - if (IS_ERR(kthread_task)) { - printk(KERN_ERR "batman-adv:Unable to start packet receive thread\n"); - kthread_task = NULL; - } - } - update_min_mtu(); atomic_set(&module_state, MODULE_ACTIVE); goto end; @@ -193,14 +189,7 @@ void shutdown_module(void)
vis_quit();
- /* deactivate kernel thread for packet processing (if running) */ - if (kthread_task) { - atomic_set(&exit_cond, 1); - wake_up_interruptible(&thread_wait); - kthread_stop(kthread_task); - - kthread_task = NULL; - } + /* TODO: unregister BATMAN pack */
originator_free();
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c index 849e480..3fa7baf 100644 --- a/drivers/staging/batman-adv/originator.c +++ b/drivers/staging/batman-adv/originator.c @@ -37,35 +37,38 @@ static void start_purge_timer(void)
int originator_init(void) { + unsigned long flags; if (orig_hash) return 1;
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); orig_hash = hash_new(128, compare_orig, choose_orig);
if (!orig_hash) goto err;
- spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); start_purge_timer(); return 1;
err: - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return 0; }
void originator_free(void) { + unsigned long flags; + if (!orig_hash) return;
cancel_delayed_work_sync(&purge_orig_wq);
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); hash_delete(orig_hash, free_orig_node); orig_hash = NULL; - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); }
struct neigh_node * @@ -243,8 +246,9 @@ void purge_orig(struct work_struct *work) { HASHIT(hashit); struct orig_node *orig_node; + unsigned long flags;
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags);
/* for all origins... */ while (hash_iterate(orig_hash, &hashit)) { @@ -255,7 +259,7 @@ void purge_orig(struct work_struct *work) } }
- spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags);
start_purge_timer(); } diff --git a/drivers/staging/batman-adv/proc.c b/drivers/staging/batman-adv/proc.c index d9fde94..61e1d0d 100644 --- a/drivers/staging/batman-adv/proc.c +++ b/drivers/staging/batman-adv/proc.c @@ -189,6 +189,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset) struct neigh_node *neigh_node; int batman_count = 0; char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN]; + unsigned long flags;
rcu_read_lock(); if (list_empty(&if_list)) { @@ -211,7 +212,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset) ((struct batman_if *)if_list.next)->addr_str);
rcu_read_unlock(); - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
@@ -242,7 +243,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
}
- spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags);
if (batman_count == 0) seq_printf(seq, "No batman nodes in range ... \n"); @@ -376,6 +377,7 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset) HLIST_HEAD(vis_if_list); int i; char tmp_addr_str[ETH_STR_LEN]; + unsigned long flags;
rcu_read_lock(); if (list_empty(&if_list) || (!is_vis_server())) { @@ -385,7 +387,7 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
rcu_read_unlock();
- spin_lock(&vis_hash_lock); + spin_lock_irqsave(&vis_hash_lock, flags); while (hash_iterate(vis_hash, &hashit)) { info = hashit.bucket->data; entries = (struct vis_info_entry *) @@ -402,7 +404,7 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset) proc_vis_read_prim_sec(seq, &vis_if_list); seq_printf(seq, "\n"); } - spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags);
end: return 0; diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c index 25a1424..9cf1ee5 100644 --- a/drivers/staging/batman-adv/routing.c +++ b/drivers/staging/batman-adv/routing.c @@ -36,15 +36,16 @@
DECLARE_WAIT_QUEUE_HEAD(thread_wait);
-static atomic_t data_ready_cond; atomic_t exit_cond; + void slide_own_bcast_window(struct batman_if *batman_if) { HASHIT(hashit); struct orig_node *orig_node; TYPE_OF_WORD *word; + unsigned long flags;
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) { orig_node = hashit.bucket->data; @@ -55,7 +56,7 @@ void slide_own_bcast_window(struct batman_if *batman_if) bit_packet_count(word); }
- spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); }
static void update_HNA(struct orig_node *orig_node, @@ -365,10 +366,9 @@ static char count_real_packets(struct ethhdr *ethhdr, }
void receive_bat_packet(struct ethhdr *ethhdr, - struct batman_packet *batman_packet, - unsigned char *hna_buff, - int hna_buff_len, - struct batman_if *if_incoming) + struct batman_packet *batman_packet, + unsigned char *hna_buff, int hna_buff_len, + struct batman_if *if_incoming) { struct batman_if *batman_if; struct orig_node *orig_neigh_node, *orig_node; @@ -566,95 +566,118 @@ void receive_bat_packet(struct ethhdr *ethhdr, 0, hna_buff_len, if_incoming); }
- -static int receive_raw_packet(struct socket *raw_sock, - unsigned char *packet_buff, int packet_buff_len) +int recv_bat_packet(struct sk_buff *skb, + struct batman_if *batman_if) { - struct kvec iov; - struct msghdr msg; + struct ethhdr *ethhdr; + unsigned long flags;
- iov.iov_base = packet_buff; - iov.iov_len = packet_buff_len; + /* drop packet if it has not necessary minimum size */ + if (skb_headlen(skb) < sizeof(struct batman_packet)) + return NET_RX_DROP;
- msg.msg_flags = MSG_DONTWAIT; /* non-blocking */ - msg.msg_name = NULL; - msg.msg_namelen = 0; - msg.msg_control = NULL; + ethhdr = (struct ethhdr *)skb_mac_header(skb);
- return kernel_recvmsg(raw_sock, &msg, &iov, 1, packet_buff_len, - MSG_DONTWAIT); -} - -static void recv_bat_packet(struct ethhdr *ethhdr, - unsigned char *packet_buff, - int result, - struct batman_if *batman_if) -{ /* packet with broadcast indication but unicast recipient */ if (!is_bcast(ethhdr->h_dest)) - return; + return NET_RX_DROP;
/* packet with broadcast sender address */ if (is_bcast(ethhdr->h_source)) - return; - - /* drop packet if it has not at least one batman packet as payload */ - if (result < sizeof(struct ethhdr) + sizeof(struct batman_packet)) - return; - - spin_lock(&orig_hash_lock); + return NET_RX_DROP; + + spin_lock_irqsave(&orig_hash_lock, flags); + /* TODO: we use headlen instead of "length", because + * only this data is paged in. */ + /* TODO: is another skb_copy needed here? there will be + * written on the data, but nobody (?) should further use + * this data */ receive_aggr_bat_packet(ethhdr, - packet_buff + sizeof(struct ethhdr), - result - sizeof(struct ethhdr), + skb->data, + skb_headlen(skb), batman_if); - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); + + kfree_skb(skb); + return NET_RX_SUCCESS; }
-static void recv_my_icmp_packet(struct ethhdr *ethhdr, - struct icmp_packet *icmp_packet, - unsigned char *packet_buff, - int result) +static int recv_my_icmp_packet(struct sk_buff *skb) { struct orig_node *orig_node; + struct icmp_packet *icmp_packet; + struct ethhdr *ethhdr; + struct sk_buff *skb_old; + struct batman_if *batman_if; + int ret; + unsigned long flags; + uint8_t dstaddr[ETH_ALEN]; + + icmp_packet = (struct icmp_packet *) skb->data; + ethhdr = (struct ethhdr *) skb_mac_header(skb);
/* add data to device queue */ if (icmp_packet->msg_type != ECHO_REQUEST) { bat_device_receive_packet(icmp_packet); - return; + return NET_RX_DROP; }
/* answer echo request (ping) */ /* get routing information */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet->orig)); + ret = NET_RX_DROP;
if ((orig_node != NULL) && (orig_node->batman_if != NULL) && (orig_node->router != NULL)) { + + /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + batman_if = orig_node->batman_if; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + spin_unlock_irqrestore(&orig_hash_lock, flags); + + /* create a copy of the skb, if needed, to modify it. */ + skb_old = NULL; + if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) { + skb_old = skb; + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) + return NET_RX_DROP; + icmp_packet = (struct icmp_packet *) skb->data; + kfree_skb(skb_old); + } + memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); icmp_packet->msg_type = ECHO_REPLY; icmp_packet->ttl = TTL;
- send_raw_packet(packet_buff + sizeof(struct ethhdr), - result - sizeof(struct ethhdr), - orig_node->batman_if, - orig_node->router->addr); - } + send_skb_packet(skb, batman_if, dstaddr); + ret = NET_RX_SUCCESS;
- spin_unlock(&orig_hash_lock); - return; + } else + spin_unlock_irqrestore(&orig_hash_lock, flags); + + return ret; }
-static void recv_icmp_ttl_exceeded(struct icmp_packet *icmp_packet, - struct ethhdr *ethhdr, - unsigned char *packet_buff, - int result, - struct batman_if *batman_if) +static int recv_icmp_ttl_exceeded(struct sk_buff *skb) { unsigned char src_str[ETH_STR_LEN], dst_str[ETH_STR_LEN]; struct orig_node *orig_node; + struct icmp_packet *icmp_packet; + struct ethhdr *ethhdr; + struct sk_buff *skb_old; + struct batman_if *batman_if; + int ret; + unsigned long flags; + uint8_t dstaddr[ETH_ALEN]; + + icmp_packet = (struct icmp_packet *) skb->data; + ethhdr = (struct ethhdr *) skb_mac_header(skb);
addr_to_string(src_str, icmp_packet->orig); addr_to_string(dst_str, icmp_packet->dst); @@ -663,74 +686,93 @@ static void recv_icmp_ttl_exceeded(struct icmp_packet *icmp_packet,
/* send TTL exceeded if packet is an echo request (traceroute) */ if (icmp_packet->msg_type != ECHO_REQUEST) - return; + return NET_RX_DROP;
/* get routing information */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); orig_node = ((struct orig_node *) hash_find(orig_hash, icmp_packet->orig)); + ret = NET_RX_DROP;
if ((orig_node != NULL) && (orig_node->batman_if != NULL) && (orig_node->router != NULL)) { + + /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + batman_if = orig_node->batman_if; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + spin_unlock_irqrestore(&orig_hash_lock, flags); + + /* create a copy of the skb, if needed, to modify it. */ + if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) { + skb_old = skb; + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) + return NET_RX_DROP; + icmp_packet = (struct icmp_packet *) skb->data; + kfree_skb(skb_old); + } + memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); icmp_packet->msg_type = TTL_EXCEEDED; icmp_packet->ttl = TTL;
- send_raw_packet(packet_buff + sizeof(struct ethhdr), - result - sizeof(struct ethhdr), - orig_node->batman_if, - orig_node->router->addr); + send_skb_packet(skb, batman_if, dstaddr); + ret = NET_RX_SUCCESS;
- } + } else + spin_unlock_irqrestore(&orig_hash_lock, flags);
- spin_unlock(&orig_hash_lock); + return ret; }
- -static void recv_icmp_packet(struct ethhdr *ethhdr, - unsigned char *packet_buff, - int result, - struct batman_if *batman_if) +int recv_icmp_packet(struct sk_buff *skb) { struct icmp_packet *icmp_packet; + struct ethhdr *ethhdr; struct orig_node *orig_node; + struct sk_buff *skb_old; + struct batman_if *batman_if; + int hdr_size = sizeof(struct icmp_packet); + int ret; + unsigned long flags; + uint8_t dstaddr[ETH_ALEN]; + + /* drop packet if it has not necessary minimum size */ + if (skb_headlen(skb) < hdr_size) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */ if (is_bcast(ethhdr->h_dest)) - return; + return NET_RX_DROP;
/* packet with broadcast sender address */ if (is_bcast(ethhdr->h_source)) - return; + return NET_RX_DROP;
/* not for me */ if (!is_my_mac(ethhdr->h_dest)) - return; + return NET_RX_DROP;
- /* drop packet if it has not necessary minimum size */ - if (result < sizeof(struct ethhdr) + sizeof(struct icmp_packet)) - return; - - icmp_packet = (struct icmp_packet *) - (packet_buff + sizeof(struct ethhdr)); + icmp_packet = (struct icmp_packet *) skb->data;
/* packet for me */ if (is_my_mac(icmp_packet->dst)) - recv_my_icmp_packet(ethhdr, icmp_packet, packet_buff, result); + return recv_my_icmp_packet(skb);
/* TTL exceeded */ - if (icmp_packet->ttl < 2) { - recv_icmp_ttl_exceeded(icmp_packet, ethhdr, packet_buff, result, - batman_if); - return; + if (icmp_packet->ttl < 2) + return recv_icmp_ttl_exceeded(skb);
- } + ret = NET_RX_DROP;
/* get routing information */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); orig_node = ((struct orig_node *) hash_find(orig_hash, icmp_packet->dst));
@@ -738,133 +780,169 @@ static void recv_icmp_packet(struct ethhdr *ethhdr, (orig_node->batman_if != NULL) && (orig_node->router != NULL)) {
+ /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + batman_if = orig_node->batman_if; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + spin_unlock_irqrestore(&orig_hash_lock, flags); + + /* create a copy of the skb, if needed, to modify it. */ + if (!skb_clone_writable(skb, sizeof(struct icmp_packet))) { + skb_old = skb; + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) + return NET_RX_DROP; + icmp_packet = (struct icmp_packet *) skb->data; + kfree_skb(skb_old); + } + /* decrement ttl */ icmp_packet->ttl--;
/* route it */ - send_raw_packet(packet_buff + sizeof(struct ethhdr), - result - sizeof(struct ethhdr), - orig_node->batman_if, - orig_node->router->addr); - } - spin_unlock(&orig_hash_lock); + send_skb_packet(skb, batman_if, dstaddr); + ret = NET_RX_SUCCESS; + + } else + spin_unlock_irqrestore(&orig_hash_lock, flags); + + return ret; }
-static void recv_unicast_packet(struct ethhdr *ethhdr, - unsigned char *packet_buff, - int result, - struct batman_if *batman_if) +int recv_unicast_packet(struct sk_buff *skb) { struct unicast_packet *unicast_packet; unsigned char src_str[ETH_STR_LEN], dst_str[ETH_STR_LEN]; struct orig_node *orig_node; - int hdr_size = sizeof(struct ethhdr) + sizeof(struct unicast_packet); + struct ethhdr *ethhdr; + struct batman_if *batman_if; + struct sk_buff *skb_old; + uint8_t dstaddr[ETH_ALEN]; + int hdr_size = sizeof(struct unicast_packet); + int ret; + unsigned long flags; + + /* drop packet if it has not necessary minimum size */ + if (skb_headlen(skb) < hdr_size) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *) skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */ if (is_bcast(ethhdr->h_dest)) - return; + return NET_RX_DROP;
/* packet with broadcast sender address */ if (is_bcast(ethhdr->h_source)) - return; + return NET_RX_DROP;
/* not for me */ if (!is_my_mac(ethhdr->h_dest)) - return; - - /* drop packet if it has not necessary minimum size */ - if (result < hdr_size) - return; + return NET_RX_DROP;
- unicast_packet = (struct unicast_packet *) - (packet_buff + sizeof(struct ethhdr)); + unicast_packet = (struct unicast_packet *) skb->data;
/* packet for me */ if (is_my_mac(unicast_packet->dest)) { - interface_rx(soft_device, packet_buff + hdr_size, - result - hdr_size); - return; - + interface_rx(skb, hdr_size); + return NET_RX_SUCCESS; }
/* TTL exceeded */ if (unicast_packet->ttl < 2) { - addr_to_string(src_str, ((struct ethhdr *) - (unicast_packet + 1))->h_source); - addr_to_string(dst_str, unicast_packet->dest); + addr_to_string(src_str, ethhdr->h_source); + addr_to_string(dst_str, ethhdr->h_dest);
printk(KERN_WARNING "batman-adv:Warning - can't send packet from %s to %s: ttl exceeded\n", src_str, dst_str); - return; + return NET_RX_DROP; }
+ ret = NET_RX_DROP; /* get routing information */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); orig_node = ((struct orig_node *) hash_find(orig_hash, unicast_packet->dest));
if ((orig_node != NULL) && (orig_node->batman_if != NULL) && (orig_node->router != NULL)) { + + /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + batman_if = orig_node->batman_if; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + spin_unlock_irqrestore(&orig_hash_lock, flags); + + /* create a copy of the skb, if needed, to modify it. */ + if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) { + skb_old = skb; + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) + return NET_RX_DROP; + unicast_packet = (struct unicast_packet *) skb->data; + kfree_skb(skb_old); + } /* decrement ttl */ unicast_packet->ttl--;
/* route it */ - send_raw_packet(packet_buff + sizeof(struct ethhdr), - result - sizeof(struct ethhdr), - orig_node->batman_if, - orig_node->router->addr); - } - spin_unlock(&orig_hash_lock); + send_skb_packet(skb, batman_if, dstaddr); + ret = NET_RX_SUCCESS; + + } else + spin_unlock_irqrestore(&orig_hash_lock, flags); + + return ret; }
-static void recv_bcast_packet(struct ethhdr *ethhdr, - unsigned char *packet_buff, - int result, - struct batman_if *batman_if) +int recv_bcast_packet(struct sk_buff *skb) { struct orig_node *orig_node; struct bcast_packet *bcast_packet; - int hdr_size = sizeof(struct ethhdr) + sizeof(struct bcast_packet); + struct ethhdr *ethhdr; + int hdr_size = sizeof(struct bcast_packet); + unsigned long flags; + + /* drop packet if it has not necessary minimum size */ + if (skb_headlen(skb) < hdr_size) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with broadcast indication but unicast recipient */ if (!is_bcast(ethhdr->h_dest)) - return; + return NET_RX_DROP;
/* packet with broadcast sender address */ if (is_bcast(ethhdr->h_source)) - return; - - /* drop packet if it has not necessary minimum size */ - if (result < hdr_size) - return; + return NET_RX_DROP;
/* ignore broadcasts sent by myself */ if (is_my_mac(ethhdr->h_source)) - return; + return NET_RX_DROP;
- bcast_packet = (struct bcast_packet *) - (packet_buff + sizeof(struct ethhdr)); + bcast_packet = (struct bcast_packet *) skb->data;
/* ignore broadcasts originated by myself */ if (is_my_mac(bcast_packet->orig)) - return; + return NET_RX_DROP;
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); orig_node = ((struct orig_node *) hash_find(orig_hash, bcast_packet->orig));
if (orig_node == NULL) { - spin_unlock(&orig_hash_lock); - return; + spin_unlock_irqrestore(&orig_hash_lock, flags); + return NET_RX_DROP; }
/* check flood history */ if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, ntohs(bcast_packet->seqno))) { - spin_unlock(&orig_hash_lock); - return; + spin_unlock_irqrestore(&orig_hash_lock, flags); + return NET_RX_DROP; }
/* mark broadcast in flood history */ @@ -873,210 +951,59 @@ static void recv_bcast_packet(struct ethhdr *ethhdr, orig_node->last_bcast_seqno, 1)) orig_node->last_bcast_seqno = ntohs(bcast_packet->seqno);
- spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); + + /* rebroadcast packet */ + add_bcast_packet_to_list(skb);
/* broadcast for me */ - interface_rx(soft_device, packet_buff + hdr_size, result - hdr_size); + interface_rx(skb, hdr_size);
- /* rebroadcast packet */ - add_bcast_packet_to_list(packet_buff + sizeof(struct ethhdr), - result - sizeof(struct ethhdr)); + return NET_RX_SUCCESS; }
-static void recv_vis_packet(struct ethhdr *ethhdr, - unsigned char *packet_buff, - int result) +int recv_vis_packet(struct sk_buff *skb) { struct vis_packet *vis_packet; - int hdr_size = sizeof(struct ethhdr) + sizeof(struct vis_packet); - int vis_info_len; + struct ethhdr *ethhdr; + int hdr_size = sizeof(struct vis_packet); + int ret;
- /* drop if too short. */ - if (result < hdr_size) - return; + if (skb_headlen(skb) < hdr_size) + return NET_RX_DROP; + + vis_packet = (struct vis_packet *) skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* not for me */ if (!is_my_mac(ethhdr->h_dest)) - return; - - vis_packet = (struct vis_packet *)(packet_buff + sizeof(struct ethhdr)); - vis_info_len = result - hdr_size; + return NET_RX_DROP;
/* ignore own packets */ if (is_my_mac(vis_packet->vis_orig)) - return; + return NET_RX_DROP;
if (is_my_mac(vis_packet->sender_orig)) - return; + return NET_RX_DROP;
switch (vis_packet->vis_type) { case VIS_TYPE_SERVER_SYNC: - receive_server_sync_packet(vis_packet, vis_info_len); + /* TODO: handle fragmented skbs properly */ + receive_server_sync_packet(vis_packet, skb_headlen(skb)); + ret = NET_RX_SUCCESS; break;
case VIS_TYPE_CLIENT_UPDATE: - receive_client_update_packet(vis_packet, vis_info_len); + /* TODO: handle fragmented skbs properly */ + receive_client_update_packet(vis_packet, skb_headlen(skb)); + ret = NET_RX_SUCCESS; break;
default: /* ignore unknown packet */ + ret = NET_RX_DROP; break; } + return ret; }
-static int recv_one_packet(struct batman_if *batman_if, - unsigned char *packet_buff) -{ - int result; - struct ethhdr *ethhdr; - struct batman_packet *batman_packet; - - result = receive_raw_packet(batman_if->raw_sock, packet_buff, - PACKBUFF_SIZE); - if (result <= 0) - return result; - - if (result < sizeof(struct ethhdr) + 2) - return 0; - - ethhdr = (struct ethhdr *)packet_buff; - batman_packet = (struct batman_packet *) - (packet_buff + sizeof(struct ethhdr)); - - if (batman_packet->version != COMPAT_VERSION) { - bat_dbg(DBG_BATMAN, - "Drop packet: incompatible batman version (%i)\n", - batman_packet->version); - return 0; - } - - switch (batman_packet->packet_type) { - /* batman originator packet */ - case BAT_PACKET: - recv_bat_packet(ethhdr, packet_buff, result, batman_if); - break; - - /* batman icmp packet */ - case BAT_ICMP: - recv_icmp_packet(ethhdr, packet_buff, result, batman_if); - break; - - /* unicast packet */ - case BAT_UNICAST: - recv_unicast_packet(ethhdr, packet_buff, result, batman_if); - break; - - /* broadcast packet */ - case BAT_BCAST: - recv_bcast_packet(ethhdr, - packet_buff, result, batman_if); - break; - - /* vis packet */ - case BAT_VIS: - recv_vis_packet(ethhdr, packet_buff, result); - break; - } - return 0; -} - - -static int discard_one_packet(struct batman_if *batman_if, - unsigned char *packet_buff) -{ - int result = -EAGAIN; - - if (batman_if->raw_sock) { - result = receive_raw_packet(batman_if->raw_sock, - packet_buff, - PACKBUFF_SIZE); - } - return result; -} - - -static bool is_interface_active(struct batman_if *batman_if) -{ - if (batman_if->if_active != IF_ACTIVE) - return false; - - return true; -} - -static void service_interface(struct batman_if *batman_if, - unsigned char *packet_buff) - -{ - int result; - - do { - if (is_interface_active(batman_if)) - result = recv_one_packet(batman_if, packet_buff); - else - result = discard_one_packet(batman_if, packet_buff); - } while (result >= 0); - - /* we perform none blocking reads, so EAGAIN indicates there - are no more packets to read. Anything else is a real - error.*/
- if ((result < 0) && (result != -EAGAIN)) - printk(KERN_ERR "batman-adv:Could not receive packet from interface %s: %i\n", batman_if->dev, result); -} - -static void service_interfaces(unsigned char *packet_buffer) -{ - struct batman_if *batman_if; - rcu_read_lock(); - list_for_each_entry_rcu(batman_if, &if_list, list) { - rcu_read_unlock(); - service_interface(batman_if, packet_buffer); - rcu_read_lock(); - } - rcu_read_unlock(); -} - - -int packet_recv_thread(void *data) -{ - unsigned char *packet_buff; - - atomic_set(&data_ready_cond, 0); - atomic_set(&exit_cond, 0); - packet_buff = kmalloc(PACKBUFF_SIZE, GFP_KERNEL); - if (!packet_buff) { - printk(KERN_ERR"batman-adv:Could allocate memory for the packet buffer. :(\n"); - return -1; - } - - while ((!kthread_should_stop()) && (!atomic_read(&exit_cond))) { - - wait_event_interruptible(thread_wait, - (atomic_read(&data_ready_cond) || - atomic_read(&exit_cond))); - - atomic_set(&data_ready_cond, 0); - - if (kthread_should_stop() || atomic_read(&exit_cond)) - break; - - service_interfaces(packet_buff); - } - kfree(packet_buff); - - /* do not exit until kthread_stop() is actually called, - * otherwise it will wait for us forever. */ - while (!kthread_should_stop()) - schedule(); - - return 0; -} - -void batman_data_ready(struct sock *sk, int len) -{ - void (*data_ready)(struct sock *, int) = sk->sk_user_data; - - data_ready(sk, len); - - atomic_set(&data_ready_cond, 1); - wake_up_interruptible(&thread_wait); -} diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h index 890a4f5..c217241 100644 --- a/drivers/staging/batman-adv/routing.h +++ b/drivers/staging/batman-adv/routing.h @@ -25,8 +25,6 @@ extern wait_queue_head_t thread_wait; extern atomic_t exit_cond;
void slide_own_bcast_window(struct batman_if *batman_if); -void batman_data_ready(struct sock *sk, int len); -int packet_recv_thread(void *data); void receive_bat_packet(struct ethhdr *ethhdr, struct batman_packet *batman_packet, unsigned char *hna_buff, int hna_buff_len, @@ -34,3 +32,9 @@ void receive_bat_packet(struct ethhdr *ethhdr, void update_routes(struct orig_node *orig_node, struct neigh_node *neigh_node, unsigned char *hna_buff, int hna_buff_len); +int recv_icmp_packet(struct sk_buff *skb); +int recv_unicast_packet(struct sk_buff *skb); +int recv_bcast_packet(struct sk_buff *skb); +int recv_vis_packet(struct sk_buff *skb); +int recv_bat_packet(struct sk_buff *skb, + struct batman_if *batman_if); diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c index 49b1534..fd48f3f 100644 --- a/drivers/staging/batman-adv/send.c +++ b/drivers/staging/batman-adv/send.c @@ -23,6 +23,7 @@ #include "send.h" #include "routing.h" #include "translation-table.h" +#include "soft-interface.h" #include "hard-interface.h" #include "types.h" #include "vis.h" @@ -58,51 +59,69 @@ static unsigned long forward_send_time(void) return send_time; }
-/* sends a raw packet. */ -void send_raw_packet(unsigned char *pack_buff, int pack_buff_len, - struct batman_if *batman_if, uint8_t *dst_addr) +/* send out an already prepared packet to the given address via the + * specified batman interface */ +int send_skb_packet(struct sk_buff *skb, + struct batman_if *batman_if, + uint8_t *dst_addr) { struct ethhdr *ethhdr; - struct sk_buff *skb; - int retval; - char *data;
if (batman_if->if_active != IF_ACTIVE) - return; + goto send_skb_err; + + if (unlikely(!batman_if->net_dev)) + goto send_skb_err;
if (!(batman_if->net_dev->flags & IFF_UP)) { printk(KERN_WARNING "batman-adv:Interface %s is not up - can't send packet via that interface!\n", batman_if->dev); - return; + goto send_skb_err; }
- skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr)); - if (!skb) - return; - data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr)); + /* push to the ethernet header. */ + if (my_skb_push(skb, sizeof(struct ethhdr)) < 0) + goto send_skb_err;
- memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len); + skb_reset_mac_header(skb);
- ethhdr = (struct ethhdr *) data; + ethhdr = (struct ethhdr *) skb_mac_header(skb); memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN); memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
- skb_reset_mac_header(skb); skb_set_network_header(skb, ETH_HLEN); skb->priority = TC_PRIO_CONTROL; skb->protocol = __constant_htons(ETH_P_BATMAN); + skb->dev = batman_if->net_dev;
/* dev_queue_xmit() returns a negative result on error. However on * congestion and traffic shaping, it drops and returns NET_XMIT_DROP * (which is > 0). This will not be treated as an error. */ - retval = dev_queue_xmit(skb); - if (retval < 0) - printk(KERN_WARNING - "batman-adv:Can't write to raw socket: %i\n", - retval); + + return dev_queue_xmit(skb); +send_skb_err: + kfree_skb(skb); + return NET_XMIT_DROP; +} + +/* sends a raw packet. */ +void send_raw_packet(unsigned char *pack_buff, int pack_buff_len, + struct batman_if *batman_if, uint8_t *dst_addr) +{ + struct sk_buff *skb; + char *data; + + skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr)); + if (!skb) + return; + data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr)); + memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len); + /* pull back to the batman "network header" */ + skb_pull(skb, sizeof(struct ethhdr)); + send_skb_packet(skb, batman_if, dst_addr); }
/* Send a packet to a given interface */ @@ -331,6 +350,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
static void forw_packet_free(struct forw_packet *forw_packet) { + if (forw_packet->skb) + kfree_skb(forw_packet->skb); kfree(forw_packet->packet_buff); kfree(forw_packet); } @@ -353,7 +374,7 @@ static void _add_bcast_packet_to_list(struct forw_packet *forw_packet, send_time); }
-void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len) +void add_bcast_packet_to_list(struct sk_buff *skb) { struct forw_packet *forw_packet;
@@ -361,14 +382,16 @@ void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len) if (!forw_packet) return;
- forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC); - if (!forw_packet->packet_buff) { + skb = skb_copy(skb, GFP_ATOMIC); + if (!skb) { kfree(forw_packet); return; }
- forw_packet->packet_len = packet_len; - memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len); + skb_reset_mac_header(skb); + + forw_packet->skb = skb; + forw_packet->packet_buff = NULL;
/* how often did we send the bcast packet ? */ forw_packet->num_packets = 0; @@ -384,6 +407,7 @@ void send_outstanding_bcast_packet(struct work_struct *work) struct forw_packet *forw_packet = container_of(delayed_work, struct forw_packet, delayed_work); unsigned long flags; + struct sk_buff *skb1;
spin_lock_irqsave(&forw_bcast_list_lock, flags); hlist_del(&forw_packet->list); @@ -392,8 +416,10 @@ void send_outstanding_bcast_packet(struct work_struct *work) /* rebroadcast packet */ rcu_read_lock(); list_for_each_entry_rcu(batman_if, &if_list, list) { - send_raw_packet(forw_packet->packet_buff, - forw_packet->packet_len, + /* send a copy of the saved skb */ + skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC); + if (skb1) + send_skb_packet(skb1, batman_if, broadcastAddr); } rcu_read_unlock(); @@ -415,10 +441,11 @@ void send_outstanding_bat_packet(struct work_struct *work) container_of(work, struct delayed_work, work); struct forw_packet *forw_packet = container_of(delayed_work, struct forw_packet, delayed_work); + unsigned long flags;
- spin_lock(&forw_bat_list_lock); + spin_lock_irqsave(&forw_bat_list_lock, flags); hlist_del(&forw_packet->list); - spin_unlock(&forw_bat_list_lock); + spin_unlock_irqrestore(&forw_bat_list_lock, flags);
send_packet(forw_packet);
@@ -459,18 +486,18 @@ void purge_outstanding_packets(void) spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
/* free batman packet list */ - spin_lock(&forw_bat_list_lock); + spin_lock_irqsave(&forw_bat_list_lock, flags); hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, &forw_bat_list, list) {
- spin_unlock(&forw_bat_list_lock); + spin_unlock_irqrestore(&forw_bat_list_lock, flags);
/** * send_outstanding_bat_packet() will lock the list to * delete the item from the list */ cancel_delayed_work_sync(&forw_packet->delayed_work); - spin_lock(&forw_bat_list_lock); + spin_lock_irqsave(&forw_bat_list_lock, flags); } - spin_unlock(&forw_bat_list_lock); + spin_unlock_irqrestore(&forw_bat_list_lock, flags); } diff --git a/drivers/staging/batman-adv/send.h b/drivers/staging/batman-adv/send.h index 59d5009..5fc6f34 100644 --- a/drivers/staging/batman-adv/send.h +++ b/drivers/staging/batman-adv/send.h @@ -22,6 +22,9 @@ #include "types.h"
void send_own_packet_work(struct work_struct *work); +int send_skb_packet(struct sk_buff *skb, + struct batman_if *batman_if, + uint8_t *dst_addr); void send_raw_packet(unsigned char *pack_buff, int pack_buff_len, struct batman_if *batman_if, uint8_t *dst_addr); void schedule_own_packet(struct batman_if *batman_if); @@ -30,7 +33,7 @@ void schedule_forward_packet(struct orig_node *orig_node, struct batman_packet *batman_packet, uint8_t directlink, int hna_buff_len, struct batman_if *if_outgoing); -void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len); +void add_bcast_packet_to_list(struct sk_buff *skb); void send_outstanding_bcast_packet(struct work_struct *work); void send_outstanding_bat_packet(struct work_struct *work); void purge_outstanding_packets(void); diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c index 168a4e1..8ae3483 100644 --- a/drivers/staging/batman-adv/soft-interface.c +++ b/drivers/staging/batman-adv/soft-interface.c @@ -34,7 +34,6 @@ static uint16_t bcast_seqno = 1; /* give own bcast messages seq numbers to avoid * broadcast storms */ static int32_t skb_packets; static int32_t skb_bad_packets; -static int32_t lock_dropped;
unsigned char mainIfAddr[ETH_ALEN]; static unsigned char mainIfAddr_default[ETH_ALEN]; @@ -67,12 +66,12 @@ int main_if_was_up(void) return (memcmp(mainIfAddr, mainIfAddr_default, ETH_ALEN) != 0 ? 1 : 0); }
-static int my_skb_push(struct sk_buff *skb, unsigned int len) +int my_skb_push(struct sk_buff *skb, unsigned int len) { int result = 0;
skb_packets++; - if (skb->data - len < skb->head) { + if (skb_headroom(skb) < len) { skb_bad_packets++; result = pskb_expand_head(skb, len, 0, GFP_ATOMIC);
@@ -169,7 +168,10 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev) struct orig_node *orig_node; struct ethhdr *ethhdr = (struct ethhdr *)skb->data; struct bat_priv *priv = netdev_priv(dev); + struct batman_if *batman_if; + uint8_t dstaddr[6]; int data_len = skb->len; + unsigned long flags;
if (atomic_read(&module_state) != MODULE_ACTIVE) goto dropped; @@ -185,7 +187,6 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev) goto dropped;
bcast_packet = (struct bcast_packet *)skb->data; - bcast_packet->version = COMPAT_VERSION;
/* batman packet type: broadcast */ @@ -194,27 +195,21 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev) /* hw address of first interface is the orig mac because only * this mac is known throughout the mesh */ memcpy(bcast_packet->orig, mainIfAddr, ETH_ALEN); + /* set broadcast sequence number */ bcast_packet->seqno = htons(bcast_seqno);
bcast_seqno++;
/* broadcast packet */ - add_bcast_packet_to_list(skb->data, skb->len); + add_bcast_packet_to_list(skb); + /* a copy is stored in the bcast list, therefore removing + * the original skb. */ + kfree_skb(skb);
/* unicast packet */ } else { - - /* simply spin_lock()ing can deadlock when the lock is already - * hold. */ - /* TODO: defer the work in a working queue instead of - * dropping */ - if (!spin_trylock(&orig_hash_lock)) { - lock_dropped++; - printk(KERN_WARNING "batman-adv:%d packets dropped because lock was hold\n", lock_dropped); - goto dropped; - } - + spin_lock_irqsave(&orig_hash_lock, flags); /* get routing information */ orig_node = ((struct orig_node *)hash_find(orig_hash, ethhdr->h_dest)); @@ -243,14 +238,17 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev) if (orig_node->batman_if->if_active != IF_ACTIVE) goto unlock;
- send_raw_packet(skb->data, skb->len, - orig_node->batman_if, - orig_node->router->addr); + /* don't lock while sending the packets ... we therefore + * copy the required data before sending */ + + batman_if = orig_node->batman_if; + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); + spin_unlock_irqrestore(&orig_hash_lock, flags); + + send_skb_packet(skb, batman_if, dstaddr); } else { goto unlock; } - - spin_unlock(&orig_hash_lock); }
priv->stats.tx_packets++; @@ -258,42 +256,44 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev) goto end;
unlock: - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); dropped: priv->stats.tx_dropped++; end: - kfree_skb(skb); return 0; }
-void interface_rx(struct net_device *dev, void *packet, int packet_len) +void interface_rx(struct sk_buff *skb, int hdr_size) { - struct sk_buff *skb; + struct net_device *dev = soft_device; struct bat_priv *priv = netdev_priv(dev);
- skb = dev_alloc_skb(packet_len); - - if (!skb) { - priv->stats.rx_dropped++; - goto out; + /* check if enough space is available for pulling, and pull */ + if (!pskb_may_pull(skb, hdr_size)) { + kfree_skb(skb); + return; } + skb_pull_rcsum(skb, hdr_size); +/* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
- memcpy(skb_put(skb, packet_len), packet, packet_len); - - /* Write metadata, and then pass to the receive level */ skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); - skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* should not be neccesary anymore as we use skb_pull_rcsum() + * TODO: please verify this and remove this TODO + * -- Dec 21st 2009, Simon Wunderlich */ + +/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/ + + /* TODO: set skb->pkt_type to PACKET_BROADCAST, PACKET_MULTICAST, + * PACKET_OTHERHOST or PACKET_HOST */
priv->stats.rx_packets++; - priv->stats.rx_bytes += packet_len; + priv->stats.rx_bytes += skb->len;
dev->last_rx = jiffies;
netif_rx(skb); - -out: - return; }
/* ethtool */ diff --git a/drivers/staging/batman-adv/soft-interface.h b/drivers/staging/batman-adv/soft-interface.h index 515e276..c0cad81 100644 --- a/drivers/staging/batman-adv/soft-interface.h +++ b/drivers/staging/batman-adv/soft-interface.h @@ -28,6 +28,7 @@ struct net_device_stats *interface_stats(struct net_device *dev); int interface_set_mac_addr(struct net_device *dev, void *addr); int interface_change_mtu(struct net_device *dev, int new_mtu); int interface_tx(struct sk_buff *skb, struct net_device *dev); -void interface_rx(struct net_device *dev, void *packet, int packet_len); +void interface_rx(struct sk_buff *skb, int hdr_size); +int my_skb_push(struct sk_buff *skb, unsigned int len);
extern unsigned char mainIfAddr[]; diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h index d708e6f..dec1b54 100644 --- a/drivers/staging/batman-adv/types.h +++ b/drivers/staging/batman-adv/types.h @@ -39,7 +39,6 @@ struct batman_if { char if_active; char addr_str[ETH_STR_LEN]; struct net_device *net_dev; - struct socket *raw_sock; atomic_t seqno; unsigned char *packet_buff; int packet_len; @@ -113,6 +112,7 @@ struct forw_packet { /* structure for forw_list maintaining packet struct hlist_node list; unsigned long send_time; uint8_t own; + struct sk_buff *skb; unsigned char *packet_buff; uint16_t packet_len; uint32_t direct_link_flags; diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c index 3c66645..2633d0f 100644 --- a/drivers/staging/batman-adv/vis.c +++ b/drivers/staging/batman-adv/vis.c @@ -52,12 +52,13 @@ static void free_info(void *data) /* set the mode of the visualization to client or server */ void vis_set_mode(int mode) { - spin_lock(&vis_hash_lock); + unsigned long flags; + spin_lock_irqsave(&vis_hash_lock, flags);
if (my_vis_info != NULL) my_vis_info->packet.vis_type = mode;
- spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags); }
/* is_vis_server(), locked outside */ @@ -74,10 +75,11 @@ static int is_vis_server_locked(void) int is_vis_server(void) { int ret = 0; + unsigned long flags;
- spin_lock(&vis_hash_lock); + spin_lock_irqsave(&vis_hash_lock, flags); ret = is_vis_server_locked(); - spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags);
return ret; } @@ -269,8 +271,9 @@ void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len) { struct vis_info *info; int is_new; + unsigned long flags;
- spin_lock(&vis_hash_lock); + spin_lock_irqsave(&vis_hash_lock, flags); info = add_packet(vis_packet, vis_info_len, &is_new); if (info == NULL) goto end; @@ -283,7 +286,7 @@ void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len) list_add_tail(&info->send_list, &send_list); } end: - spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags); }
/* handle an incoming client update packet and schedule forward if needed. */ @@ -292,12 +295,13 @@ void receive_client_update_packet(struct vis_packet *vis_packet, { struct vis_info *info; int is_new; + unsigned long flags;
/* clients shall not broadcast. */ if (is_bcast(vis_packet->target_orig)) return;
- spin_lock(&vis_hash_lock); + spin_lock_irqsave(&vis_hash_lock, flags); info = add_packet(vis_packet, vis_info_len, &is_new); if (info == NULL) goto end; @@ -319,7 +323,7 @@ void receive_client_update_packet(struct vis_packet *vis_packet, list_add_tail(&info->send_list, &send_list); } end: - spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags); }
/* Walk the originators and find the VIS server with the best tq. Set the packet @@ -370,7 +374,7 @@ static int generate_vis_packet(void)
info->first_seen = jiffies;
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); info->packet.ttl = TTL; info->packet.seqno++; @@ -379,7 +383,7 @@ static int generate_vis_packet(void) if (!is_vis_server_locked()) { best_tq = find_best_vis_server(info); if (best_tq < 0) { - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return -1; } } @@ -403,13 +407,13 @@ static int generate_vis_packet(void) info->packet.entries++;
if (vis_packet_full(info)) { - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return 0; } } }
- spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags);
spin_lock_irqsave(&hna_local_hash_lock, flags); while (hash_iterate(hna_local_hash, &hashit_local)) { @@ -450,8 +454,9 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length) { HASHIT(hashit); struct orig_node *orig_node; + unsigned long flags;
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags);
/* send to all routers in range. */ while (hash_iterate(orig_hash, &hashit)) { @@ -478,14 +483,15 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length) } } memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); }
static void unicast_vis_packet(struct vis_info *info, int packet_length) { struct orig_node *orig_node; + unsigned long flags;
- spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); orig_node = ((struct orig_node *) hash_find(orig_hash, info->packet.target_orig));
@@ -496,7 +502,7 @@ static void unicast_vis_packet(struct vis_info *info, int packet_length) orig_node->batman_if, orig_node->router->addr); } - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); }
/* only send one vis packet. called from send_vis_packets() */ @@ -526,8 +532,9 @@ static void send_vis_packet(struct vis_info *info) static void send_vis_packets(struct work_struct *work) { struct vis_info *info, *temp; + unsigned long flags;
- spin_lock(&vis_hash_lock); + spin_lock_irqsave(&vis_hash_lock, flags); purge_vis_packets();
if (generate_vis_packet() == 0) @@ -538,7 +545,7 @@ static void send_vis_packets(struct work_struct *work) list_del_init(&info->send_list); send_vis_packet(info); } - spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags); start_vis_timer(); } static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets); @@ -547,10 +554,11 @@ static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets); * initialized (e.g. bat0 is initialized, interfaces have been added) */ int vis_init(void) { + unsigned long flags; if (vis_hash) return 1;
- spin_lock(&vis_hash_lock); + spin_lock_irqsave(&vis_hash_lock, flags);
vis_hash = hash_new(256, vis_info_cmp, vis_info_choose); if (!vis_hash) { @@ -588,12 +596,12 @@ int vis_init(void) goto err; }
- spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags); start_vis_timer(); return 1;
err: - spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags); vis_quit(); return 0; } @@ -601,17 +609,18 @@ err: /* shutdown vis-server */ void vis_quit(void) { + unsigned long flags; if (!vis_hash) return;
cancel_delayed_work_sync(&vis_timer_wq);
- spin_lock(&vis_hash_lock); + spin_lock_irqsave(&vis_hash_lock, flags); /* properly remove, kill timers ... */ hash_delete(vis_hash, free_info); vis_hash = NULL; my_vis_info = NULL; - spin_unlock(&vis_hash_lock); + spin_unlock_irqrestore(&vis_hash_lock, flags); }
/* schedule packets for (re)transmission */