/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/dsfield.h>
#include <net/rtnetlink.h>

#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}

static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_HLIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_free(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after all the other
	 * components that depend on them have been shut down, as they may
	 * still be using these structures.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 *
 * Return: 'true' if the mac address was found, false otherwise.
 */
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
	const struct batadv_hard_iface *hard_iface;
	bool is_my_mac = false;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			is_my_mac = true;
			break;
		}
	}
	rcu_read_unlock();
	return is_my_mac;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Return: primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

/**
 * batadv_max_header_len - calculate maximum encapsulation overhead for a
 * payload packet
 *
 * Return: the maximum encapsulation overhead in bytes.
 */
int batadv_max_header_len(void)
{
	int header_len = 0;

	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_4addr_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_bcast_packet));

#ifdef CONFIG_BATMAN_ADV_NC
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_coded_packet));
#endif

	return header_len + ETH_HLEN;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}
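
/* Worked example (illustrative only, not part of the driver logic): for an
 * IPv4 packet whose DS field is 0xe0 (CS7), the code above computes
 * prio = (0xe0 & 0xfc) >> 5 = 7 and sets skb->priority to 263; a VLAN-tagged
 * frame carrying PCP 5 in its TCI yields prio = 5 and skb->priority = 261.
 */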

static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	u8 idx;
	int ret;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

int
batadv_recv_handler_register(u8 packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	int (*curr)(struct sk_buff *,
		    struct batadv_hard_iface *);
	curr = batadv_rx_handler[packet_type];

	if ((curr != batadv_recv_unhandled_packet) &&
	    (curr != batadv_recv_unhandled_unicast_packet))
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}
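
/* Usage sketch (MY_PACKET_TYPE and my_recv_handler are hypothetical names,
 * shown only to illustrate the API; real callers live in the routing
 * algorithm code):
 *
 *	static int my_recv_handler(struct sk_buff *skb,
 *				   struct batadv_hard_iface *recv_if);
 *
 *	ret = batadv_recv_handler_register(MY_PACKET_TYPE, my_recv_handler);
 *
 * The call fails with -EBUSY if another handler already owns that packet
 * type slot.
 */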

void batadv_recv_handler_unregister(u8 packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

static struct batadv_algo_ops *batadv_algo_get(char *name)
{
	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
			continue;

		bat_algo_ops = bat_algo_ops_tmp;
		break;
	}

	return bat_algo_ops;
}

int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
	struct batadv_algo_ops *bat_algo_ops_tmp;

	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
	if (bat_algo_ops_tmp) {
		pr_info("Trying to register already registered routing algorithm: %s\n",
			bat_algo_ops->name);
		return -EEXIST;
	}

	/* all algorithms must implement all ops (for now) */
	if (!bat_algo_ops->bat_iface_enable ||
	    !bat_algo_ops->bat_iface_disable ||
	    !bat_algo_ops->bat_iface_update_mac ||
	    !bat_algo_ops->bat_primary_iface_set ||
	    !bat_algo_ops->bat_ogm_schedule ||
	    !bat_algo_ops->bat_ogm_emit ||
	    !bat_algo_ops->bat_neigh_cmp ||
	    !bat_algo_ops->bat_neigh_is_similar_or_better) {
		pr_info("Routing algo '%s' does not implement required ops\n",
			bat_algo_ops->name);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&bat_algo_ops->list);
	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);

	return 0;
}
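
/* Registration sketch (all my_* names are hypothetical; it only illustrates
 * that every callback checked above must be provided):
 *
 *	static struct batadv_algo_ops my_algo_ops = {
 *		.name = "MY_ALGO",
 *		.bat_iface_enable = my_iface_enable,
 *		.bat_iface_disable = my_iface_disable,
 *		.bat_iface_update_mac = my_iface_update_mac,
 *		.bat_primary_iface_set = my_primary_iface_set,
 *		.bat_ogm_schedule = my_ogm_schedule,
 *		.bat_ogm_emit = my_ogm_emit,
 *		.bat_neigh_cmp = my_neigh_cmp,
 *		.bat_neigh_is_similar_or_better = my_neigh_is_similar_or_better,
 *	};
 *
 *	ret = batadv_algo_register(&my_algo_ops);
 *
 * Registration returns -EEXIST for duplicate names and -EINVAL if any of the
 * mandatory callbacks is missing.
 */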

int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
	struct batadv_algo_ops *bat_algo_ops;

	bat_algo_ops = batadv_algo_get(name);
	if (!bat_algo_ops)
		return -EINVAL;

	bat_priv->bat_algo_ops = bat_algo_ops;

	return 0;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
	struct batadv_algo_ops *bat_algo_ops;

	seq_puts(seq, "Available routing algorithms:\n");

	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
		seq_printf(seq, " * %s\n", bat_algo_ops->name);
	}

	return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 *
 * Return: big endian crc32c of the checksummed data
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	from = (unsigned int)(payload_ptr - skb->data);

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}
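
/* Usage sketch (hdr_len is a placeholder for the caller's header size): to
 * checksum everything behind a header that sits in the skb head buffer:
 *
 *	__be32 crc = batadv_skb_crc32(skb, skb->data + hdr_len);
 *
 * The returned value is already converted to big endian (htonl() above), so
 * it can be stored directly in a packet field.
 */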

/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
	if (atomic_dec_and_test(&tvlv_handler->refcount))
		kfree_rcu(tvlv_handler, rcu);
}

/**
 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
 * based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to look for
 * @version: tvlv handler version to look for
 *
 * Return: tvlv handler if found or NULL otherwise.
 */
static struct batadv_tvlv_handler
*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler_tmp,
				 &bat_priv->tvlv.handler_list, list) {
		if (tvlv_handler_tmp->type != type)
			continue;

		if (tvlv_handler_tmp->version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
			continue;

		tvlv_handler = tvlv_handler_tmp;
		break;
	}
	rcu_read_unlock();

	return tvlv_handler;
}

/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
	if (atomic_dec_and_test(&tvlv->refcount))
		kfree(tvlv);
}

/**
 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
 * list based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to look for
 * @version: tvlv container version to look for
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Return: tvlv container if found or NULL otherwise.
 */
static struct batadv_tvlv_container
*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;

	hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
		if (tvlv_tmp->tvlv_hdr.type != type)
			continue;

		if (tvlv_tmp->tvlv_hdr.version != version)
			continue;

		if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
			continue;

		tvlv = tvlv_tmp;
		break;
	}

	return tvlv;
}

/**
 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
 * list entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Return: size of all currently registered tvlv containers in bytes.
 */
static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
	struct batadv_tvlv_container *tvlv;
	u16 tvlv_len = 0;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_len += sizeof(struct batadv_tvlv_hdr);
		tvlv_len += ntohs(tvlv->tvlv_hdr.len);
	}

	return tvlv_len;
}

/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 * list
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv: the to be removed tvlv container
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
					 struct batadv_tvlv_container *tvlv)
{
	lockdep_assert_held(&bat_priv->tvlv.container_list_lock);

	if (!tvlv)
		return;

	hlist_del(&tvlv->list);

	/* first call to decrement the counter, second call to free */
	batadv_tvlv_container_free_ref(tvlv);
	batadv_tvlv_container_free_ref(tvlv);
}

764/**
765 * batadv_tvlv_container_unregister - unregister tvlv container based on the
766 * provided type and version (both need to match)
767 * @bat_priv: the bat priv with all the soft interface information
768 * @type: tvlv container type to unregister
769 * @version: tvlv container type to unregister
770 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
				      u8 type, u8 version)
{
	struct batadv_tvlv_container *tvlv;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(bat_priv, tvlv);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_container_register - register tvlv type, version and content
 * to be propagated with each (primary interface) OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type
 * @version: tvlv container version
 * @tvlv_value: tvlv container content
 * @tvlv_value_len: tvlv container content length
 *
 * If a container of the same type and version was already registered the new
 * content is going to replace the old one.
 */
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
				    u8 type, u8 version,
				    void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_tvlv_container *tvlv_old, *tvlv_new;

	if (!tvlv_value)
		tvlv_value_len = 0;

	tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
	if (!tvlv_new)
		return;

	tvlv_new->tvlv_hdr.version = version;
	tvlv_new->tvlv_hdr.type = type;
	tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);

	memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
	INIT_HLIST_NODE(&tvlv_new->list);
	atomic_set(&tvlv_new->refcount, 1);

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(bat_priv, tvlv_old);
	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
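
/* Usage sketch: the gateway announcement code registers its bandwidth
 * container roughly like this (down/up are placeholder rates; the type and
 * version values are illustrative, struct batadv_tvlv_gateway_data is the
 * one size-checked in batadv_recv_handler_init()):
 *
 *	struct batadv_tvlv_gateway_data gw;
 *
 *	gw.bandwidth_down = htonl(down);
 *	gw.bandwidth_up = htonl(up);
 *	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
 *				       &gw, sizeof(gw));
 */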
821
822/**
Antonio Quartulli3f687852014-11-02 11:29:56 +0100823 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
Marek Lindneref261572013-04-23 21:39:57 +0800824 * requested packet size
825 * @packet_buff: packet buffer
826 * @packet_buff_len: packet buffer size
Martin Hundebølla0e28772014-07-15 09:41:08 +0200827 * @min_packet_len: requested packet minimum size
Marek Lindneref261572013-04-23 21:39:57 +0800828 * @additional_packet_len: requested additional packet size on top of minimum
829 * size
830 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200831 * Return: true of the packet buffer could be changed to the requested size,
Marek Lindneref261572013-04-23 21:39:57 +0800832 * false otherwise.
833 */
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
					    int *packet_buff_len,
					    int min_packet_len,
					    int additional_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (!new_buff)
		return false;

	memcpy(new_buff, *packet_buff, min_packet_len);
	kfree(*packet_buff);
	*packet_buff = new_buff;
	*packet_buff_len = min_packet_len + additional_packet_len;

	return true;
}

/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 * OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 * content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Return: size of all appended tvlv containers in bytes.
 */
u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
				     unsigned char **packet_buff,
				     int *packet_buff_len, int packet_min_len)
{
	struct batadv_tvlv_container *tvlv;
	struct batadv_tvlv_hdr *tvlv_hdr;
	u16 tvlv_value_len;
	void *tvlv_value;
	bool ret;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, tvlv_value_len);

	if (!ret)
		goto end;

	if (!tvlv_value_len)
		goto end;

	tvlv_value = (*packet_buff) + packet_min_len;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_hdr = tvlv_value;
		tvlv_hdr->type = tvlv->tvlv_hdr.type;
		tvlv_hdr->version = tvlv->tvlv_hdr.version;
		tvlv_hdr->len = tvlv->tvlv_hdr.len;
		tvlv_value = tvlv_hdr + 1;
		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
		tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
	}

end:
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
	return tvlv_value_len;
}
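
/* The buffer produced above is laid out as follows: the first packet_min_len
 * bytes hold the untouched OGM header, followed by one
 * (struct batadv_tvlv_hdr + value) block per registered container:
 *
 *	[ OGM header ][ tvlv_hdr | value ][ tvlv_hdr | value ] ...
 *
 * The returned length covers only the appended tvlv blocks, not the OGM
 * header itself.
 */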

/**
 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv_handler: tvlv callback function handling the tvlv content
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Return: success if handler was not found or the return value of the handler
 * callback.
 */
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
				    struct batadv_tvlv_handler *tvlv_handler,
				    bool ogm_source,
				    struct batadv_orig_node *orig_node,
				    u8 *src, u8 *dst,
				    void *tvlv_value, u16 tvlv_value_len)
{
	if (!tvlv_handler)
		return NET_RX_SUCCESS;

	if (ogm_source) {
		if (!tvlv_handler->ogm_handler)
			return NET_RX_SUCCESS;

		if (!orig_node)
			return NET_RX_SUCCESS;

		tvlv_handler->ogm_handler(bat_priv, orig_node,
					  BATADV_NO_FLAGS,
					  tvlv_value, tvlv_value_len);
		tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
	} else {
		if (!src)
			return NET_RX_SUCCESS;

		if (!dst)
			return NET_RX_SUCCESS;

		if (!tvlv_handler->unicast_handler)
			return NET_RX_SUCCESS;

		return tvlv_handler->unicast_handler(bat_priv, src,
						     dst, tvlv_value,
						     tvlv_value_len);
	}

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Return: success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
				   bool ogm_source,
				   struct batadv_orig_node *orig_node,
				   u8 *src, u8 *dst,
				   void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_tvlv_handler *tvlv_handler;
	struct batadv_tvlv_hdr *tvlv_hdr;
	u16 tvlv_value_cont_len;
	u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
	int ret = NET_RX_SUCCESS;

	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = tvlv_value;
		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
		tvlv_value = tvlv_hdr + 1;
		tvlv_value_len -= sizeof(*tvlv_hdr);

		if (tvlv_value_cont_len > tvlv_value_len)
			break;

		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
						       tvlv_hdr->type,
						       tvlv_hdr->version);

		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
						ogm_source, orig_node,
						src, dst, tvlv_value,
						tvlv_value_cont_len);
		if (tvlv_handler)
			batadv_tvlv_handler_free_ref(tvlv_handler);
		tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
		tvlv_value_len -= tvlv_value_cont_len;
	}

	if (!ogm_source)
		return ret;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler,
				 &bat_priv->tvlv.handler_list, list) {
		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
			tvlv_handler->ogm_handler(bat_priv, orig_node,
						  cifnotfound, NULL, 0);

		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
	}
	rcu_read_unlock();

	return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
 * handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @batadv_ogm_packet: ogm packet containing the tvlv containers
 * @orig_node: orig node emitting the ogm packet
 */
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
			     struct batadv_ogm_packet *batadv_ogm_packet,
			     struct batadv_orig_node *orig_node)
{
	void *tvlv_value;
	u16 tvlv_value_len;

	if (!batadv_ogm_packet)
		return;

	tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
	if (!tvlv_value_len)
		return;

	tvlv_value = batadv_ogm_packet + 1;

	batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
				       tvlv_value, tvlv_value_len);
}

/**
 * batadv_tvlv_handler_register - register tvlv handler based on the provided
 * type and version (both need to match) for ogm tvlv payload and/or unicast
 * payload
 * @bat_priv: the bat priv with all the soft interface information
 * @optr: ogm tvlv handler callback function. This function receives the orig
 * node, flags and the tvlv content as argument to process.
 * @uptr: unicast tvlv handler callback function. This function receives the
 * source & destination of the unicast packet as well as the tvlv content
 * to process.
 * @type: tvlv handler type to be registered
 * @version: tvlv handler version to be registered
 * @flags: flags to enable or disable TVLV API behavior
 */
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
				  void (*optr)(struct batadv_priv *bat_priv,
					       struct batadv_orig_node *orig,
					       u8 flags,
					       void *tvlv_value,
					       u16 tvlv_value_len),
				  int (*uptr)(struct batadv_priv *bat_priv,
					      u8 *src, u8 *dst,
					      void *tvlv_value,
					      u16 tvlv_value_len),
				  u8 type, u8 version, u8 flags)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (tvlv_handler) {
		batadv_tvlv_handler_free_ref(tvlv_handler);
		return;
	}

	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
	if (!tvlv_handler)
		return;

	tvlv_handler->ogm_handler = optr;
	tvlv_handler->unicast_handler = uptr;
	tvlv_handler->type = type;
	tvlv_handler->version = version;
	tvlv_handler->flags = flags;
	atomic_set(&tvlv_handler->refcount, 1);
	INIT_HLIST_NODE(&tvlv_handler->list);

	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
}

/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
				    u8 type, u8 version)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (!tvlv_handler)
		return;

	batadv_tvlv_handler_free_ref(tvlv_handler);
	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_del_rcu(&tvlv_handler->list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
	batadv_tvlv_handler_free_ref(tvlv_handler);
}

/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 * specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
			      u8 *dst, u8 type, u8 version,
			      void *tvlv_value, u16 tvlv_value_len)
{
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	struct batadv_tvlv_hdr *tvlv_hdr;
	struct batadv_orig_node *orig_node;
	struct sk_buff *skb;
	unsigned char *tvlv_buff;
	unsigned int tvlv_len;
	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);

	orig_node = batadv_orig_hash_find(bat_priv, dst);
	if (!orig_node)
		return;

	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
	if (!skb)
		goto out;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, ETH_HLEN);
	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
	unicast_tvlv_packet->ttl = BATADV_TTL;
	unicast_tvlv_packet->reserved = 0;
	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
	unicast_tvlv_packet->align = 0;
	ether_addr_copy(unicast_tvlv_packet->src, src);
	ether_addr_copy(unicast_tvlv_packet->dst, dst);

	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
	tvlv_hdr->version = version;
	tvlv_hdr->type = type;
	tvlv_hdr->len = htons(tvlv_value_len);
	tvlv_buff += sizeof(*tvlv_hdr);
	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
		kfree_skb(skb);
out:
	batadv_orig_node_free_ref(orig_node);
}
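
/* Usage sketch (MY_TVLV_TYPE, my_data, my_len, orig_addr and primary_if are
 * placeholders; primary_if is assumed to be the currently selected primary
 * interface): send a tvlv of version 1 to a remote originator:
 *
 *	batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
 *				 orig_addr, MY_TVLV_TYPE, 1, my_data, my_len);
 *
 * The call silently drops the request if the destination originator is not
 * known.
 */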

/**
 * batadv_get_vid - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in the
 * skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
	struct vlan_ethhdr *vhdr;
	unsigned short vid;

	if (ethhdr->h_proto != htons(ETH_P_8021Q))
		return BATADV_NO_FLAGS;

	if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
		return BATADV_NO_FLAGS;

	vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
	vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	vid |= BATADV_VLAN_HAS_TAG;

	return vid;
}
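
/* Worked example (illustrative only): for an untagged frame the function
 * returns BATADV_NO_FLAGS; for an 802.1Q frame whose TCI carries VLAN ID 5 it
 * returns 5 | BATADV_VLAN_HAS_TAG, so callers can distinguish "VLAN 0" from
 * "no VLAN tag at all".
 */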

/**
 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute is to be
 * looked up
 *
 * Return: true if AP isolation is on for the VLAN identified by vid, false
 * otherwise
 */
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
	bool ap_isolation_enabled = false;
	struct batadv_softif_vlan *vlan;

	/* if the AP isolation is requested on a VLAN, then check for its
	 * setting in the proper VLAN private data structure
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
		batadv_softif_vlan_free_ref(vlan);
	}

	return ap_isolation_enabled;
}

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
	struct batadv_algo_ops *bat_algo_ops;
	char *algo_name = (char *)val;
	size_t name_len = strlen(algo_name);

	if (name_len > 0 && algo_name[name_len - 1] == '\n')
		algo_name[name_len - 1] = '\0';

	bat_algo_ops = batadv_algo_get(algo_name);
	if (!bat_algo_ops) {
		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
		return -EINVAL;
	}

	return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
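
/* The parameter registered above can be set at load time, e.g. (assuming the
 * module is loaded under its usual name batman_adv):
 *
 *	modprobe batman_adv routing_algo=BATMAN_IV
 *
 * or changed later through /sys/module/batman_adv/parameters/routing_algo,
 * as far as the 0644 permissions above allow it.
 */
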
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);