/*
 * Randomized tests for eBPF longest-prefix-match maps
 *
 * This program runs randomized tests against the lpm-bpf-map. It implements a
 * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked
 * lists. The implementation should be pretty straightforward.
 *
 * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies
 * the trie-based bpf-map implementation behaves the same way as tlpm.
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/time.h>
#include <sys/resource.h>

#include <bpf/bpf.h>
#include "bpf_util.h"

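/* A tlpm entry stores a prefix as its length in bits plus the packed key
 * bytes; entries are kept on a simple singly linked list.
 */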
struct tlpm_node {
	struct tlpm_node *next;
	size_t n_bits;
	uint8_t key[];
};

static struct tlpm_node *tlpm_add(struct tlpm_node *list,
				  const uint8_t *key,
				  size_t n_bits)
{
	struct tlpm_node *node;
	size_t n;

	/* add new entry with @key/@n_bits to @list and return new head */

	n = (n_bits + 7) / 8;
	node = malloc(sizeof(*node) + n);
	assert(node);

	node->next = list;
	node->n_bits = n_bits;
	memcpy(node->key, key, n);

	return node;
}

static void tlpm_clear(struct tlpm_node *list)
{
	struct tlpm_node *node;

	/* free all entries in @list */

	while ((node = list)) {
		list = list->next;
		free(node);
	}
}

static struct tlpm_node *tlpm_match(struct tlpm_node *list,
				    const uint8_t *key,
				    size_t n_bits)
{
	struct tlpm_node *best = NULL;
	size_t i;

	/* Perform longest prefix-match on @key/@n_bits. That is, iterate all
	 * entries and match each prefix against @key. Remember the "best"
	 * entry we find (i.e., the longest prefix that matches) and return it
	 * to the caller when done.
	 */

	for ( ; list; list = list->next) {
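		/* Bit i of a key lives in byte i / 8 at position 7 - i % 8,
		 * i.e. bits are numbered MSB-first within each byte. Compare
		 * the first min(n_bits, list->n_bits) bits one by one and
		 * stop at the first mismatch.
		 */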
		for (i = 0; i < n_bits && i < list->n_bits; ++i) {
			if ((key[i / 8] & (1 << (7 - i % 8))) !=
			    (list->key[i / 8] & (1 << (7 - i % 8))))
				break;
		}

		if (i >= list->n_bits) {
			if (!best || i > best->n_bits)
				best = list;
		}
	}

	return best;
}

static void test_lpm_basic(void)
{
	struct tlpm_node *list = NULL, *t1, *t2;

	/* very basic, static tests to verify tlpm works as expected */

	assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));

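	/* A single 0xff/8 entry matches any key whose first eight bits are
	 * all ones; shorter queries and keys that differ in any of those
	 * bits must not match.
	 */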
	t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8);
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16));
	assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8));
	assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8));
	assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7));

	t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16);
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
	assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
	assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15));
	assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16));

	tlpm_clear(list);
}

static void test_lpm_order(void)
{
	struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL;
	size_t i, j;

	/* Verify the tlpm implementation works correctly regardless of the
	 * order of entries. Insert a random set of entries into @l1, and copy
	 * the same data in reverse order into @l2. Then verify a lookup of
	 * random keys will yield the same result in both sets.
	 */

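	/* 1 << 12 random prefixes of 1..16 bits over a two-byte key space
	 * gives plenty of overlapping prefixes, which is what makes the
	 * order-independence check meaningful.
	 */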
	for (i = 0; i < (1 << 12); ++i)
		l1 = tlpm_add(l1, (uint8_t[]){
					rand() % 0xff,
					rand() % 0xff,
				}, rand() % 16 + 1);

	for (t1 = l1; t1; t1 = t1->next)
		l2 = tlpm_add(l2, t1->key, t1->n_bits);

	for (i = 0; i < (1 << 8); ++i) {
		uint8_t key[] = { rand() % 0xff, rand() % 0xff };

		t1 = tlpm_match(l1, key, 16);
		t2 = tlpm_match(l2, key, 16);

		assert(!t1 == !t2);
		if (t1) {
			assert(t1->n_bits == t2->n_bits);
			for (j = 0; j < t1->n_bits; ++j)
				assert((t1->key[j / 8] & (1 << (7 - j % 8))) ==
				       (t2->key[j / 8] & (1 << (7 - j % 8))));
		}
	}

	tlpm_clear(l1);
	tlpm_clear(l2);
}

static void test_lpm_map(int keysize)
{
	size_t i, j, n_matches, n_nodes, n_lookups;
	struct tlpm_node *t, *list = NULL;
	struct bpf_lpm_trie_key *key;
	uint8_t *data, *value;
	int r, map;

	/* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of
	 * prefixes and insert it into both tlpm and bpf-lpm. Then run some
	 * randomized lookups and verify both maps return the same result.
	 */

	n_matches = 0;
	n_nodes = 1 << 8;
	n_lookups = 1 << 16;

	data = alloca(keysize);
	memset(data, 0, keysize);

	value = alloca(keysize + 1);
	memset(value, 0, keysize + 1);

	key = alloca(sizeof(*key) + keysize);
	memset(key, 0, sizeof(*key) + keysize);

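	/* The bpf map key is a struct bpf_lpm_trie_key (a __u32 prefixlen)
	 * followed by keysize bytes of data. The value is keysize + 1 bytes:
	 * the prefix bytes plus the prefix length, so a lookup result can be
	 * checked against the matching tlpm entry. The map is created with
	 * BPF_F_NO_PREALLOC so the trie can allocate nodes on demand.
	 */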
	map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
			     sizeof(*key) + keysize,
			     keysize + 1,
			     4096,
			     BPF_F_NO_PREALLOC);
	assert(map >= 0);

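	/* Insert n_nodes random prefixes into both tlpm and the bpf map. The
	 * prefix length is drawn from 0..8 * keysize and stored in the last
	 * byte of the value.
	 */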
	for (i = 0; i < n_nodes; ++i) {
		for (j = 0; j < keysize; ++j)
			value[j] = rand() & 0xff;
		value[keysize] = rand() % (8 * keysize + 1);

		list = tlpm_add(list, value, value[keysize]);

		key->prefixlen = value[keysize];
		memcpy(key->data, value, keysize);
		r = bpf_map_update_elem(map, key, value, 0);
		assert(!r);
	}

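	/* Look up n_lookups random keys with the maximum prefix length. The
	 * bpf map must miss (ENOENT) exactly when tlpm misses, and on a hit
	 * the returned value must describe the same prefix tlpm matched.
	 */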
	for (i = 0; i < n_lookups; ++i) {
		for (j = 0; j < keysize; ++j)
			data[j] = rand() & 0xff;

		t = tlpm_match(list, data, 8 * keysize);

		key->prefixlen = 8 * keysize;
		memcpy(key->data, data, keysize);
		r = bpf_map_lookup_elem(map, key, value);
		assert(!r || errno == ENOENT);
		assert(!t == !!r);

		if (t) {
			++n_matches;
			assert(t->n_bits == value[keysize]);
			for (j = 0; j < t->n_bits; ++j)
				assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
				       (value[j / 8] & (1 << (7 - j % 8))));
		}
	}

	close(map);
	tlpm_clear(list);

	/* With 256 random nodes in the map, we are pretty likely to match
	 * something on every lookup. For statistics, use this:
	 *
	 *	printf("  nodes: %zu\n"
	 *	       "lookups: %zu\n"
	 *	       "matches: %zu\n", n_nodes, n_lookups, n_matches);
	 */
}

/* Test the implementation with some 'real world' examples */

static void test_lpm_ipaddr(void)
{
	struct bpf_lpm_trie_key *key_ipv4;
	struct bpf_lpm_trie_key *key_ipv6;
	size_t key_size_ipv4;
	size_t key_size_ipv6;
	int map_fd_ipv4;
	int map_fd_ipv6;
	__u64 value;

	key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32);
	key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4;
	key_ipv4 = alloca(key_size_ipv4);
	key_ipv6 = alloca(key_size_ipv6);

	map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
				     key_size_ipv4, sizeof(value),
				     100, BPF_F_NO_PREALLOC);
	assert(map_fd_ipv4 >= 0);

	map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
				     key_size_ipv6, sizeof(value),
				     100, BPF_F_NO_PREALLOC);
	assert(map_fd_ipv6 >= 0);

	/* Fill the maps with some IPv4 and IPv6 address ranges */
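	/* The IPv4 prefixes overlap on purpose: 192.168.0.0/16 (value 1),
	 * 192.168.0.0/23 (value 4), 192.168.0.0/24 (value 2) and
	 * 192.168.1.0/24 (value 5) nest inside each other, so the lookups
	 * below must return the value of the longest matching prefix.
	 */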
	value = 1;
	key_ipv4->prefixlen = 16;
	inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 2;
	key_ipv4->prefixlen = 24;
	inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 3;
	key_ipv4->prefixlen = 24;
	inet_pton(AF_INET, "192.168.128.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 5;
	key_ipv4->prefixlen = 24;
	inet_pton(AF_INET, "192.168.1.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 4;
	key_ipv4->prefixlen = 23;
	inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
	assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);

	value = 0xdeadbeef;
	key_ipv6->prefixlen = 64;
	inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data);
	assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0);

	/* Set prefixlen to maximum for lookups */
	key_ipv4->prefixlen = 32;
	key_ipv6->prefixlen = 128;

	/* Test some lookups that should come back with a value */
	inet_pton(AF_INET, "192.168.128.23", key_ipv4->data);
	assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
	assert(value == 3);

	inet_pton(AF_INET, "192.168.0.1", key_ipv4->data);
	assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
	assert(value == 2);

	inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data);
	assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
	assert(value == 0xdeadbeef);

	inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data);
	assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
	assert(value == 0xdeadbeef);

	/* Test some lookups that should not match any entry */
	inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
	assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
	       errno == ENOENT);

	inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
	assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
	       errno == ENOENT);

	inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
	assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
	       errno == ENOENT);

	close(map_fd_ipv4);
	close(map_fd_ipv6);
}

int main(void)
{
	struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
	int i, ret;

	/* we want predictable, pseudo random tests */
	srand(0xf00ba1);

	/* allow unlimited locked memory */
	ret = setrlimit(RLIMIT_MEMLOCK, &limit);
	if (ret < 0)
		perror("Unable to lift memlock rlimit");

	test_lpm_basic();
	test_lpm_order();

	/* Test with key sizes of 1..16 bytes, i.e. maximum prefix
	 * lengths of 8, 16, 24, ..., 128 bits.
	 */
	for (i = 1; i <= 16; ++i)
		test_lpm_map(i);

	test_lpm_ipaddr();

	printf("test_lpm: OK\n");
	return 0;
}