zebra: netlink FPM interface using zebra data plane #5510

Merged Apr 14, 2020 (21 commits).

Changes from 1 commit.

Commits:
e57a3fa  zebra: generalize netlink route talk function (rzalamena, Nov 29, 2019)
9266b31  zebra: simplify some netlink debug messages (rzalamena, Nov 29, 2019)
b9c8751  zebra: simplify netlink_route_multipath (rzalamena, Dec 2, 2019)
f78fe8f  zebra: export netlink function and change return (rzalamena, Dec 4, 2019)
f73a846  zebra: dataplane context reset and init apis (Dec 10, 2019)
d35f447  zebra: data plane plugin for FPM netlink (rzalamena, Dec 4, 2019)
018e77b  zebra: data plane FPM RIB walk code (rzalamena, Dec 4, 2019)
d4d4ec1  zebra: adapt and export rmac netlink functions (rzalamena, Dec 9, 2019)
bda10ad  zebra: data plane FPM RMAC walk code (rzalamena, Dec 9, 2019)
3bdd7fc  zebra: CLI commands for new FPM interface (rzalamena, Dec 10, 2019)
6cc059c  zebra: implement FPM counters (rzalamena, Dec 11, 2019)
a179ba3  zebra: simplify FPM buffer full detection (rzalamena, Dec 11, 2019)
ad4d102  zebra: improve FPM output buffer handling (rzalamena, Dec 12, 2019)
ba803a2  zebra: queue data plane context for FPM (rzalamena, Dec 12, 2019)
edfeff4  zebra: use atomic operations in FPM (rzalamena, Dec 17, 2019)
c871e6c  build: fix data plane FPM netlink module (rzalamena, Jan 3, 2020)
770a8d2  zebra: fix style on data plane FPM module (rzalamena, Jan 8, 2020)
f2a0ba3  zebra: data plane FPM add support RMAC VNI (rzalamena, Jan 13, 2020)
a50404a  zebra: fix some formatting/style issues (rzalamena, Feb 10, 2020)
e5e444d  zebra: hide verbose data plane FPM log messages (rzalamena, Feb 11, 2020)
9d5c326  zebra: fix hash_backet typo in data plane FPM (rzalamena, Mar 23, 2020)
zebra: queue data plane context for FPM
Enqueue all contexts inside FPM to avoid losing updates and to move all
processing to the FPM thread.

This helps in situations with a huge number of routes (e.g. a BGP peer
flapping with a million routes).

Signed-off-by: Rafael Zalamena <rzalamena@opensourcerouting.org>
rzalamena committed Apr 14, 2020
commit ba803a2fbed5459a05244f7742de8c5df7ff2d81
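
The change is a classic producer/consumer split: the data plane worker only enqueues contexts under a mutex, and the FPM pthread dequeues and serializes them once the output buffer has room, rescheduling itself while work remains. The fragment below is a minimal, self-contained sketch of that pattern for illustration only; it uses the standard BSD TAILQ macros and pthreads instead of FRR's dplane/thread APIs, and the names ctx_item, enqueue_ctx and process_queue are hypothetical.

/*
 * Illustrative sketch: a mutex-protected context queue with a producer
 * and a consumer, mirroring the enqueue/dequeue split in this commit.
 * The types and function names here are hypothetical, not FRR APIs.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct ctx_item {
	int id; /* stand-in for a zebra data plane context */
	TAILQ_ENTRY(ctx_item) entries;
};

TAILQ_HEAD(ctx_queue, ctx_item);

static struct ctx_queue queue = TAILQ_HEAD_INITIALIZER(queue);
static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long queue_len, queue_len_peak;

/* Producer side: called by the data plane path; only queue and account. */
static void enqueue_ctx(struct ctx_item *item)
{
	pthread_mutex_lock(&queue_mutex);
	TAILQ_INSERT_TAIL(&queue, item, entries);
	queue_len++;
	if (queue_len_peak < queue_len)
		queue_len_peak = queue_len;
	pthread_mutex_unlock(&queue_mutex);
}

/* Consumer side: called from the dedicated worker thread; drain the queue. */
static void process_queue(void)
{
	struct ctx_item *item;

	pthread_mutex_lock(&queue_mutex);
	while ((item = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, item, entries);
		queue_len--;
		printf("processed context %d\n", item->id);
		free(item);
	}
	pthread_mutex_unlock(&queue_mutex);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct ctx_item *item = calloc(1, sizeof(*item));

		if (item == NULL)
			return 1;
		item->id = i;
		enqueue_ctx(item);
	}

	process_queue();
	printf("peak queue length: %lu\n", queue_len_peak);
	return 0;
}

In the real module the consumer additionally checks STREAM_WRITEABLE() against NL_PKT_BUF_SIZE before dequeuing and re-arms itself with thread_add_timer() while the queue is non-empty, as the diff below shows.
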
zebra/dplane_fpm_nl.c: 74 additions and 3 deletions (77 changes)
@@ -76,12 +76,22 @@ struct fpm_nl_ctx {
	struct stream *obuf;
	pthread_mutex_t obuf_mutex;

	/*
	 * data plane context queue:
	 * When a FPM server connection becomes a bottleneck, we must keep the
	 * data plane contexts until we get a chance to process them.
	 */
	struct dplane_ctx_q ctxqueue;
	pthread_mutex_t ctxqueue_mutex;

	/* data plane events. */
	struct zebra_dplane_provider *prov;
	struct frr_pthread *fthread;
	struct thread *t_connect;
	struct thread *t_read;
	struct thread *t_write;
	struct thread *t_event;
	struct thread *t_dequeue;

	/* zebra events. */
	struct thread *t_ribreset;
@@ -112,6 +122,10 @@ struct fpm_nl_ctx {

		/* Amount of data plane context processed. */
		uint64_t dplane_contexts;
		/* Amount of data plane contexts enqueued. */
		uint64_t ctxqueue_len;
		/* Peak amount of data plane contexts enqueued. */
		uint64_t ctxqueue_len_peak;

		/* Amount of buffer full events. */
		uint64_t buffer_full;
@@ -266,6 +280,10 @@ DEFUN(fpm_show_counters, fpm_show_counters_cmd,
	SHOW_COUNTER("Connection errors", gfnc->counters.connection_errors);
	SHOW_COUNTER("Data plane items processed",
		     gfnc->counters.dplane_contexts);
	SHOW_COUNTER("Data plane items enqueued",
		     gfnc->counters.ctxqueue_len);
	SHOW_COUNTER("Data plane items queue peak",
		     gfnc->counters.ctxqueue_len_peak);
	SHOW_COUNTER("Buffer full hits", gfnc->counters.buffer_full);
	SHOW_COUNTER("User FPM configurations", gfnc->counters.user_configures);
	SHOW_COUNTER("User FPM disable requests", gfnc->counters.user_disables);
@@ -292,6 +310,10 @@ DEFUN(fpm_show_counters_json, fpm_show_counters_json_cmd,
	json_object_int_add(jo, "connection-closes", gfnc->counters.connection_closes);
	json_object_int_add(jo, "connection-errors", gfnc->counters.connection_errors);
	json_object_int_add(jo, "data-plane-contexts", gfnc->counters.dplane_contexts);
	json_object_int_add(jo, "data-plane-contexts-queue",
			    gfnc->counters.ctxqueue_len);
	json_object_int_add(jo, "data-plane-contexts-queue-peak",
			    gfnc->counters.ctxqueue_len_peak);
	json_object_int_add(jo, "buffer-full-hits", gfnc->counters.buffer_full);
	json_object_int_add(jo, "user-configures", gfnc->counters.user_configures);
	json_object_int_add(jo, "user-disables", gfnc->counters.user_disables);
@@ -866,6 +888,41 @@ static int fpm_rmac_reset(struct thread *t)
	return 0;
}

static int fpm_process_queue(struct thread *t)
{
	struct fpm_nl_ctx *fnc = THREAD_ARG(t);
	struct zebra_dplane_ctx *ctx;

	frr_mutex_lock_autounlock(&fnc->ctxqueue_mutex);

	while (true) {
		/* No space available yet. */
		if (STREAM_WRITEABLE(fnc->obuf) < NL_PKT_BUF_SIZE)
			break;

		/* Dequeue next item or quit processing. */
		ctx = dplane_ctx_dequeue(&fnc->ctxqueue);
		if (ctx == NULL)
			break;

		fpm_nl_enqueue(fnc, ctx);

		/* Account the processed entries. */
		fnc->counters.dplane_contexts++;
		fnc->counters.ctxqueue_len--;

		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
		dplane_provider_enqueue_out_ctx(fnc->prov, ctx);
	}

	/* Check for more items in the queue. */
	if (fnc->counters.ctxqueue_len)
		thread_add_timer(fnc->fthread->master, fpm_process_queue,
				 fnc, 0, &fnc->t_dequeue);

	return 0;
}

/**
 * Handles external (e.g. CLI, data plane or others) events.
 */
@@ -919,6 +976,9 @@ static int fpm_nl_start(struct zebra_dplane_provider *prov)
	pthread_mutex_init(&fnc->obuf_mutex, NULL);
	fnc->socket = -1;
	fnc->disabled = true;
	fnc->prov = prov;
	TAILQ_INIT(&fnc->ctxqueue);
	pthread_mutex_init(&fnc->ctxqueue_mutex, NULL);

	return 0;
}
@@ -953,15 +1013,26 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov)
		 * anyway.
		 */
		if (fnc->socket != -1 && fnc->connecting == false) {
			fpm_nl_enqueue(fnc, ctx);

			fnc->counters.dplane_contexts++;
			frr_mutex_lock_autounlock(&fnc->ctxqueue_mutex);
			dplane_ctx_enqueue_tail(&fnc->ctxqueue, ctx);

			/* Account the number of contexts. */
			fnc->counters.ctxqueue_len++;
			if (fnc->counters.ctxqueue_len_peak <
			    fnc->counters.ctxqueue_len)
				fnc->counters.ctxqueue_len_peak =
					fnc->counters.ctxqueue_len;
			continue;
		}

		dplane_ctx_set_status(ctx, ZEBRA_DPLANE_REQUEST_SUCCESS);
		dplane_provider_enqueue_out_ctx(prov, ctx);
	}

	if (fnc->counters.ctxqueue_len)
		thread_add_timer(fnc->fthread->master, fpm_process_queue,
				 fnc, 0, &fnc->t_dequeue);

	return 0;
}
