Squash to "selftests/bpf: Add bpf_burst scheduler"
Use __always_inline instead of inline.

Define tcp_rtx_and_write_queues_empty() and sk_stream_memory_free() in
BPF context.

Signed-off-by: Geliang Tang <[email protected]>
geliangtang authored and intel-lab-lkp committed Aug 3, 2023
1 parent e470801 commit ef9b080
Showing 2 changed files with 35 additions and 9 deletions.
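
The two helpers named in the commit message are BPF-side stand-ins for in-kernel helpers that a BPF struct_ops program cannot call directly. Roughly simplified from include/net/sock.h and include/net/tcp.h (a sketch from memory, not a verbatim quote), the kernel versions look like this:

/* include/net/sock.h, simplified: refuse when the queued write memory
 * already exceeds the send buffer, otherwise defer to the protocol
 * (tcp_stream_memory_free() for TCP). */
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
	if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
		return false;

	return sk->sk_prot->stream_memory_free ?
	       sk->sk_prot->stream_memory_free(sk, wake) : true;
}

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	return __sk_stream_memory_free(sk, 0);
}

/* include/net/tcp.h: both the retransmit queue and the write queue
 * are empty. */
static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}

The BPF variants in the diff below cannot follow sk->sk_prot or inspect the retransmit rbtree, so they rely on the tcp_stream_memory_free() and bpf_mptcp_subflow_queues_empty() kfuncs instead; reading sk->sk_wmem_queued and sk->sk_sndbuf directly is also why sk_sndbuf is added to the minimal struct sock in bpf_tcp_helpers.h.
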
1 change: 1 addition & 0 deletions tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -36,6 +36,7 @@ enum sk_pacing {
 struct sock {
 	struct sock_common __sk_common;
 #define sk_state __sk_common.skc_state
+	int sk_sndbuf;
 	int sk_wmem_queued;
 	unsigned long sk_pacing_rate;
 	__u32 sk_pacing_status; /* see enum sk_pacing */
43 changes: 34 additions & 9 deletions tools/testing/selftests/bpf/progs/mptcp_bpf_burst.c
@@ -25,22 +25,47 @@ struct subflow_send_info {
 	__u64 linger_time;
 };
 
-static inline __u64 div_u64(__u64 dividend, __u32 divisor)
-{
-	return dividend / divisor;
-}
-
 extern bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) __ksym;
 extern void mptcp_set_timeout(struct sock *sk) __ksym;
 extern __u64 mptcp_wnd_end(const struct mptcp_sock *msk) __ksym;
-extern bool bpf_mptcp_subflow_memory_free(const struct sock *sk) __ksym;
-extern bool bpf_mptcp_subflow_queues_empty(const struct sock *sk) __ksym;
+extern bool tcp_stream_memory_free(const struct sock *sk, int wake) __ksym;
+extern bool bpf_mptcp_subflow_queues_empty(struct sock *sk) __ksym;
 extern void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) __ksym;
 
 #define SSK_MODE_ACTIVE 0
 #define SSK_MODE_BACKUP 1
 #define SSK_MODE_MAX 2
 
+static __always_inline __u64 div_u64(__u64 dividend, __u32 divisor)
+{
+	return dividend / divisor;
+}
+
+static __always_inline bool tcp_write_queue_empty(struct sock *sk)
+{
+	const struct tcp_sock *tp = bpf_skc_to_tcp_sock(sk);
+
+	return tp ? tp->write_seq == tp->snd_nxt : true;
+}
+
+static __always_inline bool tcp_rtx_and_write_queues_empty(struct sock *sk)
+{
+	return bpf_mptcp_subflow_queues_empty(sk) && tcp_write_queue_empty(sk);
+}
+
+static __always_inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
+{
+	if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+		return false;
+
+	return tcp_stream_memory_free(sk, wake);
+}
+
+static __always_inline bool sk_stream_memory_free(const struct sock *sk)
+{
+	return __sk_stream_memory_free(sk, 0);
+}
+
 SEC("struct_ops/mptcp_sched_burst_init")
 void BPF_PROG(mptcp_sched_burst_init, struct mptcp_sock *msk)
 {
@@ -110,7 +135,7 @@ static int bpf_burst_get_send(struct mptcp_sock *msk,
 	if (!subflow)
 		return -1;
 	ssk = mptcp_subflow_tcp_sock(subflow);
-	if (!ssk || !bpf_mptcp_subflow_memory_free(ssk))
+	if (!ssk || !sk_stream_memory_free(ssk))
 		return -1;
 
 	burst = min(MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
@@ -149,7 +174,7 @@ static int bpf_burst_get_retrans(struct mptcp_sock *msk,
 
 		ssk = mptcp_subflow_tcp_sock(subflow);
 		/* still data outstanding at TCP level? skip this */
-		if (!bpf_mptcp_subflow_queues_empty(ssk)) {
+		if (!tcp_rtx_and_write_queues_empty(ssk)) {
 			mptcp_pm_subflow_chk_stale(msk, ssk);
 			min_stale_count = min(min_stale_count, subflow->stale_count);
 			continue;
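
Not part of this diff, but useful for orientation: bpf_burst_get_send() and bpf_burst_get_retrans() are dispatched from the scheduler's get_subflow hook, and the whole scheduler is exposed to the kernel as a struct_ops map. Assuming the rest of mptcp_bpf_burst.c follows the same pattern as the other MPTCP scheduler selftests in this series (only mptcp_sched_burst_init is visible in the hunks above; the release and get_subflow names below are inferred), the registration is roughly:

/* inferred sketch, not shown in this commit */
SEC(".struct_ops")
struct mptcp_sched_ops burst = {
	.init		= (void *)mptcp_sched_burst_init,
	.release	= (void *)mptcp_sched_burst_release,
	.get_subflow	= (void *)bpf_burst_get_subflow,
	.name		= "bpf_burst",
};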

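To exercise the scheduler from user space, a loader would follow the usual libbpf skeleton flow. The skeleton name below comes from the object file name, the map name from the struct_ops variable sketched above, and the net.mptcp.scheduler sysctl from the BPF scheduler series; all of these are assumptions rather than something this commit shows:

/* hypothetical loader sketch; mptcp_bpf_burst.skel.h is the skeleton
 * bpftool would generate for this object file */
#include "mptcp_bpf_burst.skel.h"

static int load_bpf_burst(void)
{
	struct mptcp_bpf_burst *skel;
	struct bpf_link *link;

	skel = mptcp_bpf_burst__open_and_load();
	if (!skel)
		return -1;

	/* register the struct_ops map so "bpf_burst" becomes selectable */
	link = bpf_map__attach_struct_ops(skel->maps.burst);
	if (!link) {
		mptcp_bpf_burst__destroy(skel);
		return -1;
	}

	/* then switch the scheduler, e.g.:
	 *   sysctl -w net.mptcp.scheduler=bpf_burst
	 */
	return 0;
}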