Skip to content

Commit e3d0ab8

Browse files
FedeDP authored and poiana committed
fix(driver/bpf): fixed a couple of verifier issues.
Signed-off-by: Federico Di Pierro <[email protected]>
1 parent 774db6b commit e3d0ab8

File tree

1 file changed

+111
-189
lines changed

1 file changed

+111
-189
lines changed

driver/bpf/fillers.h

+111-189
Original file line numberDiff line numberDiff line change
@@ -571,236 +571,158 @@ FILLER(sys_poll_x, true)
571571
return bpf_poll_parse_fds(data, false);
572572
}
573573

574-
#define MAX_IOVCNT 32
575-
#define MAX_IOVCNT_COMPAT 8
574+
#ifdef CONFIG_COMPAT
575+
#define MAX_IOVCNT 8
576+
#else
577+
#define MAX_IOVCNT 32
578+
#endif
576579

577-
static __always_inline int bpf_parse_readv_writev_bufs_64(struct filler_data *data,
580+
static __always_inline int bpf_parse_readv_writev_bufs(struct filler_data *data,
578581
const void __user *iovsrc,
579582
unsigned long iovcnt,
580583
long retval,
581-
int flags,
582-
unsigned long *size)
584+
int flags)
583585
{
584-
const struct iovec *iov;
585586
int res = PPM_SUCCESS;
586587
unsigned long copylen;
588+
long size = 0;
587589
int j;
590+
unsigned long iov_size = sizeof(struct iovec);
591+
unsigned long len_off = offsetof(struct iovec, iov_len);
592+
unsigned long base_off = offsetof(struct iovec, iov_base);
593+
unsigned long ptr_size = sizeof(void *);
588594

589-
copylen = iovcnt * sizeof(struct iovec);
590-
iov = (const struct iovec *)data->tmp_scratch;
595+
#ifdef CONFIG_COMPAT
596+
if (bpf_in_ia32_syscall())
597+
{
598+
iov_size = sizeof(struct compat_iovec);
599+
len_off = offsetof(struct compat_iovec, iov_len);
600+
base_off = offsetof(struct compat_iovec, iov_base);
601+
ptr_size = 4;
602+
}
603+
#endif
591604

605+
copylen = iovcnt * iov_size;
592606
if (copylen > SCRATCH_SIZE_MAX)
593607
{
594608
return PPM_FAILURE_FRAME_SCRATCH_MAP_FULL;
595609
}
596610

597611
#ifdef BPF_FORBIDS_ZERO_ACCESS
598-
if (copylen)
599-
if (bpf_probe_read_user((void *)iov,
600-
((copylen - 1) & SCRATCH_SIZE_MAX) + 1,
601-
(void *)iovsrc))
612+
if (copylen)
613+
if (bpf_probe_read_user(data->tmp_scratch,
614+
((copylen - 1) & SCRATCH_SIZE_MAX) + 1,
615+
(void *)iovsrc))
602616
#else
603-
if (bpf_probe_read_user((void *)iov,
604-
copylen & SCRATCH_SIZE_MAX,
605-
(void *)iovsrc))
617+
if (bpf_probe_read_user(data->tmp_scratch,
618+
copylen & SCRATCH_SIZE_MAX,
619+
(void *)iovsrc))
606620
#endif
607621
return PPM_FAILURE_INVALID_USER_MEMORY;
608622

609-
#pragma unroll
623+
624+
#pragma unroll
610625
for (j = 0; j < MAX_IOVCNT; ++j) {
611626
if (j == iovcnt)
612627
break;
613628
// BPF seems to require a hard limit to avoid overflows
614-
if (*size == LONG_MAX)
629+
if (size == LONG_MAX)
615630
break;
616631

617-
*size += iov[j].iov_len;
632+
volatile unsigned curr_shift = j * iov_size + len_off;
633+
unsigned long shift_bounded = curr_shift & SCRATCH_SIZE_HALF;
634+
if (curr_shift > SCRATCH_SIZE_HALF)
635+
break;
636+
637+
long curr_len;
638+
if (ptr_size == 4)
639+
{
640+
curr_len = *((int *)(data->tmp_scratch + shift_bounded));
641+
}
642+
else
643+
{
644+
curr_len = *((long *)(data->tmp_scratch + shift_bounded));
645+
}
646+
size += curr_len;
618647
}
619648

620649
if ((flags & PRB_FLAG_IS_WRITE) == 0)
621-
if (*size > retval)
622-
*size = retval;
650+
if (size > retval)
651+
size = retval;
623652

624-
if (flags & PRB_FLAG_PUSH_SIZE && res == PPM_SUCCESS) {
625-
res = bpf_push_u32_to_ring(data, (uint32_t)*size);
653+
if (flags & PRB_FLAG_PUSH_SIZE) {
654+
res = bpf_push_u32_to_ring(data, (uint32_t)size);
626655
CHECK_RES(res);
627656
}
628657

629658
if (flags & PRB_FLAG_PUSH_DATA) {
630-
if (*size > 0) {
659+
if (size > 0) {
631660
unsigned long off = _READ(data->state->tail_ctx.curoff);
632-
unsigned long remaining = *size;
633-
int j;
661+
unsigned long remaining = size;
634662

635-
#pragma unroll
663+
#pragma unroll
636664
for (j = 0; j < MAX_IOVCNT; ++j) {
637665
volatile unsigned int to_read;
638-
639666
if (j == iovcnt)
640667
break;
641-
642668
unsigned long off_bounded = off & SCRATCH_SIZE_HALF;
643669
if (off > SCRATCH_SIZE_HALF)
644670
break;
645671

646-
if (iov[j].iov_len <= remaining)
647-
to_read = iov[j].iov_len;
672+
volatile unsigned len_curr_shift = j * iov_size + len_off;
673+
unsigned long len_shift_bounded = len_curr_shift & SCRATCH_SIZE_HALF;
674+
if (len_curr_shift > SCRATCH_SIZE_HALF)
675+
break;
676+
677+
long curr_len;
678+
if (ptr_size == 4)
679+
{
680+
curr_len = *((int *)(data->tmp_scratch + len_shift_bounded));
681+
}
682+
else
683+
{
684+
curr_len = *((long *)(data->tmp_scratch + len_shift_bounded));
685+
}
686+
if (curr_len <= remaining)
687+
to_read = curr_len;
648688
else
649689
to_read = remaining;
650-
651690
if (to_read > SCRATCH_SIZE_HALF)
652691
to_read = SCRATCH_SIZE_HALF;
653692

654-
#ifdef BPF_FORBIDS_ZERO_ACCESS
655-
if (to_read)
656-
if (bpf_probe_read_user(&data->buf[off_bounded],
657-
((to_read - 1) & SCRATCH_SIZE_HALF) + 1,
658-
(void*)iov[j].iov_base))
659-
#else
660-
if (bpf_probe_read_user(&data->buf[off_bounded],
661-
to_read & SCRATCH_SIZE_HALF,
662-
(void*)iov[j].iov_base))
663-
#endif
664-
return PPM_FAILURE_INVALID_USER_MEMORY;
665-
666-
remaining -= to_read;
667-
off += to_read;
668-
}
669-
} else {
670-
*size = 0;
671-
}
672-
return PPM_SUCCESS;
673-
}
674-
675-
return res;
676-
}
677-
678-
#ifdef CONFIG_COMPAT
679-
static __always_inline int bpf_parse_readv_writev_bufs_32(struct filler_data *data,
680-
const void __user *iovsrc,
681-
unsigned long iovcnt,
682-
long retval,
683-
int flags,
684-
unsigned long *size)
685-
{
686-
const struct compat_iovec *compat_iov;
687-
int res = PPM_SUCCESS;
688-
unsigned long copylen;
689-
int j;
690-
691-
copylen = iovcnt * sizeof(struct compat_iovec);
692-
compat_iov = (const struct compat_iovec *)data->tmp_scratch;
693-
694-
if (copylen > SCRATCH_SIZE_MAX)
695-
{
696-
return PPM_FAILURE_FRAME_SCRATCH_MAP_FULL;
697-
}
698-
699-
#ifdef BPF_FORBIDS_ZERO_ACCESS
700-
if (copylen)
701-
if (bpf_probe_read_user((void *)compat_iov,
702-
((copylen - 1) & SCRATCH_SIZE_MAX) + 1,
703-
(void *)iovsrc))
704-
#else
705-
if (bpf_probe_read_user((void *)compat_iov,
706-
copylen & SCRATCH_SIZE_MAX,
707-
(void *)iovsrc))
708-
#endif
709-
return PPM_FAILURE_INVALID_USER_MEMORY;
710-
711-
#pragma unroll
712-
for (j = 0; j < MAX_IOVCNT_COMPAT; ++j) {
713-
if (j == iovcnt)
714-
break;
715-
// BPF seems to require a hard limit to avoid overflows
716-
if (*size == LONG_MAX)
717-
break;
718-
719-
*size += compat_iov[j].iov_len;
720-
}
721-
722-
if ((flags & PRB_FLAG_IS_WRITE) == 0)
723-
if (*size > retval)
724-
*size = retval;
725-
726-
if (flags & PRB_FLAG_PUSH_SIZE && res == PPM_SUCCESS) {
727-
res = bpf_push_u32_to_ring(data, (uint32_t)*size);
728-
CHECK_RES(res);
729-
}
730-
731-
if (flags & PRB_FLAG_PUSH_DATA) {
732-
if (*size > 0) {
733-
unsigned long off = _READ(data->state->tail_ctx.curoff);
734-
unsigned long remaining = *size;
735-
int j;
736-
737-
// The 14 iovec count limit is due to old kernels verifiers
738-
// complaining.
739-
#pragma unroll
740-
for (j = 0; j < MAX_IOVCNT_COMPAT; ++j) {
741-
volatile unsigned int to_read;
742-
743-
if (j == iovcnt)
693+
volatile unsigned base_curr_shift = j * iov_size + base_off;
694+
unsigned long base_shift_bounded = base_curr_shift & SCRATCH_SIZE_HALF;
695+
if (base_curr_shift > SCRATCH_SIZE_HALF)
744696
break;
745697

746-
unsigned long off_bounded = off & SCRATCH_SIZE_HALF;
747-
if (off > SCRATCH_SIZE_HALF)
748-
break;
749-
750-
if (compat_iov[j].iov_len <= remaining)
751-
to_read = compat_iov[j].iov_len;
698+
unsigned long curr_base;
699+
if (ptr_size == 4)
700+
{
701+
curr_base = *((unsigned int *)(data->tmp_scratch + base_shift_bounded));
702+
}
752703
else
753-
to_read = remaining;
754-
755-
if (to_read > SCRATCH_SIZE_HALF)
756-
to_read = SCRATCH_SIZE_HALF;
757-
758-
#ifdef BPF_FORBIDS_ZERO_ACCESS
759-
if (to_read)
760-
if (bpf_probe_read_user(&data->buf[off_bounded],
761-
((to_read - 1) & SCRATCH_SIZE_HALF) + 1,
762-
(void*)compat_iov[j].iov_base))
763-
#else
704+
{
705+
curr_base = *((unsigned long *)(data->tmp_scratch + base_shift_bounded));
706+
}
707+
#ifdef BPF_FORBIDS_ZERO_ACCESS
708+
if (to_read)
764709
if (bpf_probe_read_user(&data->buf[off_bounded],
765-
to_read & SCRATCH_SIZE_HALF,
766-
(void*)compat_iov[j].iov_base))
767-
#endif
710+
((to_read - 1) & SCRATCH_SIZE_HALF) + 1,
711+
(void *)curr_base))
712+
#else
713+
if (bpf_probe_read_user(&data->buf[off_bounded],
714+
to_read & SCRATCH_SIZE_HALF,
715+
(void *)curr_base))
716+
#endif
768717
return PPM_FAILURE_INVALID_USER_MEMORY;
769718

770719
remaining -= to_read;
771720
off += to_read;
772721
}
773722
} else {
774-
*size = 0;
723+
size = 0;
775724
}
776725

777-
return PPM_SUCCESS;
778-
}
779-
return res;
780-
}
781-
#endif
782-
783-
static __always_inline int bpf_parse_readv_writev_bufs(struct filler_data *data,
784-
const void __user *iovsrc,
785-
unsigned long iovcnt,
786-
long retval,
787-
int flags)
788-
{
789-
unsigned long size = 0;
790-
int res = PPM_SUCCESS;
791-
if (!bpf_in_ia32_syscall())
792-
{
793-
res = bpf_parse_readv_writev_bufs_64(data, iovsrc, iovcnt, retval, flags, &size);
794-
}
795-
else
796-
{
797-
#ifdef CONFIG_COMPAT
798-
res = bpf_parse_readv_writev_bufs_32(data, iovsrc, iovcnt, retval, flags, &size);
799-
#endif
800-
}
801-
802-
if(flags & PRB_FLAG_PUSH_DATA && res == PPM_SUCCESS)
803-
{
804726
data->fd = bpf_syscall_get_argument(data, 0);
805727
data->curarg_already_on_frame = true;
806728
return __bpf_val_to_ring(data, 0, size, PT_BYTEBUF, -1, true, KERNEL);
@@ -4312,10 +4234,12 @@ FILLER(sys_recvfrom_x, true)
43124234
unsigned long val;
43134235
uint16_t size = 0;
43144236
long retval;
4315-
int addrlen;
4237+
int addrlen = 0;
43164238
int err = 0;
43174239
int res;
43184240
int fd;
4241+
bool push = true;
4242+
bool from_usr = false;
43194243

43204244
/*
43214245
* Push the common params to the ring
@@ -4324,6 +4248,7 @@ FILLER(sys_recvfrom_x, true)
43244248
res = f_sys_recv_x_common(data, retval);
43254249
CHECK_RES(res);
43264250

4251+
43274252
if (retval >= 0) {
43284253
/*
43294254
* Get the fd
@@ -4350,29 +4275,26 @@ FILLER(sys_recvfrom_x, true)
43504275
*/
43514276
err = bpf_addr_to_kernel(usrsockaddr, addrlen,
43524277
(struct sockaddr *)data->tmp_scratch);
4353-
if (err >= 0) {
4278+
if (err >= 0)
4279+
{
43544280
/*
4355-
* Convert the fd into socket endpoint information
4281+
* Convert the fd into socket endpoint information
43564282
*/
4357-
size = bpf_fd_to_socktuple(data,
4358-
fd,
4359-
(struct sockaddr *)data->tmp_scratch,
4360-
addrlen,
4361-
true,
4362-
true,
4363-
data->tmp_scratch + sizeof(struct sockaddr_storage));
4283+
from_usr = true;
43644284
}
4365-
} else {
4285+
else
4286+
{
4287+
// Do not send any socket endpoint info.
4288+
push = false;
4289+
}
4290+
}
4291+
if (push)
4292+
{
43664293
/*
4367-
* Get socket endpoint information from fd if the user-provided *sockaddr is NULL
4368-
*/
4369-
size = bpf_fd_to_socktuple(data,
4370-
fd,
4371-
NULL,
4372-
0,
4373-
false,
4374-
true,
4375-
data->tmp_scratch + sizeof(struct sockaddr_storage));
4294+
* Get socket endpoint information from fd if the user-provided *sockaddr is NULL
4295+
*/
4296+
size = bpf_fd_to_socktuple(data, fd, (struct sockaddr *)data->tmp_scratch, addrlen, from_usr,
4297+
true, data->tmp_scratch + sizeof(struct sockaddr_storage));
43764298
}
43774299
}
43784300

0 commit comments

Comments
 (0)