The following commit has been merged in the master branch:
commit a7efefc7b49c24525331c7716f8b6de9bd023401
Merge: 3572172a081601ea325e57f7381917d62fe9859e 356d71e00d278d865f8c7f68adebd6ce4698a7e2
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Fri Mar 29 09:24:09 2019 +1100
Merge remote-tracking branch 'net-next/master'
diff --combined kernel/bpf/verifier.c
index 6c5a41f7f338,2fe89138309a..b7ad8003c4e6
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@@ -377,7 -377,8 +377,8 @@@ static bool is_release_function(enum bp
static bool is_acquire_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_lookup_tcp ||
- func_id == BPF_FUNC_sk_lookup_udp;
+ func_id == BPF_FUNC_sk_lookup_udp ||
+ func_id == BPF_FUNC_skc_lookup_tcp;
}
static bool is_ptr_cast_function(enum bpf_func_id func_id)
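
For readability, here is a sketch of is_acquire_function() as it reads after this hunk, reconstructed from the '+' lines above (kernel-internal helper, shown only as context):

static bool is_acquire_function(enum bpf_func_id func_id)
{
	/* these lookup helpers return a socket with a reference held,
	 * which the program is later required to release
	 */
	return func_id == BPF_FUNC_sk_lookup_tcp ||
	       func_id == BPF_FUNC_sk_lookup_udp ||
	       func_id == BPF_FUNC_skc_lookup_tcp;
}
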
@@@ -1897,9 -1898,8 +1898,9 @@@ continue_func
}
frame++;
if (frame >= MAX_CALL_FRAMES) {
- WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
- return -EFAULT;
+ verbose(env, "the call stack of %d frames is too deep !\n",
+ frame);
+ return -E2BIG;
}
goto process_func;
}
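
Reconstructed from the '+' lines, an over-deep call chain is now reported through the verifier log and rejected instead of triggering a kernel WARN (a sketch of the resulting shape only, not the full surrounding function):

	frame++;
	if (frame >= MAX_CALL_FRAMES) {
		/* user-visible verifier error and -E2BIG instead of
		 * WARN_ONCE() and -EFAULT
		 */
		verbose(env, "the call stack of %d frames is too deep !\n",
			frame);
		return -E2BIG;
	}
	goto process_func;
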
@@@ -3157,19 -3157,11 +3158,11 @@@ static int check_helper_call(struct bpf
} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
- if (is_acquire_function(func_id)) {
- int id = acquire_reference_state(env, insn_idx);
-
- if (id < 0)
- return id;
- /* For mark_ptr_or_null_reg() */
- regs[BPF_REG_0].id = id;
- /* For release_reference() */
- regs[BPF_REG_0].ref_obj_id = id;
- } else {
- /* For mark_ptr_or_null_reg() */
- regs[BPF_REG_0].id = ++env->id_gen;
- }
+ regs[BPF_REG_0].id = ++env->id_gen;
+ } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
+ mark_reg_known_zero(env, regs, BPF_REG_0);
+ regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
+ regs[BPF_REG_0].id = ++env->id_gen;
} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
@@@ -3180,9 -3172,19 +3173,19 @@@
return -EINVAL;
}
- if (is_ptr_cast_function(func_id))
+ if (is_ptr_cast_function(func_id)) {
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+ } else if (is_acquire_function(func_id)) {
+ int id = acquire_reference_state(env, insn_idx);
+
+ if (id < 0)
+ return id;
+ /* For mark_ptr_or_null_reg() */
+ regs[BPF_REG_0].id = id;
+ /* For release_reference() */
+ regs[BPF_REG_0].ref_obj_id = id;
+ }
do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
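
As context for the ref_obj_id bookkeeping above, a hypothetical BPF program sketch (not part of this commit; the section name, includes and tuple handling are illustrative assumptions) showing the acquire/release pairing that this tracking enforces:

/* Hypothetical classifier program: a socket returned by an acquiring
 * helper such as bpf_skc_lookup_tcp() must be released on every path,
 * which the verifier checks via the ref_obj_id assigned above.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("classifier")
int skc_lookup_example(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);
	return 0;
}

char _license[] SEC("license") = "GPL";
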
diff --combined net/core/datagram.c
index e657289db4ac,0dafec5cada0..91bb5a083fee
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@@ -61,6 -61,8 +61,8 @@@
#include <trace/events/skb.h>
#include <net/busy_poll.h>
+ #include "datagram.h"
+
/*
* Is a socket 'connection oriented' ?
*/
@@@ -279,7 -281,7 +281,7 @@@ struct sk_buff *__skb_try_recv_datagram
break;
sk_busy_loop(sk, flags & MSG_DONTWAIT);
- } while (!skb_queue_empty(&sk->sk_receive_queue));
+ } while (sk->sk_receive_queue.prev != *last);
error = -EAGAIN;
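
A note on the new loop condition (my reading of the hunk, not taken from the commit message): in struct sk_buff_head, ->prev points at the last queued skb, so the outer loop in __skb_try_recv_datagram() now retries only while packets have been appended after the skb recorded in *last, rather than whenever the receive queue is merely non-empty. Sketch of the loop tail as it reads post-merge (loop body elided, see the full function):

	do {
		/* dequeue attempt, *last update and error handling elided */
		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	/* sk_receive_queue.prev is the queue tail: retry only if new
	 * packets arrived after the one recorded in *last
	 */
	} while (sk->sk_receive_queue.prev != *last);
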
@@@ -408,10 -410,10 +410,10 @@@ int skb_kill_datagram(struct sock *sk,
}
EXPORT_SYMBOL(skb_kill_datagram);
- int __skb_datagram_iter(const struct sk_buff *skb, int offset,
- struct iov_iter *to, int len, bool fault_short,
- size_t (*cb)(const void *, size_t, void *, struct iov_iter *),
- void *data)
+ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len, bool fault_short,
+ size_t (*cb)(const void *, size_t, void *,
+ struct iov_iter *), void *data)
{
int start = skb_headlen(skb);
int i, copy = start - offset, start_off = offset, n;