This source file includes the following definitions:
- xsk_diag_put_info
- xsk_diag_put_ring
- xsk_diag_put_rings_cfg
- xsk_diag_put_umem
- xsk_diag_fill
- xsk_diag_dump
- xsk_diag_handler_dump
- xsk_diag_init
- xsk_diag_exit
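For reference, here is a minimal, hypothetical user-space sketch (not part of this kernel source) of how the dump interface implemented below can be queried: it opens a NETLINK_SOCK_DIAG socket and sends a SOCK_DIAG_BY_FAMILY dump request for AF_XDP, assuming the struct xdp_diag_req layout from <linux/xdp_diag.h>; whether AF_XDP is defined by your libc headers varies, hence the fallback define.

/* Hypothetical stand-alone example, not part of xsk_diag.c. */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/xdp_diag.h>

#ifndef AF_XDP
#define AF_XDP 44	/* older libc headers may not define it */
#endif

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct xdp_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len   = sizeof(msg),
			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_XDP,
			.xdiag_show   = XDP_SHOW_INFO | XDP_SHOW_RING_CFG |
					XDP_SHOW_UMEM | XDP_SHOW_MEMINFO,
		},
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return 1;
	if (sendto(fd, &msg, sizeof(msg), 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		return 1;
	/* Replies arrive as a series of NLM_F_MULTI messages, each carrying a
	 * struct xdp_diag_msg plus attributes; walk them with recv() and
	 * NLMSG_OK()/NLMSG_NEXT() as for any netlink dump, stopping at
	 * NLMSG_DONE.
	 */
	close(fd);
	return 0;
}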
#include <linux/module.h>
#include <net/xdp_sock.h>
#include <linux/xdp_diag.h>
#include <linux/sock_diag.h>

#include "xsk_queue.h"
#include "xsk.h"

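/* Fill an XDP_DIAG_INFO attribute with the bound device ifindex and
 * queue id of the socket.
 */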
static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xdp_diag_info di = {};

	di.ifindex = xs->dev ? xs->dev->ifindex : 0;
	di.queue_id = xs->queue_id;
	return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
}

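/* Report the size (number of entries) of one descriptor ring as a
 * netlink attribute of the given type.
 */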
static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
			     struct sk_buff *nlskb)
{
	struct xdp_diag_ring dr = {};

	dr.entries = queue->nentries;
	return nla_put(nlskb, nl_type, sizeof(dr), &dr);
}

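/* Emit the RX and TX ring sizes for the socket, skipping rings that
 * have not been configured.
 */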
static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
				  struct sk_buff *nlskb)
{
	int err = 0;

	if (xs->rx)
		err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
	if (!err && xs->tx)
		err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
	return err;
}

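/* Describe the UMEM bound to the socket: identity, size, chunk layout,
 * zero-copy state and reference count, followed by the fill and
 * completion ring sizes when those rings exist.
 */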
static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xdp_umem *umem = xs->umem;
	struct xdp_diag_umem du = {};
	int err;

	if (!umem)
		return 0;

	du.id = umem->id;
	du.size = umem->size;
	du.num_pages = umem->npgs;
	du.chunk_size = umem->chunk_size_nohr + umem->headroom;
	du.headroom = umem->headroom;
	du.ifindex = umem->dev ? umem->dev->ifindex : 0;
	du.queue_id = umem->queue_id;
	du.flags = 0;
	if (umem->zc)
		du.flags |= XDP_DU_F_ZEROCOPY;
	du.refs = refcount_read(&umem->users);

	err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);

	if (!err && umem->fq)
		err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
	if (!err && umem->cq) {
		err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
					nlskb);
	}
	return err;
}

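/* Build one XDP_DIAG reply for a socket, adding only the attribute
 * groups requested via req->xdiag_show. The socket mutex is held while
 * the attributes are gathered; on overflow the partial message is
 * cancelled and -EMSGSIZE returned so the dump can resume later.
 */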
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
			 struct xdp_diag_req *req,
			 struct user_namespace *user_ns,
			 u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_diag_msg *msg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	msg = nlmsg_data(nlh);
	memset(msg, 0, sizeof(*msg));
	msg->xdiag_family = AF_XDP;
	msg->xdiag_type = sk->sk_type;
	msg->xdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, msg->xdiag_cookie);

	mutex_lock(&xs->mutex);
	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_INFO) &&
	    nla_put_u32(nlskb, XDP_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
	    xsk_diag_put_rings_cfg(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
	    xsk_diag_put_umem(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	mutex_unlock(&xs->mutex);
	nlmsg_end(nlskb, nlh);
	return 0;

out_nlmsg_trim:
	mutex_unlock(&xs->mutex);
	nlmsg_cancel(nlskb, nlh);
	return -EMSGSIZE;
}

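/* Netlink dump callback: walk the per-netns list of XDP sockets and
 * emit one message per socket, using cb->args[0] to resume where the
 * previous dump round stopped.
 */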
static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
{
	struct xdp_diag_req *req = nlmsg_data(cb->nlh);
	struct net *net = sock_net(nlskb->sk);
	int num = 0, s_num = cb->args[0];
	struct sock *sk;

	mutex_lock(&net->xdp.lock);

	sk_for_each(sk, &net->xdp.list) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num++ < s_num)
			continue;

		if (xsk_diag_fill(sk, nlskb, req,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  sock_i_ino(sk)) < 0) {
			num--;
			break;
		}
	}

	mutex_unlock(&net->xdp.lock);
	cb->args[0] = num;
	return nlskb->len;
}

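/* sock_diag entry point for AF_XDP requests. Only NLM_F_DUMP requests
 * are supported; anything else is rejected with -EOPNOTSUPP.
 */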
static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
{
	struct netlink_dump_control c = { .dump = xsk_diag_dump };
	int hdrlen = sizeof(struct xdp_diag_req);
	struct net *net = sock_net(nlskb->sk);

	if (nlmsg_len(hdr) < hdrlen)
		return -EINVAL;

	if (!(hdr->nlmsg_flags & NLM_F_DUMP))
		return -EOPNOTSUPP;

	return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
}

static const struct sock_diag_handler xsk_diag_handler = {
	.family = AF_XDP,
	.dump = xsk_diag_handler_dump,
};

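/* Register/unregister the AF_XDP handler with the sock_diag core on
 * module load and unload.
 */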
static int __init xsk_diag_init(void)
{
	return sock_diag_register(&xsk_diag_handler);
}

static void __exit xsk_diag_exit(void)
{
	sock_diag_unregister(&xsk_diag_handler);
}

module_init(xsk_diag_init);
module_exit(xsk_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);