root/include/net/cls_cgroup.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. task_cls_classid
  2. sock_update_classid
  3. task_get_classid
  4. sock_update_classid
  5. task_get_classid

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  * cls_cgroup.h                 Control Group Classifier
   4  *
   5  * Authors:     Thomas Graf <tgraf@suug.ch>
   6  */
   7 
   8 #ifndef _NET_CLS_CGROUP_H
   9 #define _NET_CLS_CGROUP_H
  10 
  11 #include <linux/cgroup.h>
  12 #include <linux/hardirq.h>
  13 #include <linux/rcupdate.h>
  14 #include <net/sock.h>
  15 #include <net/inet_sock.h>
  16 
  17 #ifdef CONFIG_CGROUP_NET_CLASSID
/* Per-cgroup state of the net_cls controller: carries the classid that
 * the functions below read for tasks belonging to the cgroup. */
struct cgroup_cls_state {
	struct cgroup_subsys_state css;	/* embedded base cgroup state; the
					 * accessors below recover this struct
					 * from it via container_of() */
	u32 classid;			/* class identifier returned by
					 * task_cls_classid()/task_get_classid();
					 * presumably set through the cgroup
					 * interface — confirm in the .c file */
};

/* Resolve @p's net_cls state (defined out of line elsewhere).
 * NOTE(review): locking requirements are not visible here — callers in
 * this header take rcu_read_lock() or rely on `current`; confirm. */
struct cgroup_cls_state *task_cls_state(struct task_struct *p);
  24 
  25 static inline u32 task_cls_classid(struct task_struct *p)
  26 {
  27         u32 classid;
  28 
  29         if (in_interrupt())
  30                 return 0;
  31 
  32         rcu_read_lock();
  33         classid = container_of(task_css(p, net_cls_cgrp_id),
  34                                struct cgroup_cls_state, css)->classid;
  35         rcu_read_unlock();
  36 
  37         return classid;
  38 }
  39 
  40 static inline void sock_update_classid(struct sock_cgroup_data *skcd)
  41 {
  42         u32 classid;
  43 
  44         classid = task_cls_classid(current);
  45         sock_cgroup_set_classid(skcd, classid);
  46 }
  47 
  48 static inline u32 task_get_classid(const struct sk_buff *skb)
  49 {
  50         u32 classid = task_cls_state(current)->classid;
  51 
  52         /* Due to the nature of the classifier it is required to ignore all
  53          * packets originating from softirq context as accessing `current'
  54          * would lead to false results.
  55          *
  56          * This test assumes that all callers of dev_queue_xmit() explicitly
  57          * disable bh. Knowing this, it is possible to detect softirq based
  58          * calls by looking at the number of nested bh disable calls because
  59          * softirqs always disables bh.
  60          */
  61         if (in_serving_softirq()) {
  62                 struct sock *sk = skb_to_full_sk(skb);
  63 
  64                 /* If there is an sock_cgroup_classid we'll use that. */
  65                 if (!sk || !sk_fullsock(sk))
  66                         return 0;
  67 
  68                 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
  69         }
  70 
  71         return classid;
  72 }
  73 #else /* !CONFIG_CGROUP_NET_CLASSID */
/* !CONFIG_CGROUP_NET_CLASSID: no classid to record; stub keeps callers
 * compiling without #ifdefs at every call site. */
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
}
  77 
/* !CONFIG_CGROUP_NET_CLASSID: always report 0, i.e. no classid set. */
static inline u32 task_get_classid(const struct sk_buff *skb)
{
	return 0;
}
  82 #endif /* CONFIG_CGROUP_NET_CLASSID */
  83 #endif  /* _NET_CLS_CGROUP_H */

/* [<][>][^][v][top][bottom][index][help] */