root/fs/ocfs2/cluster/tcp_internal.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2005 Oracle.  All rights reserved.
 */

#ifndef O2CLUSTER_TCP_INTERNAL_H
#define O2CLUSTER_TCP_INTERNAL_H

#define O2NET_MSG_MAGIC           ((u16)0xfa55)
#define O2NET_MSG_STATUS_MAGIC    ((u16)0xfa56)
#define O2NET_MSG_KEEP_REQ_MAGIC  ((u16)0xfa57)
#define O2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
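
/*
 * A hypothetical helper, not part of this header: a receive path could
 * classify an incoming message by its magic once it has been converted
 * to host byte order.  Minimal sketch only.
 */
static inline int o2net_magic_valid_sketch(u16 magic)
{
        switch (magic) {
        case O2NET_MSG_MAGIC:           /* normal payload-carrying message */
        case O2NET_MSG_STATUS_MAGIC:    /* status reply to an earlier send */
        case O2NET_MSG_KEEP_REQ_MAGIC:  /* keepalive probe */
        case O2NET_MSG_KEEP_RESP_MAGIC: /* keepalive answer */
                return 1;
        default:
                return 0;
        }
}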

/* we're delaying our quorum decision so that heartbeat will have timed
 * out truly dead nodes by the time we come around to making decisions
 * on their number */
#define O2NET_QUORUM_DELAY_MS   ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
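
/*
 * Worked example, assuming the usual defaults of o2hb_dead_threshold == 31
 * and O2HB_REGION_TIMEOUT_MS == 2000: quorum decisions are delayed by
 * (31 + 2) * 2000 = 66000 ms, i.e. 66 seconds.
 */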

/*
 * This version number represents quite a lot, unfortunately.  It not
 * only represents the raw network message protocol on the wire but also
 * locking semantics of the file system using the protocol.  It should
 * be somewhere else, I'm sure, but right now it isn't.
 *
 * With version 11, we separate out the filesystem locking portion.  The
 * filesystem now has a major.minor version it negotiates.  Version 11
 * introduces this negotiation to the o2dlm protocol, and as such the
 * version here in tcp_internal.h should not need to be bumped for
 * filesystem locking changes.
 *
 * New in version 11:
 *      - Negotiation of filesystem locking in the dlm join.
 *
 * New in version 10:
 *      - Meta/data locks combined
 *
 * New in version 9:
 *      - All votes removed
 *
 * New in version 8:
 *      - Replace delete inode votes with a cluster lock
 *
 * New in version 7:
 *      - DLM join domain includes the live nodemap
 *
 * New in version 6:
 *      - DLM lockres remote refcount fixes.
 *
 * New in version 5:
 *      - Network timeout checking protocol
 *
 * New in version 4:
 *      - Remove i_generation from lock names for better stat performance.
 *
 * New in version 3:
 *      - Replace dentry votes with a cluster lock
 *
 * New in version 2:
 *      - full 64 bit i_size in the metadata lock lvbs
 *      - introduction of "rw" lock and pushing meta/data locking down
 */
#define O2NET_PROTOCOL_VERSION 11ULL
struct o2net_handshake {
        __be64  protocol_version;
        __be64  connector_id;
        __be32  o2hb_heartbeat_timeout_ms;
        __be32  o2net_idle_timeout_ms;
        __be32  o2net_keepalive_delay_ms;
        __be32  o2net_reconnect_delay_ms;
};
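
/*
 * Minimal sketch, not taken from tcp.c: how a sender might populate the
 * handshake.  The numeric timeout values are placeholders; the real code
 * derives them from configurable knobs.  Every field crosses the wire
 * big-endian, hence the cpu_to_be*() conversions.
 */
static inline void o2net_fill_handshake_sketch(struct o2net_handshake *hand,
                                               u64 connector_id)
{
        hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
        hand->connector_id = cpu_to_be64(connector_id);
        hand->o2hb_heartbeat_timeout_ms = cpu_to_be32(2000);    /* placeholder */
        hand->o2net_idle_timeout_ms = cpu_to_be32(30000);       /* placeholder */
        hand->o2net_keepalive_delay_ms = cpu_to_be32(2000);     /* placeholder */
        hand->o2net_reconnect_delay_ms = cpu_to_be32(2000);     /* placeholder */
}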

struct o2net_node {
        /* this is never called from int/bh */
        spinlock_t                      nn_lock;

        /* set the moment an sc is allocated and a connect is started */
        struct o2net_sock_container     *nn_sc;
        /* _valid is only set after the handshake passes and tx can happen */
        unsigned                        nn_sc_valid:1;
        /* if this is set, tx just returns it; see the sketch after this
         * struct */
        int                             nn_persistent_error;
        /* it is only set to 1 after the idle timeout elapses. */
        atomic_t                        nn_timeout;

        /* threads waiting for an sc to arrive wait on the wq for generation
         * to increase.  it is increased when a connecting socket succeeds
         * or fails or when an accepted socket is attached. */
        wait_queue_head_t               nn_sc_wq;

        struct idr                      nn_status_idr;
        struct list_head                nn_status_list;

        /* connects are attempted from when heartbeat comes up until either hb
         * goes down, the node is unconfigured, or a connect succeeds.
         * connect_work is queued from set_nn_state both from hb up and from
         * itself if a connect attempt fails and so can be self-arming.
         * shutdown is careful to first mark the nn such that no connects will
         * be attempted before canceling delayed connect work and flushing the
         * queue. */
        struct delayed_work             nn_connect_work;
        unsigned long                   nn_last_connect_attempt;

        /* this is queued as nodes come up and is canceled when a connection is
         * established.  this expiring gives up on the node and errors out
         * transmits */
        struct delayed_work             nn_connect_expired;

        /* after we give up on a socket we wait a while before deciding
         * that it is still heartbeating and that we should do some
         * quorum work */
        struct delayed_work             nn_still_up;
};
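
/*
 * Sketch of the intended usage described above, not the tcp.c
 * implementation: a transmit path consults the node state under nn_lock
 * before sending.  The -ENOTCONN fallback is an assumption.
 */
static inline int o2net_tx_ok_sketch(struct o2net_node *nn)
{
        int ret;

        spin_lock(&nn->nn_lock);
        if (nn->nn_persistent_error)
                ret = nn->nn_persistent_error;  /* tx just returns it */
        else if (nn->nn_sc_valid)
                ret = 0;                        /* handshake passed, tx can happen */
        else
                ret = -ENOTCONN;                /* no usable socket yet */
        spin_unlock(&nn->nn_lock);
        return ret;
}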

struct o2net_sock_container {
        struct kref             sc_kref;
        /* the next two are valid for the lifetime of the sc */
        struct socket           *sc_sock;
        struct o2nm_node        *sc_node;

        /* all of these sc work structs hold refs on the sc while they are
         * queued.  they should not be able to ref a freed sc.  the teardown
         * race is with o2net_wq destruction in o2net_stop_listening().  a
         * ref-taking helper is sketched after this struct. */

        /* rx and connect work are generated from socket callbacks.  sc
         * shutdown removes the callbacks and then flushes the work queue */
        struct work_struct      sc_rx_work;
        struct work_struct      sc_connect_work;
        /* shutdown work is triggered in two ways.  the simple way is
         * for a code path to call ensure_shutdown, which takes a lock,
         * removes the sc from the nn, and queues the work.  in this case
         * the work is single-shot.  the work is also queued from a sock
         * callback, though, and in this case the work will find the sc
         * still on the nn and will call ensure_shutdown itself.  this
         * ends up triggering the shutdown work again, though nothing
         * will be done in that second iteration.  so work queue teardown
         * has to be careful to remove the sc from the nn before waiting
         * on the work queue so that the shutdown work doesn't remove the
         * sc and rearm itself.
         */
        struct work_struct      sc_shutdown_work;

        struct timer_list       sc_idle_timeout;
        struct delayed_work     sc_keepalive_work;

        unsigned                sc_handshake_ok:1;

        struct page             *sc_page;
        size_t                  sc_page_off;

        /* original handlers for the sockets */
        void                    (*sc_state_change)(struct sock *sk);
        void                    (*sc_data_ready)(struct sock *sk);

        u32                     sc_msg_key;
        u16                     sc_msg_type;

#ifdef CONFIG_DEBUG_FS
        struct list_head        sc_net_debug_item;
        ktime_t                 sc_tv_timer;
        ktime_t                 sc_tv_data_ready;
        ktime_t                 sc_tv_advance_start;
        ktime_t                 sc_tv_advance_stop;
        ktime_t                 sc_tv_func_start;
        ktime_t                 sc_tv_func_stop;
#endif
#ifdef CONFIG_OCFS2_FS_STATS
        ktime_t                 sc_tv_acquiry_total;
        ktime_t                 sc_tv_send_total;
        ktime_t                 sc_tv_status_total;
        u32                     sc_send_count;
        u32                     sc_recv_count;
        ktime_t                 sc_tv_process_total;
#endif
        struct mutex            sc_send_lock;
};
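
/*
 * Sketch of the refcounting rule described above: anything that queues
 * one of the sc work structs takes a reference first.  Hypothetical
 * helper name; the real code pairs this with a kref_put() when the
 * queued work finishes.
 */
static inline void sc_get_for_work_sketch(struct o2net_sock_container *sc)
{
        kref_get(&sc->sc_kref); /* dropped again once the queued work runs */
}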

struct o2net_msg_handler {
        struct rb_node          nh_node;
        u32                     nh_max_len;
        u32                     nh_msg_type;
        u32                     nh_key;
        o2net_msg_handler_func  *nh_func;
        void                    *nh_func_data;
        o2net_post_msg_handler_func
                                *nh_post_func;
        struct kref             nh_kref;
        struct list_head        nh_unregister_item;
};
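
/*
 * Handlers are kept in an rb-tree keyed by (nh_msg_type, nh_key).  A
 * comparison sketch believed to mirror the one tcp.c uses; memcmp keeps
 * the ordering stable without caring about signedness.
 */
static inline int o2net_handler_cmp_sketch(struct o2net_msg_handler *nh,
                                           u32 msg_type, u32 key)
{
        int ret = memcmp(&nh->nh_msg_type, &msg_type, sizeof(msg_type));

        if (ret == 0)
                ret = memcmp(&nh->nh_key, &key, sizeof(key));

        return ret;
}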

enum o2net_system_error {
        O2NET_ERR_NONE = 0,
        O2NET_ERR_NO_HNDLR,
        O2NET_ERR_OVERFLOW,
        O2NET_ERR_DIED,
        O2NET_ERR_MAX
};
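
/*
 * Sketch, believed to mirror the translation table kept in tcp.c: how a
 * system error might map to a local errno for callers.
 */
static inline int o2net_sys_err_to_errno_sketch(enum o2net_system_error err)
{
        switch (err) {
        case O2NET_ERR_NONE:
                return 0;
        case O2NET_ERR_NO_HNDLR:
                return -ENOPROTOOPT;    /* no handler registered for the type */
        case O2NET_ERR_OVERFLOW:
                return -EOVERFLOW;      /* message larger than nh_max_len */
        case O2NET_ERR_DIED:
                return -EHOSTDOWN;      /* peer went away mid-conversation */
        default:
                return -EINVAL;
        }
}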

struct o2net_status_wait {
        enum o2net_system_error ns_sys_status;
        s32                     ns_status;
        int                     ns_id;
        wait_queue_head_t       ns_wq;
        struct list_head        ns_node_item;
};
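
/*
 * Initialization sketch, an assumption based on the field names: a
 * sender parks on ns_wq until a status reply fills in ns_sys_status and
 * ns_status and wakes it.  ns_id is assigned later from nn_status_idr.
 */
static inline void o2net_init_nsw_sketch(struct o2net_status_wait *nsw)
{
        nsw->ns_sys_status = O2NET_ERR_NONE;
        nsw->ns_status = 0;
        nsw->ns_id = 0;
        init_waitqueue_head(&nsw->ns_wq);
        INIT_LIST_HEAD(&nsw->ns_node_item);
}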

#ifdef CONFIG_DEBUG_FS
/* just for state dumps */
struct o2net_send_tracking {
        struct list_head                st_net_debug_item;
        struct task_struct              *st_task;
        struct o2net_sock_container     *st_sc;
        u32                             st_id;
        u32                             st_msg_type;
        u32                             st_msg_key;
        u8                              st_node;
        ktime_t                         st_sock_time;
        ktime_t                         st_send_time;
        ktime_t                         st_status_time;
};
#else
struct o2net_send_tracking {
        u32     dummy;
};
#endif  /* CONFIG_DEBUG_FS */
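
#ifdef CONFIG_DEBUG_FS
/*
 * Initialization sketch for the state-dump record above; field use is
 * an assumption based on the names, with timestamps filled in later.
 */
static inline void o2net_init_nst_sketch(struct o2net_send_tracking *nst,
                                         u32 msgtype, u32 msgkey, u8 node)
{
        INIT_LIST_HEAD(&nst->st_net_debug_item);
        nst->st_task = current;
        nst->st_msg_type = msgtype;
        nst->st_msg_key = msgkey;
        nst->st_node = node;
}
#endif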

#endif /* O2CLUSTER_TCP_INTERNAL_H */
