/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Definitions shared between vvp and liblustre, and other clients in the
 * future.
 *
 *   Author: Oleg Drokin <oleg.drokin@sun.com>
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef LCLIENT_H
#define LCLIENT_H

blkcnt_t dirty_cnt(struct inode *inode);

int cl_glimpse_size0(struct inode *inode, int agl);
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
		    struct inode *inode, struct cl_object *clob, int agl);

static inline int cl_glimpse_size(struct inode *inode)
{
	return cl_glimpse_size0(inode, 0);
}

static inline int cl_agl(struct inode *inode)
{
	return cl_glimpse_size0(inode, 1);
}

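/*
 * cl_glimpse_size() brings the file size known by the OSTs up to date
 * before the inode size is used. An illustrative sketch of a typical
 * caller (the surrounding code is assumed, not taken from this header):
 *
 *	rc = cl_glimpse_size(inode);
 *	if (rc == 0)
 *		size = i_size_read(inode);
 *
 * cl_agl() issues the same glimpse asynchronously (AGL), e.g. from
 * statahead, so the caller does not block on the lock enqueue.
 */
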
/**
 * Locking policy for setattr.
 */
enum ccc_setattr_lock_type {
	/** Locking is done by the server */
	SETATTR_NOLOCK,
	/** An extent lock is enqueued */
	SETATTR_EXTENT_LOCK,
	/** An existing local extent lock is used */
	SETATTR_MATCH_LOCK
};

/**
 * IO state private to the vvp or slp layers.
 */
struct ccc_io {
	/** super class */
	struct cl_io_slice     cui_cl;
	struct cl_io_lock_link cui_link;
	/**
	 * I/O vector that data is read into or written from.
	 */
	struct iov_iter *cui_iter;
	/**
	 * Total number of bytes remaining in this IO.
	 */
	size_t cui_tot_count;

	union {
		struct {
			enum ccc_setattr_lock_type cui_local_lock;
		} setattr;
	} u;
	/**
	 * True iff the IO is currently processing a glimpse.
	 */
	int		  cui_glimpse;
	/**
	 * Layout generation at the time this IO was initialized.
	 */
	__u32		cui_layout_gen;
	/**
	 * File descriptor against which the IO is done.
	 */
	struct ll_file_data *cui_fd;
	struct kiocb *cui_iocb;
};

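/*
 * A setattr IO records its locking policy in ccc_io::u.setattr; an
 * illustrative sketch, where \a cio and the truncate_needs_lock condition
 * are placeholders supplied by the caller:
 *
 *	cio->u.setattr.cui_local_lock = truncate_needs_lock ?
 *					SETATTR_EXTENT_LOCK : SETATTR_NOLOCK;
 */
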
/**
 * True if \a io is a normal IO, false for splice_{read,write}; must be
 * implemented in arch-specific code.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);

extern struct lu_context_key ccc_key;
extern struct lu_context_key ccc_session_key;

struct ccc_thread_info {
	struct cl_lock_descr cti_descr;
	struct cl_io	 cti_io;
	struct cl_attr       cti_attr;
};

static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
{
	struct ccc_thread_info      *info;

	info = lu_context_key_get(&env->le_ctx, &ccc_key);
	LASSERT(info != NULL);
	return info;
}

static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
{
	struct cl_attr *attr = &ccc_env_info(env)->cti_attr;

	memset(attr, 0, sizeof(*attr));
	return attr;
}

static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
{
	struct cl_io *io = &ccc_env_info(env)->cti_io;

	memset(io, 0, sizeof(*io));
	return io;
}

struct ccc_session {
	struct ccc_io cs_ios;
};

static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
{
	struct ccc_session *ses;

	ses = lu_context_key_get(env->le_ses, &ccc_session_key);
	LASSERT(ses != NULL);
	return ses;
}

static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
{
	return &ccc_env_session(env)->cs_ios;
}

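/*
 * The accessors above hand out pre-allocated per-thread scratch objects,
 * so hot paths avoid repeated allocation. An illustrative sketch of the
 * usual pattern, assuming \a env and \a clob were set up by the caller:
 *
 *	struct cl_io *io = ccc_env_thread_io(env);
 *
 *	io->ci_obj = clob;
 *	if (cl_io_init(env, io, CIT_MISC, clob) == 0) {
 *		... drive the IO ...
 *	}
 *	cl_io_fini(env, io);
 */
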
/**
 * ccc-private object state.
 */
struct ccc_object {
	struct cl_object_header cob_header;
	struct cl_object	cob_cl;
	struct inode	   *cob_inode;

	/**
	 * A list of dirty pages pending IO in the cache. Used by
	 * SOM. Protected by ll_inode_info::lli_lock.
	 *
	 * \see ccc_page::cpg_pending_linkage
	 */
	struct list_head	     cob_pending_list;

	/**
	 * Access to this counter is protected by inode->i_sem. Since the
	 * lifetime of transient pages must be covered by the inode
	 * semaphore, no additional lock is needed.
	 */
	int		     cob_transient_pages;
	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t	    cob_mmap_cnt;

	/**
	 * Various flags.
	 *
	 * cob_discard_page_warned
	 *     If pages belonging to this object are discarded when a client
	 *     is evicted, some debug info is printed. This flag is set while
	 *     the first discarded page is processed, to avoid flooding the
	 *     debug log with a message for every discarded page.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int		cob_discard_page_warned:1;
};

/**
 * ccc-private page state.
 */
struct ccc_page {
	struct cl_page_slice cpg_cl;
	int		  cpg_defer_uptodate;
	int		  cpg_ra_used;
	int		  cpg_write_queued;
	/**
	 * Non-empty iff this page is already counted in
	 * ccc_object::cob_pending_list. Protected by
	 * ccc_object::cob_pending_guard. This list is only used as a flag,
	 * that is, never iterated through, only checked for list_empty(), but
	 * having a list is useful for debugging.
	 */
	struct list_head	   cpg_pending_linkage;
	/** VM page */
	struct page	  *cpg_page;
};

static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
{
	return container_of(slice, struct ccc_page, cpg_cl);
}

struct cl_page    *ccc_vmpage_page_transient(struct page *vmpage);

struct ccc_device {
	struct cl_device    cdv_cl;
	struct super_block *cdv_sb;
	struct cl_device   *cdv_next;
};

struct ccc_lock {
	struct cl_lock_slice clk_cl;
};

struct ccc_req {
	struct cl_req_slice  crq_cl;
};

void *ccc_key_init	(const struct lu_context *ctx,
			   struct lu_context_key *key);
void  ccc_key_fini	(const struct lu_context *ctx,
			   struct lu_context_key *key, void *data);
void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key);
void  ccc_session_key_fini(const struct lu_context *ctx,
			   struct lu_context_key *key, void *data);

int	      ccc_device_init  (const struct lu_env *env,
				   struct lu_device *d,
				   const char *name, struct lu_device *next);
struct lu_device *ccc_device_fini (const struct lu_env *env,
				   struct lu_device *d);
struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops);
struct lu_device *ccc_device_free (const struct lu_env *env,
				   struct lu_device *d);
struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops);

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req);
void ccc_umount(const struct lu_env *env, struct cl_device *dev);
int ccc_global_init(struct lu_device_type *device_type);
void ccc_global_fini(struct lu_device_type *device_type);
int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
		     const struct cl_object_conf *conf);
int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf);
void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *io,
		  const struct cl_lock_operations *lkops);
int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_attr *attr, unsigned valid);
int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb);
int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_object_conf *conf);
struct page *ccc_page_vmpage(const struct lu_env *env,
			    const struct cl_page_slice *slice);
int ccc_page_is_under_lock(const struct lu_env *env,
			   const struct cl_page_slice *slice, struct cl_io *io);
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
void ccc_transient_page_verify(const struct cl_page *page);
int  ccc_transient_page_own(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io, int nonblock);
void ccc_transient_page_assume(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *io);
void ccc_transient_page_unassume(const struct lu_env *env,
				 const struct cl_page_slice *slice,
				 struct cl_io *io);
void ccc_transient_page_disown(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *io);
void ccc_transient_page_discard(const struct lu_env *env,
				const struct cl_page_slice *slice,
				struct cl_io *io);
int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *io);
void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice);
void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *io, __u32 enqflags);
int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io);
void ccc_lock_state(const struct lu_env *env,
		    const struct cl_lock_slice *slice,
		    enum cl_lock_state state);

void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios);
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end);
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end);
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
		    size_t nob);
void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
		       struct cl_io *io);
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed);
void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret);
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *oa, u64 flags);

struct lu_device   *ccc2lu_dev      (struct ccc_device *vdv);
struct lu_object   *ccc2lu	  (struct ccc_object *vob);
struct ccc_device  *lu2ccc_dev      (const struct lu_device *d);
struct ccc_device  *cl2ccc_dev      (const struct cl_device *d);
struct ccc_object  *lu2ccc	  (const struct lu_object *obj);
struct ccc_object  *cl2ccc	  (const struct cl_object *obj);
struct ccc_lock    *cl2ccc_lock     (const struct cl_lock_slice *slice);
struct ccc_io      *cl2ccc_io       (const struct lu_env *env,
				     const struct cl_io_slice *slice);
struct ccc_req     *cl2ccc_req      (const struct cl_req_slice *slice);
struct page	 *cl2vm_page      (const struct cl_page_slice *slice);
struct inode       *ccc_object_inode(const struct cl_object *obj);
struct ccc_object  *cl_inode2ccc    (struct inode *inode);

int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
		   struct obd_capa *capa);

int ccc_object_invariant(const struct cl_object *obj);
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
int cl_local_size(struct inode *inode);

__u16 ll_dirent_type_get(struct lu_dirent *ent);
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
__u32 cl_fid_build_gen(const struct lu_fid *fid);

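/**
 * Compile-away form of the cl_object invariant check: the sizeof operands
 * keep \a env, \a clob and \a expr referenced, avoiding "unused variable"
 * warnings, without evaluating anything at run time. A typical
 * (illustrative) use:
 *
 *	CLOBINVRNT(env, clob, ccc_object_invariant(clob));
 */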
# define CLOBINVRNT(env, clob, expr)					\
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))

int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
int cl_ocd_update(struct obd_device *host,
		  struct obd_device *watched,
		  enum obd_notify_event ev, void *owner, void *data);

struct ccc_grouplock {
	struct lu_env   *cg_env;
	struct cl_io    *cg_io;
	struct cl_lock  *cg_lock;
	unsigned long    cg_gid;
};

int  cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		      struct ccc_grouplock *cg);
void cl_put_grouplock(struct ccc_grouplock *cg);
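
/*
 * Grouplock usage is a strict get/put pairing; an illustrative sketch
 * (error handling elided, \a obj and \a gid supplied by the caller):
 *
 *	struct ccc_grouplock cg;
 *
 *	if (cl_get_grouplock(obj, gid, 0, &cg) == 0) {
 *		... IO under the group lock ...
 *		cl_put_grouplock(&cg);
 *	}
 */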

/**
 * New interfaces to get and put lov_stripe_md from the lov layer. This
 * violates layering, because lov_stripe_md is supposed to be private data
 * of the lov layer.
 *
 * NB: if you find you have to use these interfaces for your new code,
 * please think about it again. These interfaces may be removed in the
 * future for better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
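
/*
 * Both pairs above follow get/put reference discipline; a minimal
 * illustrative sketch for the inode-based variant:
 *
 *	struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
 *
 *	if (lsm != NULL) {
 *		... read-only access to the striping information ...
 *		ccc_inode_lsm_put(inode, lsm);
 *	}
 */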

/**
 * Data structure managing a client's cached clean pages. An LRU of
 * pages is maintained, along with other statistics.
 */
struct cl_client_cache {
	atomic_t	ccc_users;    /* # of users (OSCs) of this data */
	struct list_head	ccc_lru;      /* LRU list of cached clean pages */
	spinlock_t	ccc_lru_lock; /* lock for list */
	atomic_t	ccc_lru_left; /* # of LRU entries available */
	unsigned long	ccc_lru_max;  /* max # of LRU entries possible */
	unsigned int	ccc_lru_shrinkers; /* # of threads reclaiming */
};
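
/*
 * The embedding owner initializes every field before first use; an
 * illustrative sketch, where \a cache and lru_page_max are placeholders
 * supplied by the caller:
 *
 *	atomic_set(&cache->ccc_users, 1);
 *	INIT_LIST_HEAD(&cache->ccc_lru);
 *	spin_lock_init(&cache->ccc_lru_lock);
 *	cache->ccc_lru_max = lru_page_max;
 *	atomic_set(&cache->ccc_lru_left, lru_page_max);
 *	cache->ccc_lru_shrinkers = 0;
 */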

#endif /* LCLIENT_H */