1#include <linux/fanotify.h>
2#include <linux/fdtable.h>
3#include <linux/fsnotify_backend.h>
4#include <linux/init.h>
5#include <linux/jiffies.h>
6#include <linux/kernel.h> /* UINT_MAX */
7#include <linux/mount.h>
8#include <linux/sched.h>
9#include <linux/types.h>
10#include <linux/wait.h>
11
12#include "fanotify.h"
13
14static bool should_merge(struct fsnotify_event *old_fsn,
15			 struct fsnotify_event *new_fsn)
16{
17	struct fanotify_event_info *old, *new;
18
19	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
20	old = FANOTIFY_E(old_fsn);
21	new = FANOTIFY_E(new_fsn);
22
23	if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
24	    old->path.mnt == new->path.mnt &&
25	    old->path.dentry == new->path.dentry)
26		return true;
27	return false;
28}
29
30/* and the list better be locked by something too! */
31static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
32{
33	struct fsnotify_event *test_event;
34	bool do_merge = false;
35
36	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
37
38#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
39	/*
40	 * Don't merge a permission event with any other event so that we know
41	 * the event structure we have created in fanotify_handle_event() is the
42	 * one we should check for permission response.
43	 */
44	if (event->mask & FAN_ALL_PERM_EVENTS)
45		return 0;
46#endif
47
48	list_for_each_entry_reverse(test_event, list, list) {
49		if (should_merge(test_event, event)) {
50			do_merge = true;
51			break;
52		}
53	}
54
55	if (!do_merge)
56		return 0;
57
58	test_event->mask |= event->mask;
59	return 1;
60}
61
62#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
63static int fanotify_get_response(struct fsnotify_group *group,
64				 struct fanotify_perm_event_info *event)
65{
66	int ret;
67
68	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
69
70	wait_event(group->fanotify_data.access_waitq, event->response ||
71				atomic_read(&group->fanotify_data.bypass_perm));
72
73	if (!event->response) {	/* bypass_perm set */
74		/*
75		 * Event was canceled because group is being destroyed. Remove
76		 * it from group's event list because we are responsible for
77		 * freeing the permission event.
78		 */
79		fsnotify_remove_event(group, &event->fae.fse);
80		return 0;
81	}
82
83	/* userspace responded, convert to something usable */
84	switch (event->response) {
85	case FAN_ALLOW:
86		ret = 0;
87		break;
88	case FAN_DENY:
89	default:
90		ret = -EPERM;
91	}
92	event->response = 0;
93
94	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
95		 group, event, ret);
96
97	return ret;
98}
99#endif
100
101static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
102				       struct fsnotify_mark *vfsmnt_mark,
103				       u32 event_mask,
104				       void *data, int data_type)
105{
106	__u32 marks_mask, marks_ignored_mask;
107	struct path *path = data;
108
109	pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
110		 " data_type=%d\n", __func__, inode_mark, vfsmnt_mark,
111		 event_mask, data, data_type);
112
113	/* if we don't have enough info to send an event to userspace say no */
114	if (data_type != FSNOTIFY_EVENT_PATH)
115		return false;
116
117	/* sorry, fanotify only gives a damn about files and dirs */
118	if (!d_is_reg(path->dentry) &&
119	    !d_can_lookup(path->dentry))
120		return false;
121
122	if (inode_mark && vfsmnt_mark) {
123		marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
124		marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
125	} else if (inode_mark) {
126		/*
127		 * if the event is for a child and this inode doesn't care about
128		 * events on the child, don't send it!
129		 */
130		if ((event_mask & FS_EVENT_ON_CHILD) &&
131		    !(inode_mark->mask & FS_EVENT_ON_CHILD))
132			return false;
133		marks_mask = inode_mark->mask;
134		marks_ignored_mask = inode_mark->ignored_mask;
135	} else if (vfsmnt_mark) {
136		marks_mask = vfsmnt_mark->mask;
137		marks_ignored_mask = vfsmnt_mark->ignored_mask;
138	} else {
139		BUG();
140	}
141
142	if (d_is_dir(path->dentry) &&
143	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
144		return false;
145
146	if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
147				 ~marks_ignored_mask)
148		return true;
149
150	return false;
151}
152
/*
 * Allocate and initialize a fanotify event for @mask on @inode.
 *
 * Permission events (FAN_ALL_PERM_EVENTS) come from a dedicated slab cache
 * so the larger fanotify_perm_event_info (with its response field) is used;
 * all other events come from fanotify_event_cachep.
 *
 * Takes a reference on the current task's tgid pid and, when @path is
 * non-NULL, on the path; fanotify_free_event() drops both.  Returns NULL
 * on allocation failure.
 */
struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
						 struct path *path)
{
	struct fanotify_event_info *event;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & FAN_ALL_PERM_EVENTS) {
		struct fanotify_perm_event_info *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
					  GFP_KERNEL);
		if (!pevent)
			return NULL;
		/* Common part is embedded first in the perm event. */
		event = &pevent->fae;
		/* No response from userspace yet. */
		pevent->response = 0;
		goto init;
	}
#endif
	event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
	if (!event)
		return NULL;
/* Label is unused when CONFIG_FANOTIFY_ACCESS_PERMISSIONS is off. */
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode, mask);
	event->tgid = get_pid(task_tgid(current));
	if (path) {
		event->path = *path;
		path_get(&event->path);
	} else {
		/* No path available (e.g. overflow events). */
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
	return event;
}
186
/*
 * Main fsnotify callback for fanotify groups.
 *
 * Filters the event against the inode/mount marks, allocates a fanotify
 * event, queues it for userspace, and - for permission events - blocks
 * until userspace responds.
 *
 * Returns 0 when the event was filtered out, merged, or queued
 * successfully; -ENOMEM if allocation failed; for permission events,
 * the userspace verdict from fanotify_get_response() (0 or -EPERM).
 */
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 struct fsnotify_mark *inode_mark,
				 struct fsnotify_mark *fanotify_mark,
				 u32 mask, void *data, int data_type,
				 const unsigned char *file_name, u32 cookie)
{
	int ret = 0;
	struct fanotify_event_info *event;
	struct fsnotify_event *fsn_event;

	/*
	 * The code below relies on the userspace FAN_* values being
	 * numerically identical to the in-kernel FS_* values, so masks can
	 * be passed through unconverted.  Fail the build if they diverge.
	 */
	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

	if (!fanotify_should_send_event(inode_mark, fanotify_mark, mask, data,
					data_type))
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	/* data is a struct path * here (checked by should_send_event). */
	event = fanotify_alloc_event(inode, mask, data);
	if (unlikely(!event))
		return -ENOMEM;

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		return 0;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & FAN_ALL_PERM_EVENTS) {
		/*
		 * Permission events stay alive until userspace answers; we
		 * own the event and must destroy it once the verdict is in.
		 */
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event));
		fsnotify_destroy_event(group, fsn_event);
	}
#endif
	return ret;
}
239
240static void fanotify_free_group_priv(struct fsnotify_group *group)
241{
242	struct user_struct *user;
243
244	user = group->fanotify_data.user;
245	atomic_dec(&user->fanotify_listeners);
246	free_uid(user);
247}
248
249static void fanotify_free_event(struct fsnotify_event *fsn_event)
250{
251	struct fanotify_event_info *event;
252
253	event = FANOTIFY_E(fsn_event);
254	path_put(&event->path);
255	put_pid(event->tgid);
256#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
257	if (fsn_event->mask & FAN_ALL_PERM_EVENTS) {
258		kmem_cache_free(fanotify_perm_event_cachep,
259				FANOTIFY_PE(fsn_event));
260		return;
261	}
262#endif
263	kmem_cache_free(fanotify_event_cachep, event);
264}
265
/* fsnotify backend operations registered for fanotify groups. */
const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
};
271