/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting the BE scheduling class with prio 2 is done as follows:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(IOPRIO_WHO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 *
 */
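/*
 * Illustrative note (not part of the original header): glibc provides no
 * wrapper for these system calls, so from userspace the call above would
 * typically be issued via syscall(2), e.g.
 *
 *	syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, pid, prio);
 *
 * with SYS_ioprio_set coming from <sys/syscall.h>.
 */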
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>

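/*
 * Apply @ioprio to @task. The caller must either match the target's
 * uid with its own uid/euid or hold CAP_SYS_NICE, and the security
 * hook must permit the change. The new value is stored in the task's
 * io_context (allocated on demand); if that allocation fails the
 * priority is silently dropped and 0 is returned.
 */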
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc->ioprio = ioprio;
		put_io_context(ioc);
	}

	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);

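/*
 * ioprio_set(2): validate the requested class/data pair, then apply it
 * to a single process, every member of a process group, or every task
 * owned by a user, depending on @which. The RT class requires
 * CAP_SYS_ADMIN.
 */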
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret;

	switch (class) {
		case IOPRIO_CLASS_RT:
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			/* fall through, rt has prio field too */
		case IOPRIO_CLASS_BE:
			if (data >= IOPRIO_BE_NR || data < 0)
				return -EINVAL;

			break;
		case IOPRIO_CLASS_IDLE:
			break;
		case IOPRIO_CLASS_NONE:
			if (data)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

	ret = -ESRCH;
	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = set_task_ioprio(p, ioprio);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					break;
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			uid = make_kuid(current_user_ns(), who);
			if (!uid_valid(uid))
				break;
			if (!who)
				user = current_user();
			else
				user = find_user(uid);

			if (!user)
				break;

			do_each_thread(g, p) {
				if (!uid_eq(task_uid(p), uid) ||
				    !task_pid_vnr(p))
					continue;
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					goto free_uid;
			} while_each_thread(g, p);
free_uid:
			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}

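/*
 * Return the effective io priority of @p, or the default
 * IOPRIO_CLASS_NONE/IOPRIO_NORM value when no io_context has been set
 * up, provided the security hook allows the query.
 */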
static int get_task_ioprio(struct task_struct *p)
{
	int ret;

	ret = security_task_getioprio(p);
	if (ret)
		goto out;
	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	if (p->io_context)
		ret = p->io_context->ioprio;
out:
	return ret;
}

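/*
 * Return the stronger of two io priorities. A lower class value means a
 * higher priority (RT < BE < IDLE), and within the same class the lower
 * data value wins; e.g. given an RT/1 and a BE/0 priority, the RT/1 one
 * is returned. Invalid priorities are treated as BE/IOPRIO_NORM.
 */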
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass;
	unsigned short bclass;

	if (!ioprio_valid(aprio))
		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	if (!ioprio_valid(bprio))
		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	aclass = IOPRIO_PRIO_CLASS(aprio);
	bclass = IOPRIO_PRIO_CLASS(bprio);
	if (aclass == bclass)
		return min(aprio, bprio);
	if (aclass > bclass)
		return bprio;
	else
		return aprio;
}

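/*
 * ioprio_get(2): report the io priority of a single process, or the
 * highest (best) priority found among a process group or a user's
 * tasks. Returns -ESRCH if no matching task is found.
 */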
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret = -ESRCH;
	int tmpio;

	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = get_task_ioprio(p);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			uid = make_kuid(current_user_ns(), who);
			if (!who)
				user = current_user();
			else
				user = find_user(uid);

			if (!user)
				break;

			do_each_thread(g, p) {
				if (!uid_eq(task_uid(p), user->uid) ||
				    !task_pid_vnr(p))
					continue;
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			} while_each_thread(g, p);

			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}