This source file includes the following definitions:
- rtc_dev_open
- rtc_uie_task
- rtc_uie_timer
- clear_uie
- set_uie
- rtc_dev_update_irq_enable_emul
- rtc_dev_read
- rtc_dev_poll
- rtc_dev_ioctl
- rtc_dev_fasync
- rtc_dev_release
- rtc_dev_prepare
- rtc_dev_init
- rtc_dev_exit
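
These handlers implement the /dev/rtcN character-device interface on top of the RTC core. For orientation, below is a minimal userspace sketch (not part of this file, and assuming an RTC is registered as /dev/rtc0) that exercises the two main paths: reading the time through rtc_dev_ioctl() with RTC_RD_TIME, and blocking in rtc_dev_read() for one update interrupt after RTC_UIE_ON.

/* Example userspace program, for illustration only. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
        struct rtc_time tm;
        unsigned long data;
        int fd = open("/dev/rtc0", O_RDONLY);

        if (fd < 0)
                return 1;

        /* RTC_RD_TIME is served by rtc_dev_ioctl() below */
        if (ioctl(fd, RTC_RD_TIME, &tm) == 0)
                printf("%04d-%02d-%02d %02d:%02d:%02d\n",
                       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                       tm.tm_hour, tm.tm_min, tm.tm_sec);

        /* Enable update interrupts, then block in rtc_dev_read() until one
         * arrives; the low byte of 'data' holds the event flags (RTC_UF
         * here) and the remaining bytes the event count. */
        if (ioctl(fd, RTC_UIE_ON, 0) == 0) {
                if (read(fd, &data, sizeof(data)) == sizeof(data))
                        printf("events: %lu, flags: 0x%02lx\n",
                               data >> 8, data & 0xff);
                ioctl(fd, RTC_UIE_OFF, 0);
        }

        close(fd);
        return 0;
}
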
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched/signal.h>
#include "rtc-core.h"

static dev_t rtc_devt;

#define RTC_DEV_MAX 16

static int rtc_dev_open(struct inode *inode, struct file *file)
{
        struct rtc_device *rtc = container_of(inode->i_cdev,
                                              struct rtc_device, char_dev);

        if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
                return -EBUSY;

        file->private_data = rtc;

        spin_lock_irq(&rtc->irq_lock);
        rtc->irq_data = 0;
        spin_unlock_irq(&rtc->irq_lock);

        return 0;
}

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL

/*
 * Emulated update interrupts: poll the RTC seconds field from a work item
 * and, once it changes, re-arm a timer to fire shortly before the next
 * expected second boundary, reporting RTC_UF events along the way.
 */
static void rtc_uie_task(struct work_struct *work)
{
        struct rtc_device *rtc =
                container_of(work, struct rtc_device, uie_task);
        struct rtc_time tm;
        int num = 0;
        int err;

        err = rtc_read_time(rtc, &tm);

        spin_lock_irq(&rtc->irq_lock);
        if (rtc->stop_uie_polling || err) {
                rtc->uie_task_active = 0;
        } else if (rtc->oldsecs != tm.tm_sec) {
                num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
                rtc->oldsecs = tm.tm_sec;
                rtc->uie_timer.expires = jiffies + HZ - (HZ / 10);
                rtc->uie_timer_active = 1;
                rtc->uie_task_active = 0;
                add_timer(&rtc->uie_timer);
        } else if (schedule_work(&rtc->uie_task) == 0) {
                rtc->uie_task_active = 0;
        }
        spin_unlock_irq(&rtc->irq_lock);
        if (num)
                rtc_handle_legacy_irq(rtc, num, RTC_UF);
}

static void rtc_uie_timer(struct timer_list *t)
{
        struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
        unsigned long flags;

        spin_lock_irqsave(&rtc->irq_lock, flags);
        rtc->uie_timer_active = 0;
        rtc->uie_task_active = 1;
        if (schedule_work(&rtc->uie_task) == 0)
                rtc->uie_task_active = 0;
        spin_unlock_irqrestore(&rtc->irq_lock, flags);
}

static int clear_uie(struct rtc_device *rtc)
{
        spin_lock_irq(&rtc->irq_lock);
        if (rtc->uie_irq_active) {
                rtc->stop_uie_polling = 1;
                if (rtc->uie_timer_active) {
                        spin_unlock_irq(&rtc->irq_lock);
                        del_timer_sync(&rtc->uie_timer);
                        spin_lock_irq(&rtc->irq_lock);
                        rtc->uie_timer_active = 0;
                }
                if (rtc->uie_task_active) {
                        spin_unlock_irq(&rtc->irq_lock);
                        flush_scheduled_work();
                        spin_lock_irq(&rtc->irq_lock);
                }
                rtc->uie_irq_active = 0;
        }
        spin_unlock_irq(&rtc->irq_lock);
        return 0;
}

static int set_uie(struct rtc_device *rtc)
{
        struct rtc_time tm;
        int err;

        err = rtc_read_time(rtc, &tm);
        if (err)
                return err;
        spin_lock_irq(&rtc->irq_lock);
        if (!rtc->uie_irq_active) {
                rtc->uie_irq_active = 1;
                rtc->stop_uie_polling = 0;
                rtc->oldsecs = tm.tm_sec;
                rtc->uie_task_active = 1;
                if (schedule_work(&rtc->uie_task) == 0)
                        rtc->uie_task_active = 0;
        }
        rtc->irq_data = 0;
        spin_unlock_irq(&rtc->irq_lock);
        return 0;
}

int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
{
        if (enabled)
                return set_uie(rtc);
        else
                return clear_uie(rtc);
}
EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);

#endif

static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct rtc_device *rtc = file->private_data;

        DECLARE_WAITQUEUE(wait, current);
        unsigned long data;
        ssize_t ret;

        if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
                return -EINVAL;

        add_wait_queue(&rtc->irq_queue, &wait);
        do {
                __set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irq(&rtc->irq_lock);
                data = rtc->irq_data;
                rtc->irq_data = 0;
                spin_unlock_irq(&rtc->irq_lock);

                if (data != 0) {
                        ret = 0;
                        break;
                }
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                schedule();
        } while (1);
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&rtc->irq_queue, &wait);

        if (ret == 0) {
                if (sizeof(int) != sizeof(long) &&
                    count == sizeof(unsigned int))
                        ret = put_user(data, (unsigned int __user *)buf) ?:
                                sizeof(unsigned int);
                else
                        ret = put_user(data, (unsigned long __user *)buf) ?:
                                sizeof(unsigned long);
        }
        return ret;
}

static __poll_t rtc_dev_poll(struct file *file, poll_table *wait)
{
        struct rtc_device *rtc = file->private_data;
        unsigned long data;

        poll_wait(file, &rtc->irq_queue, wait);

        data = rtc->irq_data;

        return (data != 0) ? (EPOLLIN | EPOLLRDNORM) : 0;
}

static long rtc_dev_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        int err = 0;
        struct rtc_device *rtc = file->private_data;
        const struct rtc_class_ops *ops = rtc->ops;
        struct rtc_time tm;
        struct rtc_wkalrm alarm;
        void __user *uarg = (void __user *)arg;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        /*
         * Check the caller's capabilities up front for the commands that
         * need them, so the per-command code below does not have to.
         */
        switch (cmd) {
        case RTC_EPOCH_SET:
        case RTC_SET_TIME:
                if (!capable(CAP_SYS_TIME))
                        err = -EACCES;
                break;

        case RTC_IRQP_SET:
                if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
                        err = -EACCES;
                break;

        case RTC_PIE_ON:
                if (rtc->irq_freq > rtc->max_user_freq &&
                    !capable(CAP_SYS_RESOURCE))
                        err = -EACCES;
                break;
        }

        if (err)
                goto done;

        /*
         * The standard requests are served through the RTC core
         * (rtc_read_time(), rtc_set_alarm(), ...); only commands the
         * core does not handle are passed to the driver's own ioctl
         * hook in the default case below.
         */
        switch (cmd) {
        case RTC_ALM_READ:
                mutex_unlock(&rtc->ops_lock);

                err = rtc_read_alarm(rtc, &alarm);
                if (err < 0)
                        return err;

                if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
                        err = -EFAULT;
                return err;

        case RTC_ALM_SET:
                mutex_unlock(&rtc->ops_lock);

                if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
                        return -EFAULT;

                alarm.enabled = 0;
                alarm.pending = 0;
                alarm.time.tm_wday = -1;
                alarm.time.tm_yday = -1;
                alarm.time.tm_isdst = -1;

                /*
                 * RTC_ALM_SET alarms carry only a time of day, so the
                 * date fields are filled in from the current RTC time;
                 * if that instant has already passed, the alarm is moved
                 * to the same time tomorrow.  RTC_WKALM_SET does not have
                 * this limitation.
                 */
                {
                        time64_t now, then;

                        err = rtc_read_time(rtc, &tm);
                        if (err < 0)
                                return err;
                        now = rtc_tm_to_time64(&tm);

                        alarm.time.tm_mday = tm.tm_mday;
                        alarm.time.tm_mon = tm.tm_mon;
                        alarm.time.tm_year = tm.tm_year;
                        err = rtc_valid_tm(&alarm.time);
                        if (err < 0)
                                return err;
                        then = rtc_tm_to_time64(&alarm.time);

                        /* the alarm may need to wrap into tomorrow */
                        if (then < now) {
                                rtc_time64_to_tm(now + 24 * 60 * 60, &tm);
                                alarm.time.tm_mday = tm.tm_mday;
                                alarm.time.tm_mon = tm.tm_mon;
                                alarm.time.tm_year = tm.tm_year;
                        }
                }

                return rtc_set_alarm(rtc, &alarm);

        case RTC_RD_TIME:
                mutex_unlock(&rtc->ops_lock);

                err = rtc_read_time(rtc, &tm);
                if (err < 0)
                        return err;

                if (copy_to_user(uarg, &tm, sizeof(tm)))
                        err = -EFAULT;
                return err;

        case RTC_SET_TIME:
                mutex_unlock(&rtc->ops_lock);

                if (copy_from_user(&tm, uarg, sizeof(tm)))
                        return -EFAULT;

                return rtc_set_time(rtc, &tm);

        case RTC_PIE_ON:
                err = rtc_irq_set_state(rtc, 1);
                break;

        case RTC_PIE_OFF:
                err = rtc_irq_set_state(rtc, 0);
                break;

        case RTC_AIE_ON:
                mutex_unlock(&rtc->ops_lock);
                return rtc_alarm_irq_enable(rtc, 1);

        case RTC_AIE_OFF:
                mutex_unlock(&rtc->ops_lock);
                return rtc_alarm_irq_enable(rtc, 0);

        case RTC_UIE_ON:
                mutex_unlock(&rtc->ops_lock);
                return rtc_update_irq_enable(rtc, 1);

        case RTC_UIE_OFF:
                mutex_unlock(&rtc->ops_lock);
                return rtc_update_irq_enable(rtc, 0);

        case RTC_IRQP_SET:
                err = rtc_irq_set_freq(rtc, arg);
                break;

        case RTC_IRQP_READ:
                err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
                break;

        case RTC_WKALM_SET:
                mutex_unlock(&rtc->ops_lock);
                if (copy_from_user(&alarm, uarg, sizeof(alarm)))
                        return -EFAULT;

                return rtc_set_alarm(rtc, &alarm);

        case RTC_WKALM_RD:
                mutex_unlock(&rtc->ops_lock);
                err = rtc_read_alarm(rtc, &alarm);
                if (err < 0)
                        return err;

                if (copy_to_user(uarg, &alarm, sizeof(alarm)))
                        err = -EFAULT;
                return err;

        default:
                /* finally, fall back to the driver's own ioctl handler */
                if (ops->ioctl) {
                        err = ops->ioctl(rtc->dev.parent, cmd, arg);
                        if (err == -ENOIOCTLCMD)
                                err = -ENOTTY;
                } else {
                        err = -ENOTTY;
                }
                break;
        }

done:
        mutex_unlock(&rtc->ops_lock);
        return err;
}

static int rtc_dev_fasync(int fd, struct file *file, int on)
{
        struct rtc_device *rtc = file->private_data;

        return fasync_helper(fd, file, on, &rtc->async_queue);
}

static int rtc_dev_release(struct inode *inode, struct file *file)
{
        struct rtc_device *rtc = file->private_data;

        /*
         * Userspace is closing the device, so switch off the interrupts
         * it may have enabled: update (UIE) and periodic (PIE).  The
         * alarm is deliberately left alone, as it may still be wanted,
         * e.g. to wake the system later.
         */
        rtc_dev_ioctl(file, RTC_UIE_OFF, 0);
        rtc_update_irq_enable(rtc, 0);
        rtc_irq_set_state(rtc, 0);

        clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
        return 0;
}

static const struct file_operations rtc_dev_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = rtc_dev_read,
        .poll = rtc_dev_poll,
        .unlocked_ioctl = rtc_dev_ioctl,
        .open = rtc_dev_open,
        .release = rtc_dev_release,
        .fasync = rtc_dev_fasync,
};

void rtc_dev_prepare(struct rtc_device *rtc)
{
        if (!rtc_devt)
                return;

        if (rtc->id >= RTC_DEV_MAX) {
                dev_dbg(&rtc->dev, "too many RTC devices\n");
                return;
        }

        rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        INIT_WORK(&rtc->uie_task, rtc_uie_task);
        timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
#endif

        cdev_init(&rtc->char_dev, &rtc_dev_fops);
        rtc->char_dev.owner = rtc->owner;
}

void __init rtc_dev_init(void)
{
        int err;

        err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
        if (err < 0)
                pr_err("failed to allocate char dev region\n");
}

void __exit rtc_dev_exit(void)
{
        if (rtc_devt)
                unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
}