Lines matching refs:timr — uses of the struct k_itimer pointer `timr` in the Linux POSIX timer implementation (posix-timers.c). Each entry shows the source line number, the matching code, and the enclosing function (with "argument"/"local" marking how `timr` is bound there).
200 static inline void unlock_timer(struct k_itimer *timr, unsigned long flags) in unlock_timer() argument
202 spin_unlock_irqrestore(&timr->it_lock, flags); in unlock_timer()
351 static void schedule_next_timer(struct k_itimer *timr) in schedule_next_timer() argument
353 struct hrtimer *timer = &timr->it.real.timer; in schedule_next_timer()
355 if (timr->it.real.interval.tv64 == 0) in schedule_next_timer()
358 timr->it_overrun += (unsigned int) hrtimer_forward(timer, in schedule_next_timer()
360 timr->it.real.interval); in schedule_next_timer()
362 timr->it_overrun_last = timr->it_overrun; in schedule_next_timer()
363 timr->it_overrun = -1; in schedule_next_timer()
364 ++timr->it_requeue_pending; in schedule_next_timer()
381 struct k_itimer *timr; in do_schedule_next_timer() local
384 timr = lock_timer(info->si_tid, &flags); in do_schedule_next_timer()
386 if (timr && timr->it_requeue_pending == info->si_sys_private) { in do_schedule_next_timer()
387 if (timr->it_clock < 0) in do_schedule_next_timer()
388 posix_cpu_timer_schedule(timr); in do_schedule_next_timer()
390 schedule_next_timer(timr); in do_schedule_next_timer()
392 info->si_overrun += timr->it_overrun_last; in do_schedule_next_timer()
395 if (timr) in do_schedule_next_timer()
396 unlock_timer(timr, flags); in do_schedule_next_timer()
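
The block above (schedule_next_timer()/do_schedule_next_timer(), lines 351-396) is the requeue path: when the signal of a periodic timer is dequeued, the kernel forwards the hrtimer past "now", folds the missed periods into it_overrun_last, and adds that count to the queued siginfo (line 392). The program below is illustrative only and not part of this listing; it assumes Linux's siginfo_t exposes the si_overrun field and that POSIX timers are in libc (older glibc needs -lrt).

/*
 * Illustrative userspace demo (not from the listing): a periodic POSIX timer
 * whose SA_SIGINFO handler records the Linux-specific si_overrun value that
 * do_schedule_next_timer() accumulates from it_overrun_last.
 * Build: cc -o overrun_demo overrun_demo.c   (add -lrt on older glibc)
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;
static volatile sig_atomic_t last_overrun;

static void handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	if (info->si_code == SI_TIMER) {
		ticks++;
		last_overrun = info->si_overrun;   /* filled in by the kernel */
	}
}

int main(void)
{
	struct sigaction sa;
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo  = SIGRTMIN };
	struct itimerspec its = {
		.it_value    = { .tv_nsec = 50 * 1000 * 1000 },
		.it_interval = { .tv_nsec = 50 * 1000 * 1000 },   /* 50 ms period */
	};
	timer_t tid;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);

	if (sigaction(SIGRTMIN, &sa, NULL) ||
	    timer_create(CLOCK_MONOTONIC, &sev, &tid) ||
	    timer_settime(tid, 0, &its, NULL)) {
		perror("setup");
		return 1;
	}

	while (ticks < 20)       /* ~1 s of expirations */
		pause();
	printf("last si_overrun seen: %d\n", (int)last_overrun);
	timer_delete(tid);
	return 0;
}

As long as delivery keeps up the handler normally sees 0; delaying delivery (for example by blocking the signal) makes the count grow, which is also what timer_getoverrun() reports further down.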
399 int posix_timer_event(struct k_itimer *timr, int si_private) in posix_timer_event() argument
414 timr->sigq->info.si_sys_private = si_private; in posix_timer_event()
417 task = pid_task(timr->it_pid, PIDTYPE_PID); in posix_timer_event()
419 shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); in posix_timer_event()
420 ret = send_sigqueue(timr->sigq, task, shared); in posix_timer_event()
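
posix_timer_event() (lines 399-420) is where an expiry turns into a queued signal: si_sys_private is stamped so the dequeue path above can match it, and send_sigqueue() delivers to the whole process or, for SIGEV_THREAD_ID, to a single thread. Below is an illustrative receiver, not taken from the listing (standard POSIX timer API only; SIGRTMIN and the 42 payload are arbitrary choices), that consumes the signal synchronously with sigwaitinfo() and checks the SI_TIMER code and sigev_value carried in the siginfo.

/*
 * Illustrative userspace receiver (not from the listing): consume the signal
 * queued by posix_timer_event()/send_sigqueue() with sigwaitinfo() and
 * inspect the siginfo it carries.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,
		.sigev_value.sival_int = 42,   /* comes back as si_value */
	};
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };
	siginfo_t info;
	timer_t tid;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);   /* deliver only via sigwaitinfo() */

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) ||
	    timer_settime(tid, 0, &its, NULL)) {
		perror("timer");
		return 1;
	}

	if (sigwaitinfo(&set, &info) == SIGRTMIN && info.si_code == SI_TIMER)
		printf("timer fired, si_value = %d\n", info.si_value.sival_int);

	timer_delete(tid);
	return 0;
}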
437 struct k_itimer *timr; in posix_timer_fn() local
442 timr = container_of(timer, struct k_itimer, it.real.timer); in posix_timer_fn()
443 spin_lock_irqsave(&timr->it_lock, flags); in posix_timer_fn()
445 if (timr->it.real.interval.tv64 != 0) in posix_timer_fn()
446 si_private = ++timr->it_requeue_pending; in posix_timer_fn()
448 if (posix_timer_event(timr, si_private)) { in posix_timer_fn()
454 if (timr->it.real.interval.tv64 != 0) { in posix_timer_fn()
483 if (timr->it.real.interval.tv64 < kj.tv64) in posix_timer_fn()
487 timr->it_overrun += (unsigned int) in posix_timer_fn()
489 timr->it.real.interval); in posix_timer_fn()
491 ++timr->it_requeue_pending; in posix_timer_fn()
495 unlock_timer(timr, flags); in posix_timer_fn()
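
posix_timer_fn() (lines 437-495) is the hrtimer expiry callback: it raises the signal and, if the previous period's signal is still queued, forwards the timer by at least one jiffy and counts the skipped periods as overruns. To the best of my understanding, glibc's SIGEV_THREAD notification is layered on this same kernel machinery (a helper thread receiving a SIGEV_THREAD_ID-directed signal), so even a callback-style timer like the illustrative one below ends up in this path. The sketch uses only the standard API; the callback name and message are made up.

/*
 * Illustrative SIGEV_THREAD timer (not from the listing): the expiration is
 * reported by running a callback in a library-created thread rather than in
 * a signal handler. Build: cc -o thread_demo thread_demo.c -pthread
 * (plus -lrt on older glibc)
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void expired(union sigval sv)
{
	/* Runs in a thread created by the C library, not in signal context. */
	printf("timer \"%s\" expired\n", (const char *)sv.sival_ptr);
}

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = expired,
		.sigev_value.sival_ptr = "demo",
	};
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };
	timer_t tid;

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) ||
	    timer_settime(tid, 0, &its, NULL)) {
		perror("timer");
		return 1;
	}

	sleep(2);   /* give the notification thread time to run */
	timer_delete(tid);
	return 0;
}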
688 struct k_itimer *timr; in __lock_timer() local
698 timr = posix_timer_by_id(timer_id); in __lock_timer()
699 if (timr) { in __lock_timer()
700 spin_lock_irqsave(&timr->it_lock, *flags); in __lock_timer()
701 if (timr->it_signal == current->signal) { in __lock_timer()
703 return timr; in __lock_timer()
705 spin_unlock_irqrestore(&timr->it_lock, *flags); in __lock_timer()
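
__lock_timer() (lines 688-705) implements the pattern every timer_*() syscall relies on: resolve the user-supplied ID, take it_lock with interrupts disabled, and only then verify that the timer still belongs to the caller's signal struct, unlocking and failing otherwise. The fragment below is a hedged userspace analog of that lookup-then-validate-under-lock pattern, not kernel code; the table, struct and field names are all hypothetical.

/*
 * Hypothetical userspace analog of the __lock_timer() pattern: look an object
 * up by ID, lock it, then re-check ownership before returning it locked.
 * None of these names exist in the kernel source above; the mutexes in
 * toy_table are assumed to be initialized elsewhere (PTHREAD_MUTEX_INITIALIZER).
 */
#include <pthread.h>
#include <stddef.h>
#include <sys/types.h>

#define MAX_TOY_TIMERS 32

struct toy_timer {
	pthread_mutex_t lock;    /* stands in for timr->it_lock */
	int             in_use;
	pid_t           owner;   /* stands in for timr->it_signal */
	/* payload ... */
};

static struct toy_timer toy_table[MAX_TOY_TIMERS];

/* Returns the timer locked, or NULL if the ID is stale or not owned by caller. */
struct toy_timer *lock_toy_timer(int id, pid_t caller)
{
	struct toy_timer *t;

	if (id < 0 || id >= MAX_TOY_TIMERS)
		return NULL;
	t = &toy_table[id];
	pthread_mutex_lock(&t->lock);
	if (t->in_use && t->owner == caller)
		return t;        /* caller is responsible for unlocking */
	pthread_mutex_unlock(&t->lock);
	return NULL;
}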
729 common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) in common_timer_get() argument
732 struct hrtimer *timer = &timr->it.real.timer; in common_timer_get()
736 iv = timr->it.real.interval; in common_timer_get()
742 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) in common_timer_get()
752 if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || in common_timer_get()
753 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) in common_timer_get()
754 timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); in common_timer_get()
763 if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) in common_timer_get()
774 struct k_itimer *timr; in SYSCALL_DEFINE2() local
779 timr = lock_timer(timer_id, &flags); in SYSCALL_DEFINE2()
780 if (!timr) in SYSCALL_DEFINE2()
783 kc = clockid_to_kclock(timr->it_clock); in SYSCALL_DEFINE2()
787 kc->timer_get(timr, &cur_setting); in SYSCALL_DEFINE2()
789 unlock_timer(timr, flags); in SYSCALL_DEFINE2()
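
The SYSCALL_DEFINE2 lines (774-789) are timer_gettime(2), which lands in common_timer_get() above; note how lines 752-754 forward SIGEV_NONE timers on every read, so their remaining time stays meaningful even though they never queue a signal. Illustrative use below (standard API, nothing taken from the listing): a SIGEV_NONE timer polled for its remaining time.

/*
 * Illustrative use of timer_gettime(2) (not from the listing): a SIGEV_NONE
 * timer never delivers anything, but polling it with timer_gettime() still
 * reports the time left to expiry.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct sigevent sev = { .sigev_notify = SIGEV_NONE };
	struct itimerspec its = { .it_value = { .tv_sec = 5 } };
	struct itimerspec cur;
	timer_t tid;

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) ||
	    timer_settime(tid, 0, &its, NULL)) {
		perror("timer");
		return 1;
	}

	sleep(1);
	if (timer_gettime(tid, &cur) == 0)   /* kernel side: common_timer_get() */
		printf("remaining: %ld.%09ld s\n",
		       (long)cur.it_value.tv_sec, (long)cur.it_value.tv_nsec);

	timer_delete(tid);
	return 0;
}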
808 struct k_itimer *timr; in SYSCALL_DEFINE1() local
812 timr = lock_timer(timer_id, &flags); in SYSCALL_DEFINE1()
813 if (!timr) in SYSCALL_DEFINE1()
816 overrun = timr->it_overrun_last; in SYSCALL_DEFINE1()
817 unlock_timer(timr, flags); in SYSCALL_DEFINE1()
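
SYSCALL_DEFINE1 at lines 808-817 is timer_getoverrun(2): under the timer lock it simply returns it_overrun_last, i.e. the count that posix_timer_fn()/schedule_next_timer() accumulated while the previous signal was still pending. The illustrative program below (standard API only; the 10 ms period is arbitrary) provokes overruns on purpose by keeping the signal blocked while a periodic timer runs.

/*
 * Illustrative overrun demo (not from the listing): keep the timer signal
 * blocked for ~1 s while a 10 ms periodic timer expires, then dequeue the one
 * pending signal and ask timer_getoverrun() how many expirations were missed.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo  = SIGRTMIN };
	struct itimerspec its = {
		.it_value    = { .tv_nsec = 10 * 1000 * 1000 },
		.it_interval = { .tv_nsec = 10 * 1000 * 1000 },   /* 10 ms period */
	};
	siginfo_t info;
	timer_t tid;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);   /* expirations pile up as overruns */

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) ||
	    timer_settime(tid, 0, &its, NULL)) {
		perror("timer");
		return 1;
	}

	sleep(1);                     /* ~100 expirations, only one signal queued */
	sigwaitinfo(&set, &info);     /* dequeue -> do_schedule_next_timer() */
	printf("timer_getoverrun: %d\n", timer_getoverrun(tid));

	timer_delete(tid);
	return 0;
}

On an idle system this typically prints a value in the nineties: one expiration was queued, the rest were folded into the overrun counter maintained by the lines above.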
825 common_timer_set(struct k_itimer *timr, int flags, in common_timer_set() argument
828 struct hrtimer *timer = &timr->it.real.timer; in common_timer_set()
832 common_timer_get(timr, old_setting); in common_timer_set()
835 timr->it.real.interval.tv64 = 0; in common_timer_set()
843 timr->it_requeue_pending = (timr->it_requeue_pending + 2) & in common_timer_set()
845 timr->it_overrun_last = 0; in common_timer_set()
852 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); in common_timer_set()
853 timr->it.real.timer.function = posix_timer_fn; in common_timer_set()
858 timr->it.real.interval = timespec_to_ktime(new_setting->it_interval); in common_timer_set()
861 if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) { in common_timer_set()
878 struct k_itimer *timr; in SYSCALL_DEFINE4() local
895 timr = lock_timer(timer_id, &flag); in SYSCALL_DEFINE4()
896 if (!timr) in SYSCALL_DEFINE4()
899 kc = clockid_to_kclock(timr->it_clock); in SYSCALL_DEFINE4()
903 error = kc->timer_set(timr, flags, &new_spec, rtn); in SYSCALL_DEFINE4()
905 unlock_timer(timr, flag); in SYSCALL_DEFINE4()
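
The SYSCALL_DEFINE4 lines (878-905) are timer_settime(2), which ends up in common_timer_set() above: the old value is read back, the hrtimer is cancelled and re-initialised, and the requeue/overrun bookkeeping is reset before the new expiry is programmed. An illustrative caller follows (standard API; the two-second offset is an arbitrary choice), arming an absolute one-shot expiry and then disarming the timer.

/*
 * Illustrative use of timer_settime(2) (not from the listing): arm an
 * absolute one-shot expiry two seconds from "now" on the timer's clock,
 * wait for it, then disarm by writing a zeroed it_value.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo  = SIGRTMIN };
	struct itimerspec its = { 0 }, off = { 0 };
	struct timespec now;
	timer_t tid;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) ||
	    clock_gettime(CLOCK_MONOTONIC, &now)) {
		perror("setup");
		return 1;
	}

	its.it_value.tv_sec  = now.tv_sec + 2;   /* absolute: fire at now + 2 s */
	its.it_value.tv_nsec = now.tv_nsec;
	if (timer_settime(tid, TIMER_ABSTIME, &its, NULL)) {
		perror("timer_settime");
		return 1;
	}

	sigwaitinfo(&set, NULL);
	puts("absolute timer fired");

	timer_settime(tid, 0, &off, NULL);   /* zeroed it_value disarms the timer */
	timer_delete(tid);
	return 0;
}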