Mirror of https://github.com/opnsense/src.git (synced 2026-03-28 13:43:12 -04:00)
Make kevent(2) periodic timer events more reliably periodic.

The event callout is now scheduled using the C_ABSOLUTE flag, and the
absolute time of each event is calculated as the time the previous event
was scheduled for, plus the interval. This ensures that latency in
processing a given event does not perturb the arrival time of any
subsequent events.

Reviewed by: jhb
This commit is contained in:
parent
4bb264ae15
commit
41e8f7efbe
2 changed files with 10 additions and 5 deletions
|
|
@ -569,9 +569,10 @@ filt_timerexpire(void *knx)
|
|||
|
||||
if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
|
||||
calloutp = (struct callout *)kn->kn_hook;
|
||||
callout_reset_sbt_on(calloutp,
|
||||
timer2sbintime(kn->kn_sdata, kn->kn_sfflags), 0,
|
||||
filt_timerexpire, kn, PCPU_GET(cpuid), 0);
|
||||
*kn->kn_ptr.p_nexttime += timer2sbintime(kn->kn_sdata,
|
||||
kn->kn_sfflags);
|
||||
callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
|
||||
filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -607,11 +608,13 @@ filt_timerattach(struct knote *kn)
|
|||
|
||||
kn->kn_flags |= EV_CLEAR; /* automatically set */
|
||||
kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */
|
||||
kn->kn_ptr.p_nexttime = malloc(sizeof(sbintime_t), M_KQUEUE, M_WAITOK);
|
||||
calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
|
||||
callout_init(calloutp, CALLOUT_MPSAFE);
|
||||
kn->kn_hook = calloutp;
|
||||
callout_reset_sbt_on(calloutp, to, 0,
|
||||
filt_timerexpire, kn, PCPU_GET(cpuid), 0);
|
||||
*kn->kn_ptr.p_nexttime = to + sbinuptime();
|
||||
callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
|
||||
filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
|
@ -625,6 +628,7 @@ filt_timerdetach(struct knote *kn)
|
|||
calloutp = (struct callout *)kn->kn_hook;
|
||||
callout_drain(calloutp);
|
||||
free(calloutp, M_KQUEUE);
|
||||
free(kn->kn_ptr.p_nexttime, M_KQUEUE);
|
||||
old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
|
||||
KASSERT(old > 0, ("Number of callouts cannot become negative"));
|
||||
kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */
|
||||
|
|
|
|||
|
|
@ -221,6 +221,7 @@ struct knote {
|
|||
struct proc *p_proc; /* proc pointer */
|
||||
struct aiocblist *p_aio; /* AIO job pointer */
|
||||
struct aioliojob *p_lio; /* LIO job pointer */
|
||||
sbintime_t *p_nexttime; /* next timer event fires at */
|
||||
void *p_v; /* generic other pointer */
|
||||
} kn_ptr;
|
||||
struct filterops *kn_fop;
|
||||
|
|
|
|||
Loading…
Reference in a new issue