Skip to content

Commit 95f66f9

Browse files
committed
work: new calculation of next tick
Changes the method of calculating the next work-queue tick. Instead of taking the actual time of the last tick, use the requested time of the tick. From my experiments, it can take up to ~40 cycles from the requested interrupt time until the interrupt handler is executed. Signed-off-by: Tomasz Lauda <tomasz.lauda@linux.intel.com>
1 parent af2c4d5 commit 95f66f9

1 file changed

Lines changed: 7 additions & 6 deletions

File tree

src/lib/work.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -69,13 +69,13 @@ struct work_queue {
6969
struct notifier notifier; /* notify CPU freq changes */
7070
struct work_queue_timesource *ts; /* time source for work queue */
7171
uint32_t ticks_per_msec; /* ticks per msec */
72-
uint64_t run_ticks; /* ticks when last run */
7372
atomic_t num_work; /* number of queued work items */
7473
};
7574

7675
struct work_queue_shared_context {
7776
atomic_t total_num_work; /* number of total queued work items */
7877
atomic_t timer_clients; /* number of timer clients */
78+
uint64_t last_tick; /* time of last tick */
7979

8080
/* registered timers */
8181
struct timer *timers[PLATFORM_CORE_COUNT];
@@ -104,6 +104,7 @@ static inline void work_set_timer(struct work_queue *queue)
104104

105105
if (atomic_add(&work_shared_ctx->total_num_work, 1) == 1) {
106106
ticks = queue_calc_next_timeout(queue, work_get_timer(queue));
107+
work_shared_ctx->last_tick = ticks;
107108
queue->ts->timer_set(&queue->ts->timer, ticks);
108109
atomic_add(&work_shared_ctx->timer_clients, 1);
109110
timer_enable(&queue->ts->timer);
@@ -185,7 +186,7 @@ static inline void work_next_timeout(struct work_queue *queue,
185186
work->timeout += next_d;
186187
} else {
187188
/* calc next run based on work request */
188-
work->timeout = next_d + queue->run_ticks;
189+
work->timeout = next_d + work_shared_ctx->last_tick;
189190
}
190191
}
191192

@@ -301,8 +302,10 @@ static void queue_reschedule(struct work_queue *queue)
301302
/* re-arm only if there is work to do */
302303
if (atomic_read(&work_shared_ctx->total_num_work)) {
303304
/* re-arm timer */
304-
ticks = queue_calc_next_timeout(queue,
305-
queue->run_ticks);
305+
ticks = queue_calc_next_timeout
306+
(queue,
307+
work_shared_ctx->last_tick);
308+
work_shared_ctx->last_tick = ticks;
306309
queue->ts->timer_set(&queue->ts->timer, ticks);
307310

308311
queue_enable_registered_timers();
@@ -316,8 +319,6 @@ static void queue_run(void *data)
316319
struct work_queue *queue = (struct work_queue *)data;
317320
uint32_t flags;
318321

319-
queue->run_ticks = work_get_timer(queue);
320-
321322
timer_disable(&queue->ts->timer);
322323

323324
spin_lock_irq(&queue->lock, flags);

0 commit comments

Comments
 (0)