path: root/libbcache/clock.c
blob: 8218769f4f8c757859ad00a345718f6001e36e91
#include "bcache.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>

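/*
 * An IO clock counts sectors of IO done rather than wall-clock time; pending
 * timers are kept in a heap ordered so that the soonest-expiring timer is at
 * timers.data[0].
 */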
static inline bool io_timer_cmp(struct io_timer *l, struct io_timer *r)
{
	return time_after(l->expire, r->expire);
}

void bch_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);
	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp));
out:
	spin_unlock(&clock->timer_lock);
}

void bch_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);

	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer) {
			heap_del(&clock->timers, i, io_timer_cmp);
			break;
		}

	spin_unlock(&clock->timer_lock);
}

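/*
 * Used by the wait functions below: the on-stack wait's timer fires once the
 * IO clock reaches the requested time, marking the wait as expired and waking
 * the sleeping task.
 */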
struct io_clock_wait {
	struct io_timer		timer;
	struct task_struct	*task;
	int			expired;
};

static void io_clock_wait_fn(struct io_timer *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

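/*
 * Sleep until the IO clock reaches @until; as with schedule_timeout(), the
 * caller is expected to have set the task state beforehand.
 */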
void bch_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.timer.expire	= until;
	wait.timer.fn		= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch_io_timer_add(clock, &wait.timer);

	schedule();

	bch_io_timer_del(clock, &wait.timer);
}

/*
 * _only_ to be used from a kthread
 */
void bch_kthread_io_clock_wait(struct io_clock *clock,
			       unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.timer.expire	= until;
	wait.timer.fn		= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch_io_timer_add(clock, &wait.timer);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	}

	__set_current_state(TASK_RUNNING);
	bch_io_timer_del(clock, &wait.timer);
}

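/*
 * Pop and return the soonest-expiring timer if it has expired by @now,
 * otherwise return NULL.
 */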
static struct io_timer *get_expired_timer(struct io_clock *clock,
					  unsigned long now)
{
	struct io_timer *ret = NULL;

	spin_lock(&clock->timer_lock);

	if (clock->timers.used &&
	    time_after_eq(now, clock->timers.data[0]->expire))
		heap_pop(&clock->timers, ret, io_timer_cmp);

	spin_unlock(&clock->timer_lock);

	return ret;
}

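/*
 * Advance the read or write IO clock by @sectors and run any timers that have
 * expired as a result. Sectors are accumulated in a percpu buffer first so
 * that clock->now isn't hammered on every IO.
 */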
void bch_increment_clock(struct cache_set *c, unsigned sectors, int rw)
{
	struct io_clock *clock = &c->io_clock[rw];
	struct io_timer *timer;
	unsigned long now;

	/* Buffer up one megabyte worth of IO in the percpu counter */
	preempt_disable();

	if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
		   IO_CLOCK_PCPU_SECTORS)) {
		preempt_enable();
		return;
	}

	sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
	preempt_enable();
	now = atomic_long_add_return(sectors, &clock->now);

	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
}

void bch_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}

int bch_io_clock_init(struct io_clock *clock)
{
	atomic_long_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -ENOMEM;

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}