// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "nocow_locking.h"
#include "util.h"

#include <linux/closure.h>

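/*
 * Bucket nocow locks are kept in a fixed-size hash table: each table entry
 * holds a small array of (dev_bucket, signed lock count) pairs. A positive
 * count means the bucket is locked for update, a negative count means it is
 * locked for copy; the two directions exclude each other, but multiple
 * holders may share the lock in the same direction.
 */

/* Return true if @bucket is currently locked in either direction: */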
bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t, struct bpos bucket)
{
	u64 dev_bucket = bucket_to_u64(bucket);
	struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket && atomic_read(&l->l[i]))
			return true;
	return false;
}

#define sign(v)		((v) < 0 ? -1 : (v) > 0 ? 1 : 0)

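/*
 * Drop one reference on @bucket's nocow lock; @flags must indicate the same
 * direction the lock was taken in. Waiters are woken when the count drops to
 * zero.
 */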
void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t, struct bpos bucket, int flags)
{
	u64 dev_bucket = bucket_to_u64(bucket);
	struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
	int lock_val = flags ? 1 : -1;
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket) {
			int v = atomic_sub_return(lock_val, &l->l[i]);

			BUG_ON(v && sign(v) != lock_val);
			if (!v)
				closure_wake_up(&l->wait);
			return;
		}

	BUG();
}

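/*
 * Try to take @dev_bucket's nocow lock in the direction given by @flags:
 * fails if the bucket is locked in the opposite direction, if the hash entry
 * has no free slot for a new bucket, or if the lock count would overflow.
 */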
bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *l,
				 u64 dev_bucket, int flags)
{
	int v, lock_val = flags ? 1 : -1;
	unsigned i;

	spin_lock(&l->lock);

	/* Is there already an entry for this bucket? */
	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket)
			goto got_entry;

	/* No: claim an unused slot, if there is one: */
	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (!atomic_read(&l->l[i])) {
			l->b[i] = dev_bucket;
			goto take_lock;
		}
fail:
	spin_unlock(&l->lock);
	return false;
got_entry:
	v = atomic_read(&l->l[i]);
	/* Already locked in the opposite direction? */
	if (lock_val > 0 ? v < 0 : v > 0)
		goto fail;
take_lock:
	v = atomic_read(&l->l[i]);
	/* Overflow? */
	if (v && sign(v + lock_val) != sign(v))
		goto fail;

	atomic_add(lock_val, &l->l[i]);
	spin_unlock(&l->lock);
	return true;
}

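/*
 * Lock slowpath: block on the hash entry's waitlist until the trylock
 * succeeds, accounting the time spent waiting as nocow lock contention.
 */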
void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
			      struct nocow_lock_bucket *l,
			      u64 dev_bucket, int flags)
{
	if (!__bch2_bucket_nocow_trylock(l, dev_bucket, flags)) {
		struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
		u64 start_time = local_clock();

		__closure_wait_event(&l->wait, __bch2_bucket_nocow_trylock(l, dev_bucket, flags));
		bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
	}
}

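/* Print the contents of the lock table, collapsing runs of unused entries: */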
void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_table *t)
{
	unsigned i, nr_zero = 0;
	struct nocow_lock_bucket *l;

	for (l = t->l; l < t->l + ARRAY_SIZE(t->l); l++) {
		unsigned v = 0;

		for (i = 0; i < ARRAY_SIZE(l->l); i++)
			v |= atomic_read(&l->l[i]);

		if (!v) {
			nr_zero++;
			continue;
		}

		if (nr_zero)
			prt_printf(out, "(%u empty entries)\n", nr_zero);
		nr_zero = 0;

		for (i = 0; i < ARRAY_SIZE(l->l); i++) {
			int v = atomic_read(&l->l[i]);
			if (v) {
				bch2_bpos_to_text(out, u64_to_bucket(l->b[i]));
				prt_printf(out, ": %s %u ", v < 0 ? "copy" : "update", abs(v));
			}
		}
		prt_newline(out);
	}

	if (nr_zero)
		prt_printf(out, "(%u empty entries)\n", nr_zero);
}

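/* Check at shutdown that no nocow locks are still held: */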
void bch2_fs_nocow_locking_exit(struct bch_fs *c)
{
	struct bucket_nocow_lock_table *t = &c->nocow_locks;

	for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
		for (unsigned j = 0; j < ARRAY_SIZE(l->l); j++)
			BUG_ON(atomic_read(&l->l[j]));
}

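/* Initialize the spinlock guarding each hash table entry: */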
int bch2_fs_nocow_locking_init(struct bch_fs *c)
{
	struct bucket_nocow_lock_table *t = &c->nocow_locks;

	for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
		spin_lock_init(&l->lock);

	return 0;
}