/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_IO_READ_H
#define _BCACHEFS_IO_READ_H

#include "bkey_buf.h"

struct bch_read_bio {
	struct bch_fs		*c;
	u64			start_time;
	u64			submit_time;

	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If this rbio is a split, @parent points at the rbio it was split
	 * from; if not, we've overridden bi_end_io and saved the original in
	 * @end_io - @split below indicates which union member is live:
	 */
	union {
	struct bch_read_bio	*parent;
	bio_end_io_t		*end_io;
	};

	/*
	 * Saved copy of bio->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	bvec_iter;

	unsigned		offset_into_extent;

	u16			flags;
	union {
	struct {
	u16			bounce:1,
				split:1,
				kmalloc:1,
				have_ioref:1,
				narrow_crcs:1,
				hole:1,
				retry:2,
				context:2;
	};
	u16			_state;
	};

	struct bch_devs_list	devs_have;

	struct extent_ptr_decoded pick;

	/*
	 * pos we read from - different from data_pos for indirect extents:
	 */
	u32			subvol;
	struct bpos		read_pos;

	/*
	 * start pos of data we read (may not be pos of data we want) - for
	 * promote, narrow extents paths:
	 */
	enum btree_id		data_btree;
	struct bpos		data_pos;
	struct bversion		version;

	struct promote_op	*promote;

	struct bch_io_opts	opts;

	struct work_struct	work;

	struct bio		bio;
};
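
/*
 * A minimal sketch of how the parent/end_io union above is resolved at
 * completion time, keyed off @split. Illustrative only - the in-tree
 * completion path also handles retries, bouncing, and reference
 * counting across multiple splits:
 *
 *	static void rbio_done_sketch(struct bch_read_bio *rbio)
 *	{
 *		if (rbio->split) {
 *			struct bch_read_bio *parent = rbio->parent;
 *
 *			bio_put(&rbio->bio);
 *			bio_endio(&parent->bio);
 *		} else {
 *			rbio->bio.bi_end_io = rbio->end_io;
 *			bio_endio(&rbio->bio);
 *		}
 *	}
 */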

#define to_rbio(_bio)		container_of((_bio), struct bch_read_bio, bio)

struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
				struct bkey_buf *);

static inline int bch2_read_indirect_extent(struct btree_trans *trans,
					    enum btree_id *data_btree,
					    unsigned *offset_into_extent,
					    struct bkey_buf *k)
{
	if (k->k->k.type != KEY_TYPE_reflink_p)
		return 0;

	*data_btree = BTREE_ID_reflink;
	return __bch2_read_indirect_extent(trans, offset_into_extent, k);
}
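
/*
 * Illustrative call pattern (a sketch - @trans, @offset_into_extent and
 * the bkey_buf @sk are assumed to be set up by the caller): after
 * looking up an extent key, resolve any reflink indirection before
 * issuing the data read. For non-reflink keys this is a no-op; for
 * KEY_TYPE_reflink_p keys, @sk is rewritten to the indirect extent and
 * @data_btree is switched to BTREE_ID_reflink:
 *
 *	enum btree_id data_btree = BTREE_ID_extents;
 *	int ret = bch2_read_indirect_extent(trans, &data_btree,
 *					    &offset_into_extent, &sk);
 *	if (ret)
 *		goto err;
 */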

enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,
	BCH_READ_LAST_FRAGMENT		= 1 << 4,

	/* internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 5,
	BCH_READ_MUST_CLONE		= 1 << 6,
	BCH_READ_IN_RETRY		= 1 << 7,
};

int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);

static inline void bch2_read_extent(struct btree_trans *trans,
			struct bch_read_bio *rbio, struct bpos read_pos,
			enum btree_id data_btree, struct bkey_s_c k,
			unsigned offset_into_extent, unsigned flags)
{
	__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
			   data_btree, k, offset_into_extent, NULL, flags);
}
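
/*
 * Hypothetical use (a sketch, not from this file): read the data for a
 * key the caller has already looked up, at the key's own position.
 * bch2_read_extent() reads at the rbio's current bi_iter and passes
 * NULL for the failure list, so there is no per-device retry tracking:
 *
 *	bch2_read_extent(trans, rbio, k.k->p, BTREE_ID_extents, k,
 *			 0, BCH_READ_LAST_FRAGMENT);
 */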

void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		 subvol_inum, struct bch_io_failures *, unsigned flags);

static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum)
{
	struct bch_io_failures failed = { .nr = 0 };

	/* the caller must pass a freshly initialized rbio - see rbio_init() */
	BUG_ON(rbio->_state);

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	__bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
		    BCH_READ_RETRY_IF_STALE|
		    BCH_READ_MAY_PROMOTE|
		    BCH_READ_USER_MAPPED);
}
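
/*
 * Sketch of the top-level read path (hypothetical caller - bio setup
 * and completion handling omitted; my_read_endio and io_opts are
 * illustrative names, not part of this API): embed the bio in a
 * struct bch_read_bio, initialize it with rbio_init() below, then
 * submit:
 *
 *	struct bch_read_bio *rbio = rbio_init(bio, io_opts);
 *
 *	rbio->bio.bi_end_io = my_read_endio;
 *	bch2_read(c, rbio, inum);
 */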

static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_io_opts opts)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	/* _state aliases the whole flag bitfield; zeroing clears every flag */
	rbio->_state	= 0;
	rbio->promote	= NULL;
	rbio->opts	= opts;
	return rbio;
}

void bch2_fs_io_read_exit(struct bch_fs *);
int bch2_fs_io_read_init(struct bch_fs *);

#endif /* _BCACHEFS_IO_READ_H */