blob: b7a38a2069cfe31463b863a0cc9d222f9c933601 (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
|
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H
/*
* Don't allow the cache to grow beyond this size.
*/
#define IO_ALLOC_CACHE_MAX 128
/*
 * Try to stash @entry in the cache for later reuse.
 *
 * Returns true if the entry was stored. Returns false when the cache is
 * already at capacity, or when KASAN declines to poison the object
 * (kasan_mempool_poison_object() returns false), in which case the caller
 * must free the entry itself.
 */
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached >= cache->max_cached)
		return false;
	if (!kasan_mempool_poison_object(entry))
		return false;
	cache->entries[cache->nr_cached++] = entry;
	return true;
}
/*
 * Pop the most recently cached entry, or return NULL if the cache is
 * empty. The entry is unpoisoned for KASAN (cache->elem_size bytes)
 * before being handed back to the caller.
 */
static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	void *obj;

	if (!cache->nr_cached)
		return NULL;

	obj = cache->entries[--cache->nr_cached];
	kasan_mempool_unpoison_object(obj, cache->elem_size);
	return obj;
}
/*
 * Allocate the backing pointer array and initialize the cache bookkeeping.
 *
 * NOTE the inverted return convention: returns false on SUCCESS, true if
 * the kvmalloc_array() allocation failed and the cache is unusable.
 */
static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
				       unsigned max_nr, size_t size)
{
	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
	if (!cache->entries)
		return true;

	cache->nr_cached = 0;
	cache->max_cached = max_nr;
	cache->elem_size = size;
	return false;
}
/*
 * Drain and tear down the cache: every cached entry is released via the
 * caller-supplied @free callback, then the pointer array itself is
 * kvfree()d. Safe to call on a cache whose init failed (entries == NULL);
 * entries is reset to NULL afterwards so a double free is harmless.
 */
static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
				       void (*free)(const void *))
{
	void *obj;

	if (!cache->entries)
		return;

	while ((obj = io_alloc_cache_get(cache)))
		free(obj);

	kvfree(cache->entries);
	cache->entries = NULL;
}
#endif
|