#ifndef __TOOLS_LINUX_SLAB_H
#define __TOOLS_LINUX_SLAB_H

#include <malloc.h>
#include <stdlib.h>
#include <string.h>

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/page.h>
#include <linux/shrinker.h>
#include <linux/types.h>

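/*
 * Userspace stand-ins for the kernel's allocator parameters: 16 bytes is a
 * safe minimum alignment to advertise (glibc malloc returns 16-byte
 * aligned blocks on 64-bit targets), and with no slab-size cap
 * KMALLOC_MAX_SIZE is simply SIZE_MAX.
 */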
#define ARCH_KMALLOC_MINALIGN		16
#define KMALLOC_MAX_SIZE		SIZE_MAX

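/*
 * Userspace kmalloc(): runs the registered shrinkers first (a stand-in for
 * kernel memory-pressure behaviour), then uses aligned_alloc() so that
 * power-of-two sizes get their natural alignment, capped at PAGE_SIZE.
 * This leans on glibc's aligned_alloc(), which, unlike strict C11, does
 * not require size to be a multiple of the alignment.
 *
 * Illustrative use (struct foo is a stand-in type):
 *
 *	struct foo *f = kmalloc(sizeof(*f), __GFP_ZERO);
 *	if (!f)
 *		return -ENOMEM;
 */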
static inline void *kmalloc(size_t size, gfp_t flags)
{
	void *p;

	run_shrinkers();

	p = size
	    ? aligned_alloc(min(rounddown_pow_of_two(size),
				(size_t)PAGE_SIZE), size)
	    : malloc(0);
	if (p && (flags & __GFP_ZERO))
		memset(p, 0, size);

	return p;
}

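/*
 * Userspace krealloc(): unlike the kernel version, this always allocates a
 * fresh buffer and copies, with glibc's malloc_usable_size() bounding the
 * copy.  With __GFP_ZERO the new buffer is zeroed before the old contents
 * are copied back, so any tail beyond the old data reads as zero.
 */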
static inline void *krealloc(void *old, size_t size, gfp_t flags)
{
	void *new;

	run_shrinkers();

	new = malloc(size);
	if (!new)
		return NULL;

	if (flags & __GFP_ZERO)
		memset(new, 0, size);

	/* krealloc(NULL, ...) must behave like a plain allocation */
	if (old) {
		memcpy(new, old,
		       min(malloc_usable_size(old),
			   malloc_usable_size(new)));
		free(old);
	}

	return new;
}

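/*
 * kmalloc_array() guards against multiplication overflow: for size != 0,
 * n * size wraps exactly when n > SIZE_MAX / size, and the kernel API
 * returns NULL in that case instead of a too-small buffer.
 */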
#define kzalloc(size, flags)		kmalloc(size, flags|__GFP_ZERO)
#define kmalloc_array(n, size, flags)					\
	((size) != 0 && (n) > SIZE_MAX / (size)				\
	 ? NULL : kmalloc((n) * (size), flags))

#define kcalloc(n, size, flags)		kmalloc_array(n, size, flags|__GFP_ZERO)

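/* Note: unlike the kernel's kzfree(), this shim does not zero before freeing. */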
#define kfree(p)			free(p)
#define kzfree(p)			free(p)

#define kvmalloc(size, flags)		kmalloc(size, flags)
#define kvfree(p)			kfree(p)

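/*
 * In this shim a struct page * is just the page-aligned allocation itself,
 * so alloc_pages() hands back the usable memory directly and
 * __get_free_pages() below merely casts the same pointer.  An order-n call
 * allocates PAGE_SIZE << n contiguous bytes (order 2 == four pages).
 */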
static inline struct page *alloc_pages(gfp_t flags, unsigned int order)
{
	size_t size = PAGE_SIZE << order;
	void *p;

	run_shrinkers();

	p = aligned_alloc(PAGE_SIZE, size);
	if (p && (flags & __GFP_ZERO))
		memset(p, 0, size);

	return p;
}

#define alloc_page(gfp)			alloc_pages(gfp, 0)

#define __get_free_pages(gfp, order)	((unsigned long) alloc_pages(gfp, order))
#define __get_free_page(gfp)		__get_free_pages(gfp, 0)

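/*
 * free() releases the whole allocation whatever its size, so the order
 * argument only needs to be evaluated, then discarded.
 */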
#define __free_pages(page, order)			\
do {							\
	(void) order;					\
	free(page);					\
} while (0)

#define free_pages(addr, order)				\
do {							\
	(void) order;					\
	free((void *) (addr));				\
} while (0)

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

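/*
 * vmalloc area flags, kept so code that passes them still compiles; the
 * vmap()/vunmap() stubs below ignore them.
 */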
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040      /* don't add guard page */
#define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */

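/*
 * There is no separate kernel virtual mapping in userspace: vunmap() is a
 * no-op and vmap() always fails, so callers must handle a NULL return.
 */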
static inline void vunmap(const void *addr) {}

static inline void *vmap(struct page **pages, unsigned int count,
			 unsigned long flags, unsigned prot)
{
	return NULL;
}

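/*
 * Nothing allocated here is vmalloc memory, so is_vmalloc_addr() is
 * constant false and vmalloc_to_page() reuses the address-as-page
 * convention from alloc_pages() above.
 */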
#define is_vmalloc_addr(addr)		0

#define vmalloc_to_page(addr)		((struct page *) (addr))

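/*
 * kmemdup(): duplicate len bytes of src via kmalloc() + memcpy().  The gfp
 * argument is accepted for API compatibility; in this shim only __GFP_ZERO
 * has any effect, and it is redundant here since the copy overwrites the
 * buffer anyway.
 */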
static inline void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}

#endif /* __TOOLS_LINUX_SLAB_H */