#ifndef __TOOLS_LINUX_SLAB_H
#define __TOOLS_LINUX_SLAB_H
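
/*
 * Userspace shim for the kernel slab and page allocators: everything is
 * backed by plain libc malloc()/free(), and gfp flags are ignored apart
 * from __GFP_ZERO.
 */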

#include <malloc.h>
#include <stdlib.h>
#include <string.h>

#include <linux/kernel.h>
#include <linux/page.h>
#include <linux/types.h>

#define ARCH_KMALLOC_MINALIGN		16
#define KMALLOC_MAX_SIZE		SIZE_MAX

static inline void *kmalloc(size_t size, gfp_t flags)
{
	void *p = malloc(size);

	if (p && (flags & __GFP_ZERO))
		memset(p, 0, size);

	return p;
}

static inline void *krealloc(void *old, size_t size, gfp_t flags)
{
	void *new = kmalloc(size, flags);

	/*
	 * kmalloc() already zeroed @new if __GFP_ZERO was set; preserve
	 * the old contents up to the smaller of the two allocations.
	 */
	if (new && old) {
		memcpy(new, old,
		       min(malloc_usable_size(old),
			   malloc_usable_size(new)));
		free(old);
	}

	return new;
}

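/*
 * calloc() both zeroes the allocation and checks n * size for overflow,
 * so it stands in for all three; the unconditional zeroing done for
 * kmalloc_array() is harmless here.
 */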
#define kzalloc(size, flags)		calloc(1, size)
#define kcalloc(n, size, flags)		calloc(n, size)
#define kmalloc_array(n, size, flags)	calloc(n, size)

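/* Note: unlike the kernel's kzfree(), this shim does not clear the buffer. */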
#define kfree(p)			free(p)
#define kvfree(p)			free(p)
#define kzfree(p)			free(p)

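/*
 * In this shim a struct page * is simply the page's virtual address;
 * memalign() (from <malloc.h>) provides the PAGE_SIZE alignment.
 */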
static inline struct page *alloc_pages(gfp_t flags, unsigned int order)
{
	size_t size = PAGE_SIZE << order;
	void *p = memalign(PAGE_SIZE, size);

	if (p && (flags & __GFP_ZERO))
		memset(p, 0, size);

	return p;
}

#define alloc_page(gfp)			alloc_pages(gfp, 0)

#define __get_free_pages(gfp, order)	((unsigned long) alloc_pages(gfp, order))
#define __get_free_page(gfp)		__get_free_pages(gfp, 0)

#define __free_pages(page, order)			\
do {							\
	(void) order;					\
	free(page);					\
} while (0)

#define free_pages(addr, order)				\
do {							\
	(void) order;					\
	free((void *) (addr));				\
} while (0)

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040      /* don't add guard page */
#define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */

static inline void vunmap(const void *addr) {}

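/*
 * Returning the address of the first page is only correct when the
 * pages are already contiguous (or count == 1).
 */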
static inline void *vmap(struct page **pages, unsigned int count,
			 unsigned long flags, unsigned prot)
{
	return page_address(pages[0]);
}

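/*
 * There is no separate vmalloc address space in userspace, so no address
 * is ever a vmalloc address and a "page" is just its address.
 */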
#define is_vmalloc_addr(addr)		0

#define vmalloc_to_page(addr)		((struct page *) (addr))

#endif /* __TOOLS_LINUX_SLAB_H */