// SPDX-License-Identifier: GPL-2.0
#ifndef PERF_LOCK_CONTENTION_H
#define PERF_LOCK_CONTENTION_H

#include <linux/list.h>
#include <linux/rbtree.h>

struct lock_filter {
	int			nr_types;	/* number of entries in types */
	int			nr_addrs;	/* number of entries in addrs */
	int			nr_syms;	/* number of entries in syms */
	unsigned int		*types;		/* lock type flags to show */
	unsigned long		*addrs;		/* lock instance addresses to show */
	char			**syms;		/* lock symbol names to show */
};
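
/*
 * Illustrative sketch only (hypothetical helper, not part of the upstream
 * header): how a lock address could be checked against the filter's address
 * list.  The actual filtering logic in perf may differ.
 */
static inline bool lock_filter_has_addr(const struct lock_filter *filter,
					unsigned long addr)
{
	int i;

	for (i = 0; i < filter->nr_addrs; i++) {
		if (filter->addrs[i] == addr)
			return true;
	}
	return false;
}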

struct lock_stat {
	struct hlist_node	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	u64			addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */
	u64			*callstack;

	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	union {
		unsigned int	nr_readlock;
		unsigned int	flags;
	};
	unsigned int		nr_trylock;

	/* these times are in nanoseconds */
	u64                     avg_wait_time;
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;

	int			broken; /* set when a broken event sequence blacklists this lock */
	int			combined;
};
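
/*
 * Hypothetical helper (not in the upstream header), shown only to sketch how
 * the fields relate: the average is expected to be the total wait time
 * divided by the number of contended acquisitions, in nanoseconds.
 */
static inline u64 lock_stat_calc_avg_wait(const struct lock_stat *ls)
{
	return ls->nr_contended ? ls->wait_time_total / ls->nr_contended : 0;
}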

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is needed to detect the first event seen for a lock.
 * By the nature of lock events, there is no guarantee that the first
 * event for a lock is acquire; it can also be acquired, contended or
 * release.
 */
#define SEQ_STATE_UNINITIALIZED      0	       /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

struct lock_stat *lock_stat_find(u64 addr);
struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);

bool match_callstack_filter(struct machine *machine, u64 *callstack);

/*
 * struct lock_seq_stat:
 * Holds the state of one lock sequence, e.g.
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 * An illustrative state-advance sketch follows the struct below.
 */
struct lock_seq_stat {
	struct list_head        list;
	int			state;
	u64			prev_event_time;
	u64                     addr;

	int                     read_count;
};
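
/*
 * Illustrative sketch (hypothetical helper, not upstream code) of how the
 * sequence state could advance when an "acquired" event is seen; the real
 * state machine lives in builtin-lock.c.
 */
static inline int seq_state_on_acquired(int state)
{
	switch (state) {
	case SEQ_STATE_ACQUIRING:	/* pattern 1: uncontended acquire */
	case SEQ_STATE_CONTENDED:	/* pattern 2: acquired after waiting */
		return SEQ_STATE_ACQUIRED;
	case SEQ_STATE_UNINITIALIZED:	/* first event seen for this lock */
	default:
		return state;		/* skipped or broken sequence */
	}
}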

struct thread_stat {
	struct rb_node		rb;

	u32                     tid;
	struct list_head        seq_list;
};

/*
 * CONTENTION_STACK_DEPTH
 * Number of stack trace entries used to find callers
 */
#define CONTENTION_STACK_DEPTH  8

/*
 * CONTENTION_STACK_SKIP
 * Number of stack trace entries to skip when finding callers.
 * The first few entries belong to the locking implementation itself.
 */
#define CONTENTION_STACK_SKIP  4
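
/*
 * Illustrative sketch (hypothetical helper, not upstream code): pick the
 * first caller outside the locking internals by skipping the leading
 * entries of a recorded stack trace.
 */
static inline u64 contention_first_caller(const u64 *callstack, int max_stack,
					  int skip)
{
	int i;

	for (i = skip; i < max_stack; i++) {
		if (callstack[i])
			return callstack[i];	/* first entry past the lock code */
	}
	return 0;
}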

/*
 * flags for lock:contention_begin
 * Imported from include/trace/events/lock.h.
 */
#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)
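
/*
 * Illustrative decoding sketch (hypothetical helper, not upstream code):
 * one way the LCB_F_* bits could be mapped to a human-readable lock type.
 * The exact mapping used by perf lock contention output may differ.
 */
static inline const char *lcb_flags_to_name(unsigned int flags)
{
	if (flags & LCB_F_MUTEX)
		return "mutex";
	if (flags & LCB_F_RT)
		return "rt-mutex";
	if (flags & LCB_F_PERCPU)
		return "pcpu-rwsem";
	if (flags & LCB_F_SPIN)
		return (flags & LCB_F_READ) ? "rwlock:R" :
		       (flags & LCB_F_WRITE) ? "rwlock:W" : "spinlock";
	return (flags & LCB_F_READ) ? "rwsem:R" :
	       (flags & LCB_F_WRITE) ? "rwsem:W" : "unknown";
}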

struct evlist;
struct machine;
struct target;

/* counters of failures seen on the BPF side (see bpf_skel/lock_contention.bpf.c) */
struct lock_contention_fails {
	int task;
	int stack;
	int time;
	int data;
};

struct lock_contention {
	struct evlist *evlist;
	struct target *target;
	struct machine *machine;
	struct hlist_head *result;		/* hash table of collected lock_stat */
	struct lock_filter *filters;		/* type/address/symbol filters */
	struct lock_contention_fails fails;	/* failure counters from the BPF side */
	unsigned long map_nr_entries;		/* max entries in the BPF maps */
	int max_stack;				/* max stack trace depth to record */
	int stack_skip;				/* entries to skip to find the caller */
	int aggr_mode;				/* aggregation mode (caller/task/addr) */
	int owner;				/* whether to track lock owners */
	int nr_filtered;			/* number of entries dropped by filters */
	bool save_callstack;			/* keep the callstack in each lock_stat */
};

#ifdef HAVE_BPF_SKEL

int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
int lock_contention_finish(void);

#else  /* !HAVE_BPF_SKEL */

static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
{
	return 0;
}

static inline int lock_contention_start(void) { return 0; }
static inline int lock_contention_stop(void) { return 0; }
static inline int lock_contention_finish(void) { return 0; }

static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
{
	return 0;
}

#endif  /* HAVE_BPF_SKEL */
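
/*
 * Illustrative call-order sketch (hypothetical caller, not part of the
 * upstream header): the BPF-based path is expected to be driven roughly as
 * below.  Without HAVE_BPF_SKEL all of these calls are no-ops returning 0.
 */
static inline int lock_contention_run_sketch(struct lock_contention *con)
{
	int err = lock_contention_prepare(con);	/* load and attach the BPF skeleton */

	if (err)
		return err;

	lock_contention_start();		/* enable data collection */
	/* ... the monitored workload runs here ... */
	lock_contention_stop();			/* disable data collection */

	err = lock_contention_read(con);	/* move results into con->result */
	lock_contention_finish();		/* detach and clean up */

	return err;
}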

#endif  /* PERF_LOCK_CONTENTION_H */