#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>

/*
 * The order of these masks is important. Matching masks will be seen
 * first and the left-over flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 *	GFP_KERNEL|GFP_HARDWALL
 *
 * Thus most bits set go first.
 */
#define show_gfp_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"}, \
	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
	{(unsigned long)GFP_USER,		"GFP_USER"},		\
	{(unsigned long)GFP_TEMPORARY,		"GFP_TEMPORARY"},	\
	{(unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
	{(unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
	{(unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
	{(unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
	{(unsigned long)__GFP_HIGH,		"GFP_HIGH"},		\
	{(unsigned long)__GFP_WAIT,		"GFP_WAIT"},		\
	{(unsigned long)__GFP_IO,		"GFP_IO"},		\
	{(unsigned long)__GFP_COLD,		"GFP_COLD"},		\
	{(unsigned long)__GFP_NOWARN,		"GFP_NOWARN"},		\
	{(unsigned long)__GFP_REPEAT,		"GFP_REPEAT"},		\
	{(unsigned long)__GFP_NOFAIL,		"GFP_NOFAIL"},		\
	{(unsigned long)__GFP_NORETRY,		"GFP_NORETRY"},		\
	{(unsigned long)__GFP_COMP,		"GFP_COMP"},		\
	{(unsigned long)__GFP_ZERO,		"GFP_ZERO"},		\
	{(unsigned long)__GFP_NOMEMALLOC,	"GFP_NOMEMALLOC"},	\
	{(unsigned long)__GFP_HARDWALL,		"GFP_HARDWALL"},	\
	{(unsigned long)__GFP_THISNODE,		"GFP_THISNODE"},	\
	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"}		\
	) : "GFP_NOWAIT"

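/*
 * Illustrative only (editor's sketch, not part of the original header):
 * because composite masks are listed before their component bits, a flag
 * word decomposes against the first entries it fully matches, e.g.:
 *
 *	show_gfp_flags(GFP_KERNEL | __GFP_ZERO)	-> "GFP_KERNEL|GFP_ZERO"
 *	show_gfp_flags(GFP_HIGHUSER_MOVABLE)	-> "GFP_HIGHUSER_MOVABLE"
 *	show_gfp_flags(0)			-> "GFP_NOWAIT"
 */
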
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

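/*
 * Illustrative only (editor's sketch): the slab allocators emit these
 * events on their allocation paths, roughly as in mm/slub.c of this era:
 *
 *	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 *	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 */
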
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

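/*
 * Illustrative only (editor's sketch): the _node variants additionally
 * record the NUMA node the caller asked for, roughly:
 *
 *	trace_kmalloc_node(_RET_IP_, ret, size, s->size, gfpflags, node);
 */
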
DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

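/*
 * Illustrative only (editor's sketch): the free paths record the caller
 * address and the object being released, roughly:
 *
 *	trace_kfree(_RET_IP_, objp);
 *	trace_kmem_cache_free(_RET_IP_, objp);
 */
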
TRACE_EVENT(mm_page_free_direct,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%u",
			__entry->page,
			page_to_pfn(__entry->page),
			__entry->order)
);

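/*
 * Illustrative only (editor's sketch): emitted when pages go straight
 * back to the buddy allocator, roughly as in mm/page_alloc.c:
 *
 *	trace_mm_page_free_direct(page, order);
 */
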
TRACE_EVENT(mm_pagevec_free,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	int,		cold		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->cold		= cold;
	),

	TP_printk("page=%p pfn=%lu order=0 cold=%d",
			__entry->page,
			page_to_pfn(__entry->page),
			__entry->cold)
);

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
		 gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d gfp_flags=%s",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

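/*
 * Illustrative only (editor's sketch): emitted once per successful
 * allocation near the top of the page allocator, roughly:
 *
 *	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 */
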
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);

DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d",
		__entry->page, page_to_pfn(__entry->page),
		__entry->order, __entry->migratetype)
);

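/*
 * Note (editor's sketch): DEFINE_EVENT_PRINT reuses the mm_page class but
 * overrides its TP_printk, so the drain event omits the percpu_refill
 * field. It fires roughly from the per-cpu page-list drain path as:
 *
 *	trace_mm_page_pcpu_drain(page, 0, migratetype);
 */
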
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		 int alloc_order, int fallback_order,
		 int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
	),

	TP_fast_assign(
		__entry->page			= page;
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->alloc_migratetype == __entry->fallback_migratetype)
);

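/*
 * Note (editor's sketch): the last two printk values are computed, not
 * stored: "fragmenting" is set when the stolen fallback was smaller than
 * a pageblock, and "change_ownership" reports whether the two recorded
 * migratetypes ended up equal. A rough call from the fallback path in
 * mm/page_alloc.c looks like:
 *
 *	trace_mm_page_alloc_extfrag(page, order, current_order,
 *				    start_migratetype, migratetype);
 */
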
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>