#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/segment.h>
#include <asm/page_types.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/* this struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__

struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int xds;
	int xes;
	int xfs;
	int xgs;
	long orig_eax;
	long eip;
	int xcs;
	long eflags;
	long esp;
	int xss;
};

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#endif /* __KERNEL__ */

#else /* __i386__ */

#ifndef __KERNEL__

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
	/* arguments: non-interrupt/non-tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
	/* end of arguments */
	/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
	/* top of stack page */
};

#else /* __KERNEL__ */

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
	/* arguments: non-interrupt/non-tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
	/* end of arguments */
	/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
	/* top of stack page */
};

#endif /* __KERNEL__ */
#endif /* !__i386__ */


#ifdef __KERNEL__

#include <linux/init.h>

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

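/*
 * Note: per the x86 calling conventions and the system call ABI, return
 * values come back in ax/rax, so the saved ->ax slot below holds the return
 * value at syscall exit; a failed system call leaves a negative errno there.
 */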
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}

/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value.  This tricky test checks that with
 * one comparison.  Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}

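/*
 * How the one-comparison test in user_mode_vm() below works on 32-bit:
 * SEGMENT_RPL_MASK is 0x3 and USER_RPL is 3, while X86_VM_MASK is the
 * EFLAGS VM bit (or 0 without CONFIG_VM86).  A kernel-mode frame has
 * RPL 0 and VM clear, so the OR is 0.  A protected-mode user frame has
 * RPL 3, and a V8086 frame has the VM bit set; either way the OR is
 * >= USER_RPL.
 */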
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved.  Thus the '&regs->sp'.
 *
 * This is valid only for kernel mode traps.
 */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)(&regs->sp);
#else
	return regs->sp;
#endif
}
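
/*
 * On 64-bit there is no such special case: long-mode interrupts and
 * exceptions always push ss/rsp, so the saved ->sp is valid even for
 * traps taken in kernel mode.
 */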

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->ip;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->bp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}

/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is read.
 * @offset:	byte offset of the register within struct pt_regs.
 *
 * regs_get_register() returns the value of a register.  @offset is the
 * offset of the register within the struct pt_regs pointed to by @regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
	return *(unsigned long *)((unsigned long)regs + offset);
}
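
/*
 * Example (sketch): reading a saved register through the generic accessor,
 * equivalent to regs->ax when the offset is in range:
 *
 *	unsigned long ax = regs_get_register(regs, offsetof(struct pt_regs, ax));
 *
 * Offsets up to and including MAX_REG_OFFSET (i.e. 'ss', the last member)
 * are valid; anything beyond that reads back as 0.
 */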

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains the kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s) indicated by @regs.  It returns true if @addr is within the kernel
 * stack and false otherwise.
 */
static inline int regs_within_kernel_stack(struct pt_regs *regs,
					   unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
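
/*
 * This works because the kernel stack is a THREAD_SIZE-sized, THREAD_SIZE-
 * aligned block: masking off the low bits of an address yields the base of
 * the stack it lives on, so two addresses are on the same stack iff their
 * masked values match.
 */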

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains the kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * indicated by @regs.  If the @n-th entry is NOT within the kernel stack,
 * this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						       unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
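
/*
 * Example (sketch): peeking at the first couple of words on the trap-time
 * kernel stack, e.g. from a handler that was handed @regs:
 *
 *	unsigned long slot0 = regs_get_kernel_stack_nth(regs, 0);
 *	unsigned long slot1 = regs_get_kernel_stack_nth(regs, 1);
 *
 * Entries that fall outside the stack read back as 0 rather than faulting.
 */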

/* Get Nth argument at function call */
extern unsigned long regs_get_argument_nth(struct pt_regs *regs,
					   unsigned int n);

/*
 * These are defined as per linux/ptrace.h; see that header for details.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

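/*
 * Block stepping traps on branches rather than on every instruction.  It
 * relies on the BTF bit in the DEBUGCTL MSR, which first appeared with the
 * P6 family; hence arch_has_block_step() falls back to a runtime family
 * check unless CONFIG_X86_DEBUGCTLMSR already guarantees the MSR exists.
 */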
extern void user_enable_block_step(struct task_struct *);
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif

#define ARCH_HAS_USER_SINGLE_STEP_INFO

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#ifdef CONFIG_X86_PTRACE_BTS
extern void ptrace_bts_untrace(struct task_struct *tsk);

#define arch_ptrace_untrace(tsk)	ptrace_bts_untrace(tsk)
#endif /* CONFIG_X86_PTRACE_BTS */

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PTRACE_H */