/* The lisp stack.
   Copyright (C) 1985, 1986, 1987, 1992, 1993 Free Software Foundation, Inc.
   Copyright (C) 2002, 2003 Ben Wing.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Synched up with: FSF 19.30.  Contained redundantly in various C files
   in FSFmacs. */

/* Authorship:

   FSF: Original version; a long time ago.
   XEmacs: split out of some C files.  (For some obscure reason, a header
           file couldn't be used in FSF Emacs, but XEmacs doesn't have
           that problem.)
   Mly (probably) or JWZ: Some changes.
 */
|
440
|
34 #ifndef INCLUDED_backtrace_h_
|
|
35 #define INCLUDED_backtrace_h_
|
428
|
36
|
|
37 #include <setjmp.h>
|
|
38
|
853
|
#ifdef ERROR_CHECK_CATCH
/* you can use this if you are trying to debug corruption in the
   catchlist */
void check_catchlist_sanity (void);

/* you can use this if you are trying to debug corruption in the specbind
   stack */
void check_specbind_stack_sanity (void);
#else
/* When catch error-checking is compiled out, the sanity checks expand
   to nothing, so call sites need no #ifdefs of their own. */
#define check_catchlist_sanity()
#define check_specbind_stack_sanity()
#endif
|
|
51
|
428
|
/* These definitions are used in eval.c and alloc.c */

/* One frame of the Lisp call stack.  Frames live on the C stack and are
   chained through `next'; the innermost frame is backtrace_list. */
struct backtrace
{
  struct backtrace *next;	/* next outer frame */
  Lisp_Object *function;	/* points to the function being called */
  Lisp_Object *args;		/* Points to vector of args. */
  int nargs;			/* Length of vector.
				   If nargs is UNEVALLED, args points to
				   slot holding list of unevalled args */
  int pdlcount;			/* specpdl_depth () when invoked */
  char evalargs;		/* NOTE(review): apparently nonzero when the
				   args still require evaluation -- confirm
				   against the users in eval.c. */
  /* Nonzero means call value of debugger when done with this operation. */
  char debug_on_exit;

  /* All the rest is information for the use of the profiler.  The only
     thing that eval.c does is set the first value to 0 so that it can
     be relied upon. */

  /* ----------------------------------------------------------------- */

  /* 0 = profiling not turned on when function called.
       Since profiling can be turned on and off dynamically, we can't
       always count on having info recorded when a function was called
       and need to take evasive action if necessary.
     1 = profiling turned on but function not yet actually called.  Lots of
       stuff can happen between when a function is pushed onto the
       backtrace list and when it's actually called (e.g. evalling its
       arguments, autoloading, etc.).  For greater accuracy we don't
       treat the preamble stuff as part of the function itself.
     2 = profiling turned on, function called.
  */
  char function_being_called;
  /* The trick here is handling recursive functions and dealing with the
     dynamicity of in-profile/not-in-profile.  I used to just use a bunch
     of hash tables for all info but that fails in the presence of
     recursive functions because they can modify values out from under
     you.  The algorithm here is that we record the total_ticks and
     total_consing, as well as the current values of `total-timing' and
     `total-gc-usage' for the OBJ -- that's because recursive functions,
     which get called later and exit early, will go ahead and modify the
     `total-timing' and `total-gc-usage' for the fun, even though it's
     not "correct" because the outer function is still running.  However,
     if we ask for profiling info at this point, at least we're getting
     SOME info.

     So ... On entry, we record these four values.  On exit, we compute
     an offset from the recorded value to the current value and then
     store it into the appropriate hash table entry, using the recorded
     value in the entry rather than the actual one.  (Inner recursive
     functions may have added their own values to the total-counts, and
     we want to subsume them, not add to them.)

     #### Also we need to go through the backtrace list during
     stop-profiling and record values, just like for unwind_to. */
  EMACS_INT current_total_timing_val;
  EMACS_INT current_total_gc_usage_val;
  EMACS_UINT total_ticks_at_start;
  EMACS_UINT total_consing_at_start;
};
|
|
112
|
|
/* This structure helps implement the `catch' and `throw' control
   structure.  A struct catchtag contains all the information needed
   to restore the state of the interpreter after a non-local jump.
   (No information is stored concerning how to restore the state of
   the condition-handler list; this is handled implicitly through
   an unwind-protect.  unwind-protects are on the specbind stack,
   which is reset to its proper value by `throw'.  In the process of
   that, any intervening bindings are reset and unwind-protects called,
   which fixes up the condition-handler list.)

   catchtag structures are chained together in the C calling stack;
   the `next' member points to the next outer catchtag.

   A call like (throw TAG VAL) searches for a catchtag whose `tag'
   member is TAG, and then unbinds to it.  A value of Vcatch_everything_tag
   for the `tag' member of a catchtag is special and means "catch all throws,
   regardless of the tag".  This is used internally by the C code.  The `val'
   member is used to hold VAL while the stack is unwound; `val' is returned
   as the value of the catch form.  The `actual_tag' member holds the value
   of TAG as passed to throw, so that it can be retrieved when catches with
   Vcatch_everything_tag are set up.

   All the other members are concerned with restoring the interpreter
   state. */

struct catchtag
{
  Lisp_Object tag;
  /* Stores the actual tag used in `throw'; the same as TAG, unless
     TAG is Vcatch_everything_tag. */
  Lisp_Object actual_tag;
  /* A backtrace prior to the throw, used with Vcatch_everything_tag. */
  Lisp_Object backtrace;
  /* Holds the VAL of (throw TAG VAL) while the stack unwinds; returned
     as the value of the catch form (see comment above). */
  Lisp_Object val;
  struct catchtag *next;	/* next outer catchtag */
  /* The remaining members are interpreter state captured when the catch
     was established, restored when a throw unwinds to this tag. */
  struct gcpro *gcpro;
  JMP_BUF jmp;			/* setjmp buffer for the non-local exit */
  struct backtrace *backlist;
#if 0 /* FSFmacs */
  /* FSF uses a separate handler stack to hold condition-cases,
     where we use Vcondition_handlers.  We should switch to their
     system because it avoids the need to mess around with consing
     up stuff and then dangerously freeing it.  See comment in
     condition_case_unwind(). */
  struct handler *handlerlist;
#endif
  int lisp_eval_depth;
  int pdlcount;			/* specbind-stack depth to unbind to */
#if 0 /* FSFmacs */
  /* This is the equivalent of async_timer_suppress_count.
     We probably don't have to bother with this. */
  int poll_suppress_count;
#endif
};
|
|
167
|
|
/* Dynamic-binding-o-rama */

/* Structure for recording Lisp call stack for backtrace purposes. */

/* The special binding stack holds the outer values of variables while
   they are bound by a function application or a let form, stores the
   code to be executed for Lisp unwind-protect forms, and stores the C
   functions to be called for record_unwind_protect.

   If func is non-zero, undoing this binding applies func to old_value;
   This implements record_unwind_protect.
   If func is zero and symbol is nil, undoing this binding evaluates
   the list of forms in old_value; this implements Lisp's unwind-protect
   form.
   Otherwise, undoing this binding stores old_value as symbol's value; this
   undoes the bindings made by a let form or function call. */

/* One entry on the special binding ("specpdl") stack; see the three
   cases described above for how the fields are interpreted. */
struct specbinding
{
  Lisp_Object symbol;
  Lisp_Object old_value;
  Lisp_Object (*func) (Lisp_Object); /* for unwind-protect */
};
|
|
191
|
|
#if 0 /* FSFmacs */
/* Disabled FSF-Emacs condition-case machinery, kept for reference;
   XEmacs uses Vcondition_handlers instead (see struct catchtag). */
/* #### */
/* Everything needed to describe an active condition case. */
struct handler
{
  /* The handler clauses and variable from the condition-case form. */
  Lisp_Object handler;
  Lisp_Object var;
  /* Fsignal stores here the condition-case clause that applies,
     and Fcondition_case thus knows which clause to run. */
  Lisp_Object chosen_clause;

  /* Used to effect the longjmp() out to the handler. */
  struct catchtag *tag;

  /* The next enclosing handler. */
  struct handler *next;
};

extern struct handler *handlerlist;

#endif
|
|
214
|
|
/* These are extern because GC needs to mark them */
extern struct specbinding *specpdl;	/* base of the specbind stack */
extern struct specbinding *specpdl_ptr; /* next free slot on that stack */
extern struct catchtag *catchlist;	/* chain of active catches */
extern struct backtrace *backtrace_list; /* innermost Lisp stack frame */

/* Most callers should simply use specbind() and unbind_to_1(), but if
   speed is REALLY IMPORTANT, you can use the faster macros below */
void specbind_magic (Lisp_Object, Lisp_Object); /* slow path of SPECBIND */
void grow_specpdl (EMACS_INT reserved); /* enlarge stack for RESERVED entries */
void unbind_to_hairy (int);		/* slow path of the UNBIND_TO macros */
extern int specpdl_size;	/* allocated capacity of the specbind stack;
				   compared against the depth in
				   SPECPDL_RESERVE */
|
|
227
|
|
/* Inline version of specbind().
   Use this instead of specbind() if speed is sufficiently important
   to save the overhead of even a single function call.

   Dynamically binds SYMBOL_OBJECT to VALUE_OBJECT: reserves a slot,
   pushes the old value onto the specbind stack and stores the new value
   directly, but punts to specbind_magic() for symbols whose current
   value is a magic (special) object.  Constant symbols (nil, t,
   keywords) are rejected via reject_constant_symbols(). */
#define SPECBIND(symbol_object, value_object) do {	\
  Lisp_Object SB_symbol = (symbol_object);		\
  Lisp_Object SB_newval = (value_object);		\
  Lisp_Object SB_oldval;				\
  Lisp_Symbol *SB_sym;					\
							\
  SPECPDL_RESERVE (1);					\
							\
  CHECK_SYMBOL (SB_symbol);				\
  SB_sym = XSYMBOL (SB_symbol);				\
  SB_oldval = SB_sym->value;				\
							\
  if (!SYMBOL_VALUE_MAGIC_P (SB_oldval) || UNBOUNDP (SB_oldval))	\
    {									\
      /* #### the following test will go away when we have a constant	\
	 symbol magic object */						\
      if (EQ (SB_symbol, Qnil) ||					\
	  EQ (SB_symbol, Qt)   ||					\
	  SYMBOL_IS_KEYWORD (SB_symbol))				\
	reject_constant_symbols (SB_symbol, SB_newval, 0,		\
				 UNBOUNDP (SB_newval) ?			\
				 Qmakunbound : Qset);			\
									\
      specpdl_ptr->symbol    = SB_symbol;				\
      specpdl_ptr->old_value = SB_oldval;				\
      specpdl_ptr->func      = 0;					\
      specpdl_ptr++;							\
      specpdl_depth_counter++;						\
									\
      SB_sym->value = (SB_newval);					\
    }									\
  else									\
    specbind_magic (SB_symbol, SB_newval);				\
  check_specbind_stack_sanity ();					\
} while (0)
|
|
266
|
|
/* An even faster, but less safe inline version of specbind().
   Caller guarantees that:
   - SYMBOL is a non-constant symbol (i.e. not Qnil, Qt, or keyword).
   - there is room on the specbind stack, i.e. SPECPDL_RESERVE() has
     already been called.  (NOTE(review): this line previously read
     `specpdl_depth_counter >= specpdl_size', which describes the
     condition under which we would crash, not the guarantee the
     caller must provide.)
   Else we crash. */
#define SPECBIND_FAST_UNSAFE(symbol_object, value_object) do {		\
  Lisp_Object SFU_symbol = (symbol_object);				\
  Lisp_Object SFU_newval = (value_object);				\
  Lisp_Symbol *SFU_sym   = XSYMBOL (SFU_symbol);			\
  Lisp_Object SFU_oldval = SFU_sym->value;				\
  /* Most of the time, will be previously unbound.  #### With a bit of	\
     rearranging, this could be reduced to only one check. */		\
  if (UNBOUNDP (SFU_oldval) || !SYMBOL_VALUE_MAGIC_P (SFU_oldval))	\
    {									\
      specpdl_ptr->symbol    = SFU_symbol;				\
      specpdl_ptr->old_value = SFU_oldval;				\
      specpdl_ptr->func      = 0;					\
      specpdl_ptr++;							\
      specpdl_depth_counter++;						\
									\
      SFU_sym->value = (SFU_newval);					\
    }									\
  else									\
    specbind_magic (SFU_symbol, SFU_newval);				\
  check_specbind_stack_sanity ();					\
} while (0)
|
|
/* Make sure the special binding stack has room for SIZE more entries,
   enlarging it via grow_specpdl() when it is too close to full. */
#define SPECPDL_RESERVE(size) do {			\
  EMACS_INT SPR_needed = (size);			\
  if (specpdl_depth () + SPR_needed >= specpdl_size)	\
    grow_specpdl (SPR_needed);				\
} while (0)
|
|
299
|
771
|
/* Inline version of unbind_to_1().
   [[Use this instead of unbind_to_1() if speed is sufficiently important
   to save the overhead of even a single function call.]]
   This is bogus pseudo-optimization. --ben

   Most of the time, unbind_to_1() is called only on ordinary
   variables, so optimize for that.

   Pops specbind-stack entries until the depth is back down to COUNT,
   restoring each plain variable's saved value inline.  The first
   unwind-protect entry or magic-valued symbol encountered hands the
   rest of the unwinding to unbind_to_hairy(), with VALUE protected
   from GC for the duration of that call. */
#define UNBIND_TO_GCPRO(count, value) do {		\
  int UNBIND_TO_count = (count);			\
  while (specpdl_depth_counter != UNBIND_TO_count)	\
    {							\
      Lisp_Symbol *sym;					\
      --specpdl_ptr;					\
      --specpdl_depth_counter;				\
							\
      if (specpdl_ptr->func != 0 ||			\
	  ((sym = XSYMBOL (specpdl_ptr->symbol)),	\
	   SYMBOL_VALUE_MAGIC_P (sym->value)))		\
	{						\
	  struct gcpro gcpro1;				\
	  GCPRO1 (value);				\
	  unbind_to_hairy (UNBIND_TO_count);		\
	  UNGCPRO;					\
	  break;					\
	}						\
							\
      sym->value = specpdl_ptr->old_value;		\
    }							\
  check_specbind_stack_sanity ();			\
} while (0)
|
|
330
|
771
|
/* A slightly faster inline version of unbind_to_1,
   that doesn't offer GCPROing services.

   Identical to UNBIND_TO_GCPRO except that nothing is protected from
   GC while unbind_to_hairy() runs; use it when there is no return
   value that needs protecting. */
#define UNBIND_TO(count) do {				\
  int UNBIND_TO_count = (count);			\
  while (specpdl_depth_counter != UNBIND_TO_count)	\
    {							\
      Lisp_Symbol *sym;					\
      --specpdl_ptr;					\
      --specpdl_depth_counter;				\
							\
      if (specpdl_ptr->func != 0 ||			\
	  ((sym = XSYMBOL (specpdl_ptr->symbol)),	\
	   SYMBOL_VALUE_MAGIC_P (sym->value)))		\
	{						\
	  unbind_to_hairy (UNBIND_TO_count);		\
	  break;					\
	}						\
							\
      sym->value = specpdl_ptr->old_value;		\
    }							\
  check_specbind_stack_sanity ();			\
} while (0)
|
|
353
|
|
#if 0
/* Unused.  It's too hard to guarantee that the current bindings
   contain only variables. */
/* Another inline version of unbind_to_1().  VALUE is GC-protected.
   Caller guarantees that:
   - all of the elements on the binding stack are variable bindings.
   Else we crash. */
#define UNBIND_TO_GCPRO_VARIABLES_ONLY(count, value) do {	\
  int UNBIND_TO_count = (count);				\
  while (specpdl_depth_counter != UNBIND_TO_count)		\
    {								\
      Lisp_Symbol *sym;						\
      --specpdl_ptr;						\
      --specpdl_depth_counter;					\
								\
      sym = XSYMBOL (specpdl_ptr->symbol);			\
      if (!SYMBOL_VALUE_MAGIC_P (sym->value))			\
	sym->value = specpdl_ptr->old_value;			\
      else							\
	{							\
	  struct gcpro gcpro1;					\
	  GCPRO1 (value);					\
	  unbind_to_hairy (UNBIND_TO_count);			\
	  UNGCPRO;						\
	  break;						\
	}							\
    }								\
} while (0)
#endif /* unused */
|
|
383
|
|
/* A faster, but less safe inline version of Fset().
   Caller guarantees that:
   - SYMBOL is a non-constant symbol (i.e. not Qnil, Qt, or keyword).
   Else we crash.

   Stores NEWVAL directly into the symbol's value cell when the current
   value is ordinary (or unbound); any magic value goes through the
   full Fset() machinery instead. */
#define FSET_FAST_UNSAFE(sym, newval) do {				\
  Lisp_Object FSETF_symbol = (sym);					\
  Lisp_Object FSETF_value  = (newval);					\
  Lisp_Symbol *FSETF_s     = XSYMBOL (FSETF_symbol);			\
  Lisp_Object FSETF_old    = FSETF_s->value;				\
  if (!SYMBOL_VALUE_MAGIC_P (FSETF_old) || UNBOUNDP (FSETF_old))	\
    FSETF_s->value = FSETF_value;					\
  else									\
    Fset (FSETF_symbol, FSETF_value);					\
} while (0)
|
|
398
|
1292
|
/* Note: you must always fill in all of the fields in a backtrace structure
   before pushing them on the backtrace_list.  The profiling code depends
   on this. */

/* Link FRAME (a fully-initialized struct backtrace) in as the new
   innermost frame of the global backtrace_list. */
#define PUSH_BACKTRACE(frame) do {	\
  (frame).next = backtrace_list;	\
  backtrace_list = &(frame);		\
} while (0)

/* Unlink FRAME -- which must be the innermost frame -- leaving
   backtrace_list pointing at the next outer frame. */
#define POP_BACKTRACE(frame) do {	\
  backtrace_list = (frame).next;	\
} while (0)
|
|
411
|
440
|
412 #endif /* INCLUDED_backtrace_h_ */
|