xemacs-beta: annotate src/backtrace.h @ 5602:c9e5612f5424
Support the MP library on recent FreeBSD, have it pass relevant tests.
src/ChangeLog addition:
2011-11-26 Aidan Kehoe <kehoea@parhasard.net>
* number-mp.c (bignum_to_string):
Don't overwrite the accumulator we've just set up for this
function.
* number-mp.c (BIGNUM_TO_TYPE):
mp_itom() doesn't necessarily do what this code used to assume with
negative numbers; it can treat them as unsigned ints. Subtract
numbers from bignum_zero instead of multiplying them by -1 to
convert them to their negative equivalents.
* number-mp.c (bignum_to_int):
* number-mp.c (bignum_to_uint):
* number-mp.c (bignum_to_long):
* number-mp.c (bignum_to_ulong):
* number-mp.c (bignum_to_double):
Use the changed BIGNUM_TO_TYPE() in these functions.
* number-mp.c (bignum_ceil):
* number-mp.c (bignum_floor):
In these functions, be more careful about rounding to positive and
negative infinity, respectively. Don't use the sign of QUOTIENT
when working out whether to add or subtract one; rather, use
the sign QUOTIENT would have if arbitrary-precision division were
done (see the sketch after this ChangeLog entry).
* number-mp.h:
* number-mp.h (MP_GCD):
Wrap #include <mp.h> in BEGIN_C_DECLS/END_C_DECLS.
* number.c (Fbigfloat_get_precision):
* number.c (Fbigfloat_set_precision):
Don't attempt to call XBIGFLOAT_GET_PREC if this build doesn't
support big floats.
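As an aside on the bignum_ceil()/bignum_floor() item above: with truncating integer division, a quotient that truncates to zero carries no usable sign, so the round-up/round-down decision has to come from the signs of the operands. The sketch below only illustrates that rule with plain C longs; it is not the MP-based bignum code the entry changes, and ceil_div()/floor_div() are made-up names.

```c
#include <stdio.h>

/* Illustration only: C division truncates toward zero, so the correction
   toward +/- infinity must be decided from the operand signs, not from
   the sign of the already-truncated quotient (which may be 0). */
static long
ceil_div (long num, long den)
{
  long q = num / den;
  long r = num % den;
  if (r != 0 && (num < 0) == (den < 0))
    q += 1;                     /* exact quotient was positive: round up */
  return q;
}

static long
floor_div (long num, long den)
{
  long q = num / den;
  long r = num % den;
  if (r != 0 && (num < 0) != (den < 0))
    q -= 1;                     /* exact quotient was negative: round down */
  return q;
}

int
main (void)
{
  /* -1/2 truncates to 0, so the truncated quotient's sign says nothing. */
  printf ("%ld %ld\n", ceil_div (-1, 2), floor_div (-1, 2));   /* 0 -1 */
  printf ("%ld %ld\n", ceil_div (1, 2), floor_div (1, 2));     /* 1 0 */
  return 0;
}
```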
author | Aidan Kehoe <kehoea@parhasard.net> |
---|---|
date | Sat, 26 Nov 2011 17:59:14 +0000 |
parents | 308d34e9f07d |
children | |
rev | line source |
---|---|
428 | 1 /* The lisp stack. |
2 Copyright (C) 1985, 1986, 1987, 1992, 1993 Free Software Foundation, Inc. | |
1292 | 3 Copyright (C) 2002, 2003 Ben Wing. |
428 | 4 |
5 This file is part of XEmacs. | |
6 | |
5402 | 7 XEmacs is free software: you can redistribute it and/or modify it |
428 | 8 under the terms of the GNU General Public License as published by the |
5402 | 9 Free Software Foundation, either version 3 of the License, or (at your |
10 option) any later version. | |
428 | 11 |
12 XEmacs is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
5402 | 18 along with XEmacs. If not, see <http://www.gnu.org/licenses/>. */ |
428 | 19 |
20 /* Synched up with: FSF 19.30. Contained redundantly in various C files | |
21 in FSFmacs. */ | |
22 | |
23 /* Authorship: | |
24 | |
25 FSF: Original version; a long time ago. | |
26 XEmacs: split out of some C files. (For some obscure reason, a header | |
27 file couldn't be used in FSF Emacs, but XEmacs doesn't have | |
28 that problem.) | |
29 Mly (probably) or JWZ: Some changes. | |
30 */ | |
31 | |
440 | 32 #ifndef INCLUDED_backtrace_h_ |
33 #define INCLUDED_backtrace_h_ | |
428 | 34 |
35 #include <setjmp.h> | |
36 | |
853 | 37 #ifdef ERROR_CHECK_CATCH |
38 /* you can use this if you are trying to debug corruption in the | |
39 catchlist */ | |
40 void check_catchlist_sanity (void); | |
41 | |
42 /* you can use this if you are trying to debug corruption in the specbind | |
43 stack */ | |
44 void check_specbind_stack_sanity (void); | |
45 #else | |
46 #define check_catchlist_sanity() | |
47 #define check_specbind_stack_sanity() | |
48 #endif | |
49 | |
428 | 50 /* These definitions are used in eval.c and alloc.c */ |
51 | |
52 struct backtrace | |
53 { | |
54 struct backtrace *next; | |
55 Lisp_Object *function; | |
56 Lisp_Object *args; /* Points to vector of args. */ | |
57 int nargs; /* Length of vector. | |
58 If nargs is UNEVALLED, args points to | |
59 slot holding list of unevalled args */ | |
60 int pdlcount; /* specpdl_depth () when invoked */ | |
61 char evalargs; | |
62 /* Nonzero means call value of debugger when done with this operation. */ | |
63 char debug_on_exit; | |
1292 | 64 |
65 /* All the rest is information for the use of the profiler. The only | |
66 thing that eval.c does is set the first value to 0 so that it can | |
67 be relied upon. */ | |
68 | |
69 /* ----------------------------------------------------------------- */ | |
70 | |
71 /* 0 = profiling not turned on when function called. | |
72 Since profiling can be turned on and off dynamically, we can't | |
73 always count on having info recorded when a function was called | |
74 and need to take evasive action if necessary. | |
75 1 = profiling turned on but function not yet actually called. Lots of | |
76 stuff can happen between when a function is pushed onto the | |
77 backtrace list and when it's actually called (e.g. evalling its | |
78 arguments, autoloading, etc.). For greater accuracy we don't | |
79 treat the preamble stuff as part of the function itself. | |
80 2 = profiling turned on, function called. | |
81 */ | |
82 char function_being_called; | |
83 /* The trick here is handling recursive functions and dealing with the | |
84 dynamicity of in-profile/not-in-profile. I used to just use a bunch | |
85 of hash tables for all info but that fails in the presence of | |
86 recursive functions because they can modify values out from under | |
87 you. The algorithm here is that we record the total_ticks and | |
88 total_consing, as well as the current values of `total-timing' and | |
89 `total-gc-usage' for the OBJ -- that's because recursive functions, | |
90 which get called later and exit early, will go ahead and modify the | |
91 `total-timing' and `total-gc-usage' for the fun, even though it's | |
92 not "correct" because the outer function is still running. However, | |
93 if we ask for profiling info at this point, at least we're getting | |
94 SOME info. | |
95 | |
96 So ... On entry, we record these four values. On exit, we compute | |
97 an offset from the recorded value to the current value and then | |
98 store it into the appropriate hash table entry, using the recorded | |
99 value in the entry rather than the actual one. (Inner recursive | |
100 functions may have added their own values to the total-counts, and | |
101 we want to subsume them, not add to them.) | |
102 | |
103 #### Also we need to go through the backtrace list during | |
104 stop-profiling and record values, just like for unwind_to. */ | |
105 EMACS_INT current_total_timing_val; | |
106 EMACS_INT current_total_gc_usage_val; | |
107 EMACS_UINT total_ticks_at_start; | |
108 EMACS_UINT total_consing_at_start; | |
428 | 109 }; |
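A minimal sketch of the entry/exit bookkeeping described in the comment above, assuming the declarations in this header are in scope. profile_enter()/profile_exit() are hypothetical names; the real bookkeeping lives in eval.c and profile.c and also handles the hash-table lookups and the 0/1/2 state transitions.

```c
/* Hypothetical sketch: on entry record the absolute counters and the
   function's current totals; on exit add the offset accumulated since
   entry to the totals recorded *at entry*, so updates made by inner
   recursive exits are subsumed rather than double-counted. */
static void
profile_enter (struct backtrace *bt,
               EMACS_UINT ticks_now, EMACS_UINT consing_now,
               EMACS_INT fun_total_timing, EMACS_INT fun_total_gc)
{
  bt->total_ticks_at_start = ticks_now;
  bt->total_consing_at_start = consing_now;
  bt->current_total_timing_val = fun_total_timing;
  bt->current_total_gc_usage_val = fun_total_gc;
  bt->function_being_called = 2;   /* profiling on, function now running */
}

static void
profile_exit (struct backtrace *bt,
              EMACS_UINT ticks_now, EMACS_UINT consing_now,
              EMACS_INT *fun_total_timing, EMACS_INT *fun_total_gc)
{
  EMACS_UINT dticks = ticks_now - bt->total_ticks_at_start;
  EMACS_UINT dconsing = consing_now - bt->total_consing_at_start;

  *fun_total_timing = bt->current_total_timing_val + (EMACS_INT) dticks;
  *fun_total_gc = bt->current_total_gc_usage_val + (EMACS_INT) dconsing;
}
```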
110 | |
111 /* This structure helps implement the `catch' and `throw' control | |
112 structure. A struct catchtag contains all the information needed | |
113 to restore the state of the interpreter after a non-local jump. | |
853 | 114 (No information is stored concerning how to restore the state of |
115 the condition-handler list; this is handled implicitly through | |
116 an unwind-protect. unwind-protects are on the specbind stack, | |
117 which is reset to its proper value by `throw'. In the process of | |
118 that, any intervening bindings are reset and unwind-protects called, | |
119 which fixes up the condition-handler list.) | |
428 | 120 |
121 catchtag structures are chained together in the C calling stack; | |
122 the `next' member points to the next outer catchtag. | |
123 | |
124 A call like (throw TAG VAL) searches for a catchtag whose `tag' | |
853 | 125 member is TAG, and then unbinds to it. A value of Vcatch_everything_tag |
126 for the `tag' member of a catchtag is special and means "catch all throws, | |
127 regardless of the tag". This is used internally by the C code. The `val' | |
128 member is used to hold VAL while the stack is unwound; `val' is returned | |
129 as the value of the catch form. The `actual_tag' member holds the value | |
130 of TAG as passed to throw, so that it can be retrieved when catches with | |
131 Vcatch_everything_tag are set up. | |
428 | 132 |
133 All the other members are concerned with restoring the interpreter | |
134 state. */ | |
135 | |
136 struct catchtag | |
137 { | |
138 Lisp_Object tag; | |
853 | 139 /* Stores the actual tag used in `throw'; the same as TAG, unless |
140 TAG is Vcatch_everything_tag. */ | |
141 Lisp_Object actual_tag; | |
2532 | 142 /* A backtrace prior to the throw, used with Vcatch_everything_tag. */ |
143 Lisp_Object backtrace; | |
428 | 144 Lisp_Object val; |
145 struct catchtag *next; | |
146 struct gcpro *gcpro; | |
147 JMP_BUF jmp; | |
148 struct backtrace *backlist; | |
149 #if 0 /* FSFmacs */ | |
617 | 150 /* FSF uses a separate handler stack to hold condition-cases, |
151 where we use Vcondition_handlers. We should switch to their | |
152 system because it avoids the need to mess around with consing | |
153 up stuff and then dangerously freeing it. See comment in | |
154 condition_case_unwind(). */ | |
428 | 155 struct handler *handlerlist; |
156 #endif | |
157 int lisp_eval_depth; | |
158 int pdlcount; | |
159 #if 0 /* FSFmacs */ | |
160 /* This is the equivalent of async_timer_suppress_count. | |
161 We probably don't have to bother with this. */ | |
162 int poll_suppress_count; | |
163 #endif | |
164 }; | |
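As a rough schematic of how such a frame is used (the real code is internal_catch() and the throw machinery in eval.c, which handle more state than shown; catch_sketch() is a made-up name and the unwind details are simplified):

```c
/* Schematic only: push a catchtag, run FUNC, and return either its
   normal result or the value delivered by a matching `throw'. */
static Lisp_Object
catch_sketch (Lisp_Object tag, Lisp_Object (*func) (Lisp_Object),
              Lisp_Object arg)
{
  struct catchtag c;

  c.tag = tag;                      /* what (throw TAG VAL) must match */
  c.actual_tag = Qnil;
  c.backtrace = Qnil;
  c.val = Qnil;
  c.next = catchlist;               /* chain onto the enclosing catches */
  c.gcpro = gcprolist;
  c.backlist = backtrace_list;
  c.lisp_eval_depth = lisp_eval_depth;
  c.pdlcount = specpdl_depth ();
  catchlist = &c;

  if (SETJMP (c.jmp))
    /* A throw landed here; the unwind code has already restored the
       interpreter state and stored VAL in c.val. */
    return c.val;

  c.val = (*func) (arg);            /* normal, non-throwing path */
  catchlist = c.next;               /* pop our frame */
  return c.val;
}
```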
165 | |
166 /* Dynamic-binding-o-rama */ | |
167 | |
168 /* Structure for recording Lisp call stack for backtrace purposes. */ | |
169 | |
170 /* The special binding stack holds the outer values of variables while | |
171 they are bound by a function application or a let form, stores the | |
172 code to be executed for Lisp unwind-protect forms, and stores the C | |
173 functions to be called for record_unwind_protect. | |
174 | |
175 If func is non-zero, undoing this binding applies func to old_value; | |
176 This implements record_unwind_protect. | |
177 If func is zero and symbol is nil, undoing this binding evaluates | |
178 the list of forms in old_value; this implements Lisp's unwind-protect | |
179 form. | |
180 Otherwise, undoing this binding stores old_value as symbol's value; this | |
181 undoes the bindings made by a let form or function call. */ | |
182 | |
183 struct specbinding | |
184 { | |
185 Lisp_Object symbol; | |
186 Lisp_Object old_value; | |
187 Lisp_Object (*func) (Lisp_Object); /* for unwind-protect */ | |
188 }; | |
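The three cases described above amount to a small dispatch when a binding is undone. A simplified sketch follows (undo_one_specbinding() is an illustrative name; the real work, including the magic-symbol cases, is done by unbind_to_1() and unbind_to_hairy() in eval.c):

```c
/* Sketch: undo a single specbinding entry according to the three cases
   described above (error checking and magic symbol values omitted). */
static void
undo_one_specbinding (struct specbinding *bind)
{
  if (bind->func != 0)
    (*bind->func) (bind->old_value);      /* record_unwind_protect () case */
  else if (NILP (bind->symbol))
    Fprogn (bind->old_value);             /* Lisp unwind-protect body forms */
  else
    Fset (bind->symbol, bind->old_value); /* restore a let/funcall binding */
}
```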
189 | |
190 #if 0 /* FSFmacs */ | |
191 /* #### */ | |
192 /* Everything needed to describe an active condition case. */ | |
193 struct handler | |
194 { | |
195 /* The handler clauses and variable from the condition-case form. */ | |
196 Lisp_Object handler; | |
197 Lisp_Object var; | |
198 /* Fsignal stores here the condition-case clause that applies, | |
199 and Fcondition_case thus knows which clause to run. */ | |
200 Lisp_Object chosen_clause; | |
201 | |
202 /* Used to effect the longjmp() out to the handler. */ | |
203 struct catchtag *tag; | |
204 | |
205 /* The next enclosing handler. */ | |
206 struct handler *next; | |
207 }; | |
208 | |
209 extern struct handler *handlerlist; | |
210 | |
211 #endif | |
212 | |
213 /* These are extern because GC needs to mark them */ | |
214 extern struct specbinding *specpdl; | |
215 extern struct specbinding *specpdl_ptr; | |
216 extern struct catchtag *catchlist; | |
217 extern struct backtrace *backtrace_list; | |
218 | |
771 | 219 /* Most callers should simply use specbind() and unbind_to_1(), but if |
428 | 220 speed is REALLY IMPORTANT, you can use the faster macros below */ |
221 void specbind_magic (Lisp_Object, Lisp_Object); | |
647 | 222 void grow_specpdl (EMACS_INT reserved); |
428 | 223 void unbind_to_hairy (int); |
224 extern int specpdl_size; | |
225 | |
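For reference, the ordinary (non-macro) calling pattern the comment above refers to looks roughly like this; call_with_binding() is a hypothetical wrapper, while specbind(), unbind_to_1() and specpdl_depth() are the normal entry points.

```c
/* Hypothetical helper: dynamically bind SYMBOL to VALUE around BODY,
   then restore the outer binding and return BODY's result. */
static Lisp_Object
call_with_binding (Lisp_Object symbol, Lisp_Object value,
                   Lisp_Object (*body) (void))
{
  int speccount = specpdl_depth ();     /* remember where we started */

  specbind (symbol, value);             /* push old value, install new one */
  return unbind_to_1 (speccount, (*body) ());   /* run BODY, then unwind */
}
```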
226 /* Inline version of specbind(). | |
227 Use this instead of specbind() if speed is sufficiently important | |
228 to save the overhead of even a single function call. */ | |
229 #define SPECBIND(symbol_object, value_object) do { \ | |
230 Lisp_Object SB_symbol = (symbol_object); \ | |
231 Lisp_Object SB_newval = (value_object); \ | |
232 Lisp_Object SB_oldval; \ | |
440 | 233 Lisp_Symbol *SB_sym; \ |
428 | 234 \ |
235 SPECPDL_RESERVE (1); \ | |
236 \ | |
237 CHECK_SYMBOL (SB_symbol); \ | |
238 SB_sym = XSYMBOL (SB_symbol); \ | |
239 SB_oldval = SB_sym->value; \ | |
240 \ | |
241 if (!SYMBOL_VALUE_MAGIC_P (SB_oldval) || UNBOUNDP (SB_oldval)) \ | |
242 { \ | |
440 | 243 /* #### the following test will go away when we have a constant \ |
428 | 244 symbol magic object */ \ |
245 if (EQ (SB_symbol, Qnil) || \ | |
246 EQ (SB_symbol, Qt) || \ | |
247 SYMBOL_IS_KEYWORD (SB_symbol)) \ | |
248 reject_constant_symbols (SB_symbol, SB_newval, 0, \ | |
249 UNBOUNDP (SB_newval) ? \ | |
250 Qmakunbound : Qset); \ | |
251 \ | |
252 specpdl_ptr->symbol = SB_symbol; \ | |
253 specpdl_ptr->old_value = SB_oldval; \ | |
254 specpdl_ptr->func = 0; \ | |
255 specpdl_ptr++; \ | |
256 specpdl_depth_counter++; \ | |
257 \ | |
258 SB_sym->value = (SB_newval); \ | |
259 } \ | |
260 else \ | |
261 specbind_magic (SB_symbol, SB_newval); \ | |
853 | 262 check_specbind_stack_sanity (); \ |
428 | 263 } while (0) |
264 | |
265 /* An even faster, but less safe inline version of specbind(). | |
266 Caller guarantees that: | |
267 - SYMBOL is a non-constant symbol (i.e. not Qnil, Qt, or keyword). | |
268 - specpdl_depth_counter < specpdl_size. | |
269 Else we crash. */ | |
270 #define SPECBIND_FAST_UNSAFE(symbol_object, value_object) do { \ | |
271 Lisp_Object SFU_symbol = (symbol_object); \ | |
272 Lisp_Object SFU_newval = (value_object); \ | |
440 | 273 Lisp_Symbol *SFU_sym = XSYMBOL (SFU_symbol); \ |
428 | 274 Lisp_Object SFU_oldval = SFU_sym->value; \ |
814 | 275 /* Most of the time, will be previously unbound. #### With a bit of \ |
276 rearranging, this could be reduced to only one check. */ \ | |
277 if (UNBOUNDP (SFU_oldval) || !SYMBOL_VALUE_MAGIC_P (SFU_oldval)) \ | |
428 | 278 { \ |
279 specpdl_ptr->symbol = SFU_symbol; \ | |
280 specpdl_ptr->old_value = SFU_oldval; \ | |
281 specpdl_ptr->func = 0; \ | |
282 specpdl_ptr++; \ | |
283 specpdl_depth_counter++; \ | |
284 \ | |
285 SFU_sym->value = (SFU_newval); \ | |
286 } \ | |
287 else \ | |
288 specbind_magic (SFU_symbol, SFU_newval); \ | |
853 | 289 check_specbind_stack_sanity (); \ |
428 | 290 } while (0) |
291 /* Request enough room for SIZE future entries on special binding stack */ | |
292 #define SPECPDL_RESERVE(size) do { \ | |
647 | 293 EMACS_INT SR_size = (size); \ |
428 | 294 if (specpdl_depth() + SR_size >= specpdl_size) \ |
295 grow_specpdl (SR_size); \ | |
296 } while (0) | |
297 | |
771 | 298 /* Inline version of unbind_to_1(). |
299 [[Use this instead of unbind_to_1() if speed is sufficiently important | |
300 to save the overhead of even a single function call.]] | |
301 This is bogus pseudo-optimization. --ben | |
428 | 302 |
771 | 303 Most of the time, unbind_to_1() is called only on ordinary |
428 | 304 variables, so optimize for that. */ |
305 #define UNBIND_TO_GCPRO(count, value) do { \ | |
306 int UNBIND_TO_count = (count); \ | |
307 while (specpdl_depth_counter != UNBIND_TO_count) \ | |
308 { \ | |
440 | 309 Lisp_Symbol *sym; \ |
428 | 310 --specpdl_ptr; \ |
311 --specpdl_depth_counter; \ | |
312 \ | |
313 if (specpdl_ptr->func != 0 || \ | |
314 ((sym = XSYMBOL (specpdl_ptr->symbol)), \ | |
315 SYMBOL_VALUE_MAGIC_P (sym->value))) \ | |
316 { \ | |
317 struct gcpro gcpro1; \ | |
318 GCPRO1 (value); \ | |
319 unbind_to_hairy (UNBIND_TO_count); \ | |
320 UNGCPRO; \ | |
321 break; \ | |
322 } \ | |
323 \ | |
324 sym->value = specpdl_ptr->old_value; \ | |
325 } \ | |
853 | 326 check_specbind_stack_sanity (); \ |
428 | 327 } while (0) |
328 | |
771 | 329 /* A slightly faster inline version of unbind_to_1, |
428 | 330 that doesn't offer GCPROing services. */ |
331 #define UNBIND_TO(count) do { \ | |
332 int UNBIND_TO_count = (count); \ | |
333 while (specpdl_depth_counter != UNBIND_TO_count) \ | |
334 { \ | |
440 | 335 Lisp_Symbol *sym; \ |
428 | 336 --specpdl_ptr; \ |
337 --specpdl_depth_counter; \ | |
338 \ | |
339 if (specpdl_ptr->func != 0 || \ | |
340 ((sym = XSYMBOL (specpdl_ptr->symbol)), \ | |
341 SYMBOL_VALUE_MAGIC_P (sym->value))) \ | |
342 { \ | |
343 unbind_to_hairy (UNBIND_TO_count); \ | |
344 break; \ | |
345 } \ | |
346 \ | |
347 sym->value = specpdl_ptr->old_value; \ | |
348 } \ | |
853 | 349 check_specbind_stack_sanity (); \ |
428 | 350 } while (0) |
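Roughly how the fast macros pair up in a speed-critical caller (a hypothetical example; everything except SPECBIND, UNBIND_TO and specpdl_depth() is a stand-in):

```c
/* Hypothetical speed-critical caller: bind, do the work, unbind.
   Ordinary code should simply call specbind () and unbind_to_1 (). */
static void
fast_bind_example (Lisp_Object symbol, Lisp_Object value)
{
  int speccount = specpdl_depth ();

  SPECBIND (symbol, value);    /* reserves specpdl room, checks SYMBOL */
  /* ... speed-critical body would go here ... */
  UNBIND_TO (speccount);       /* restore; no return value to GC-protect */
}
```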
351 | |
352 #if 0 | |
353 /* Unused. It's too hard to guarantee that the current bindings | |
354 contain only variables. */ | |
771 | 355 /* Another inline version of unbind_to_1(). VALUE is GC-protected. |
428 | 356 Caller guarantees that: |
357 - all of the elements on the binding stack are variable bindings. | |
358 Else we crash. */ | |
359 #define UNBIND_TO_GCPRO_VARIABLES_ONLY(count, value) do { \ | |
360 int UNBIND_TO_count = (count); \ | |
361 while (specpdl_depth_counter != UNBIND_TO_count) \ | |
362 { \ | |
440 | 363 Lisp_Symbol *sym; \ |
428 | 364 --specpdl_ptr; \ |
365 --specpdl_depth_counter; \ | |
366 \ | |
367 sym = XSYMBOL (specpdl_ptr->symbol); \ | |
368 if (!SYMBOL_VALUE_MAGIC_P (sym->value)) \ | |
369 sym->value = specpdl_ptr->old_value; \ | |
370 else \ | |
371 { \ | |
372 struct gcpro gcpro1; \ | |
373 GCPRO1 (value); \ | |
374 unbind_to_hairy (UNBIND_TO_count); \ | |
375 UNGCPRO; \ | |
376 break; \ | |
377 } \ | |
378 } \ | |
379 } while (0) | |
380 #endif /* unused */ | |
381 | |
382 /* A faster, but less safe inline version of Fset(). | |
383 Caller guarantees that: | |
384 - SYMBOL is a non-constant symbol (i.e. not Qnil, Qt, or keyword). | |
385 Else we crash. */ | |
386 #define FSET_FAST_UNSAFE(sym, newval) do { \ | |
387 Lisp_Object FFU_sym = (sym); \ | |
388 Lisp_Object FFU_newval = (newval); \ | |
440 | 389 Lisp_Symbol *FFU_symbol = XSYMBOL (FFU_sym); \ |
428 | 390 Lisp_Object FFU_oldval = FFU_symbol->value; \ |
391 if (!SYMBOL_VALUE_MAGIC_P (FFU_oldval) || UNBOUNDP (FFU_oldval)) \ | |
392 FFU_symbol->value = FFU_newval; \ | |
393 else \ | |
394 Fset (FFU_sym, FFU_newval); \ | |
395 } while (0) | |
396 | |
1292 | 397 /* Note: you must always fill in all of the fields in a backtrace structure |
398 before pushing them on the backtrace_list. The profiling code depends | |
399 on this. */ | |
400 | |
401 #define PUSH_BACKTRACE(bt) do { \ | |
402 (bt).next = backtrace_list; \ | |
403 backtrace_list = &(bt); \ | |
404 } while (0) | |
405 | |
406 #define POP_BACKTRACE(bt) do { \ | |
407 backtrace_list = (bt).next; \ | |
408 } while (0) | |
409 | |
440 | 410 #endif /* INCLUDED_backtrace_h_ */ |
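Finally, the push/fill/pop pattern required by the note before PUSH_BACKTRACE looks roughly like this in a caller along the lines of eval.c's funcall path (a sketch with illustrative names; the actual call is elided):

```c
/* Sketch: fill in the frame before PUSH_BACKTRACE; per the struct
   comment above, eval.c only initializes the profiler field
   function_being_called, setting it to 0. */
static Lisp_Object
call_with_backtrace_sketch (Lisp_Object fun, int nargs, Lisp_Object *args)
{
  struct backtrace backtrace;
  Lisp_Object result;

  backtrace.pdlcount = specpdl_depth ();
  backtrace.function = &fun;
  backtrace.args = args;
  backtrace.nargs = nargs;
  backtrace.evalargs = 0;
  backtrace.debug_on_exit = 0;
  backtrace.function_being_called = 0;
  PUSH_BACKTRACE (backtrace);

  result = Qnil;    /* ... the actual function call would happen here ... */

  POP_BACKTRACE (backtrace);
  return result;
}
```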