/* Handling asynchronous signals.
   Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
   Copyright (C) 1995, 1996, 2001, 2002 Ben Wing.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Synched up with: Not synched with FSF.  Split out of keyboard.c. */

#include <config.h>
#include "lisp.h"

#include "console.h"
#include "device-impl.h"
#include "events.h" /* for signal_fake_event() */
#include "frame-impl.h"
#include "process.h"

#include "sysdep.h"
#include "sysfile.h"
#include "syssignal.h"
#include "systime.h"

/* Set to 1 when a quit-check signal (either a SIGIO interrupt or
   the asynch. timeout for poll-for-quit) occurs.  The QUITP
   macro may look at this. */
volatile int quit_check_signal_happened;

/* Count of the number of times a quit-check signal has occurred.
   Some stuff in event-Xt.c looks at this. */
volatile int quit_check_signal_tick_count;

/* Set to 1 when a SIGINT (or SIGQUIT) interrupt is processed.
   maybe_read_quit_event() looks at this. */
volatile int sigint_happened;

/* Set to 1 when an asynch. timeout signal occurs. */
static volatile int async_timeout_happened;

/* Set to 1 when a multiple of SLOWED_DOWN_INTERRUPTS_SECS elapses,
   after slow_down_interrupts() is called. */
static volatile int slowed_interrupt_timeout_happened;

/* This is used to synchronize setting the waiting_for_user_input_p
   flag. */
static volatile int async_timeout_happened_while_emacs_was_blocking;

/* See check_quit() for when this is set. */
int dont_check_for_quit;

static int poll_for_quit_id;
static int poll_for_sigchld_id;

/* This variable is used to communicate to a lisp
   process-filter/sentinel/asynchronous callback (via the function
   Fwaiting_for_user_input_p below) whether XEmacs was waiting for
   user-input when that process-filter was called. */
static int waiting_for_user_input_p;

static int interrupts_slowed_down;

#define SLOWED_DOWN_INTERRUPTS_SECS 15
#define NORMAL_QUIT_CHECK_TIMEOUT_MSECS 250
#define NORMAL_SIGCHLD_CHECK_TIMEOUT_MSECS 250

/* Used so that signals can break out of system calls that aren't
   naturally interruptible. */

JMP_BUF break_system_call_jump;
volatile int can_break_system_calls;

static SIGTYPE alarm_signal (int signo);


/**********************************************************************/
/*                  Asynchronous timeout functions                    */
/**********************************************************************/

/* See the comment in event-stream.c, under major heading "Timeouts",
   for the difference between low-level (one-shot) and high-level
   (periodic/resignaling) timeouts. */

/* The pending timers are stored in an ordered list, where the first timer
   on the list is the first one to fire.  Times recorded here are
   absolute. */
static struct low_level_timeout *async_timer_queue;

/* Nonzero means async timers are temporarily suppressed. */
static int async_timer_suppress_count;

static void
set_one_shot_timer (EMACS_TIME interval)
{
#ifdef HAVE_SETITIMER
  struct itimerval it;
  it.it_value = interval;
  EMACS_SET_SECS_USECS (it.it_interval, 0, 0);
  qxe_setitimer (ITIMER_REAL, &it, 0);
#else
  int secs;
  EMACS_TIME_TO_INT (interval, secs);
  alarm (secs);
#endif
}

static void
reset_interval_timer (void)
{
  EMACS_TIME interval;

  /* Get the interval to set.  If an interval is available,
     make sure it's not zero (this is a valid return, but it will
     cause the timer to get disabled, so convert it to a very short
     time). */
  if (get_low_level_timeout_interval (async_timer_queue, &interval))
    {
      if (EMACS_SECS (interval) == 0 && EMACS_USECS (interval) == 0)
        EMACS_SET_USECS (interval, 1);
    }
  else
    /* A time of 0 means "disable". */
    EMACS_SET_SECS_USECS (interval, 0, 0);

  set_one_shot_timer (interval);
}


static void
init_async_timeouts (void)
{
  set_timeout_signal (SIGALRM, alarm_signal);
  async_timer_suppress_count = 0;
}

/* Turn off async timeouts. */

static void
stop_async_timeouts (void)
{
  if (async_timer_suppress_count == 0)
    {
      /* If timer was on, turn it off. */
      EMACS_TIME thyme;
      EMACS_SET_SECS_USECS (thyme, 0, 0);
      set_one_shot_timer (thyme);
    }
  async_timer_suppress_count++;
}

/* Turn on async timeouts again. */

static void
start_async_timeouts (void)
{
  assert (async_timer_suppress_count > 0);
  async_timer_suppress_count--;
  if (async_timer_suppress_count == 0)
    {
      /* Some callers turn off async timeouts and then use the alarm
         for their own purposes; so reinitialize everything. */
      set_timeout_signal (SIGALRM, alarm_signal);
      reset_interval_timer ();
    }
}

static void
handle_async_timeout_signal (void)
{
  int interval_id;
  int wakeup_id;
  Lisp_Object fun, arg;
  /* Avoid any possibility of GC during QUIT */
  int specco = begin_gc_forbidden ();

  /* No checks for Vinhibit_quit here or anywhere else in this file!!!
     Otherwise critical quit will not work right.
     The only check for Vinhibit_quit is in QUIT itself.

     (#### ???? I don't quite understand this comment.) */
  interval_id = pop_low_level_timeout (&async_timer_queue, 0);

  reset_interval_timer ();
  if (async_timeout_happened_while_emacs_was_blocking)
    {
      async_timeout_happened_while_emacs_was_blocking = 0;
      waiting_for_user_input_p = 1;
    }

  wakeup_id = event_stream_resignal_wakeup (interval_id, 1, &fun, &arg);

  if (wakeup_id == poll_for_quit_id)
    {
      quit_check_signal_happened = 1;
      quit_check_signal_tick_count++;
    }
  else if (wakeup_id == poll_for_sigchld_id)
    {
      kick_status_notify ();
    }
  else
    /* call1 GC-protects its arguments */
    call1_trapping_problems ("Error in asynchronous timeout callback",
                             fun, arg, INHIBIT_GC);

  waiting_for_user_input_p = 0;

  unbind_to (specco);
}

/* The following two functions are the external interface onto
   creating/deleting asynchronous interval timeouts, and are
   called by event-stream.c.  We call back to event-stream.c using
   event_stream_resignal_wakeup(), when an interval goes off. */

int
signal_add_async_interval_timeout (EMACS_TIME thyme)
{
  int id = add_low_level_timeout (&async_timer_queue, thyme);

  /* If this timeout is at the head of the queue, then we need to
     set the timer right now for this timeout.  Otherwise, things
     are fine as-is; after the timers ahead of us are signalled,
     the timer will be set for us. */

  if (async_timer_queue->id == id)
    reset_interval_timer ();

  return id;
}

void
signal_remove_async_interval_timeout (int id)
{
  int first = (async_timer_queue && async_timer_queue->id == id);
  remove_low_level_timeout (&async_timer_queue, id);

  /* If we removed the timeout from the head of the queue, then
     we need to reset the interval timer right now. */
  if (first)
    reset_interval_timer ();
}
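
/* A minimal sketch of how a caller such as event-stream.c might use the
   two functions above: compute an absolute expiry time (the queue stores
   absolute times), register it, and later cancel it by id.  This is
   illustrative only and assumes the EMACS_GET_TIME / EMACS_ADD_TIME
   helpers from systime.h; it is not part of the real interface. */
#if 0
static int example_timeout_id;

static void
example_register_timeout_in_one_second (void)
{
  EMACS_TIME now, delay, expiry;

  EMACS_GET_TIME (now);
  EMACS_SET_SECS_USECS (delay, 1, 0);
  EMACS_ADD_TIME (expiry, now, delay);
  example_timeout_id = signal_add_async_interval_timeout (expiry);
}

static void
example_cancel_timeout (void)
{
  signal_remove_async_interval_timeout (example_timeout_id);
}
#endif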

/* If alarm() gets called when polling isn't disabled, it will mess up
   the asynchronous timeouts, and then C-g checking won't work again.
   Some libraries call alarm() directly, so we override the standard
   library's alarm() and abort() if the caller of the library function
   didn't wrap in stop_interrupts()/start_interrupts().

   NOTE: We could potentially avoid the need to wrap by adding a
   one-shot timeout to simulate the alarm(), smashing our signal
   handler back into place, and calling the library function when the
   alarm goes off.  But do we want to?  We're not going to gain the
   ability to C-g out of library functions this way (unless we forcibly
   longjmp() out of a signal handler, which is likely to lead to a
   crash). --ben */

#ifdef HAVE_SETITIMER

unsigned int
alarm (unsigned int howlong)
{
  struct itimerval old_it, new_it;

  assert (async_timer_suppress_count > 0);

  new_it.it_value.tv_sec = howlong;
  new_it.it_value.tv_usec = 0;
  new_it.it_interval.tv_sec = 0;
  new_it.it_interval.tv_usec = 0;
  qxe_setitimer (ITIMER_REAL, &new_it, &old_it);

  /* Never return zero if there was a timer outstanding. */
  return old_it.it_value.tv_sec + (old_it.it_value.tv_usec > 0 ? 1 : 0);
}

int
qxe_setitimer (int kind, const struct itimerval *itnew,
               struct itimerval *itold)
{
#ifdef WIN32_ANY
  /* setitimer() does not exist on native MS Windows, and appears broken
     on Cygwin.  See win32.c.

     We are emulating the Unix98 setitimer() function, as found in its
     incarnations on modern versions of Unix.  Note however that in
     the win32.c version, ITNEW and ITOLD must be equal if both are
     non-zero, due to limitations in the underlying multimedia-timer
     API. */
  return mswindows_setitimer (kind, itnew, itold);
#else
  /* YUCK!  glibc defines setitimer's first argument as
     enum __itimer_which, not int, which causes compile errors if
     we call setitimer() in the obvious way. */
  switch (kind)
    {
    case ITIMER_REAL:    return setitimer (ITIMER_REAL, itnew, itold);
    case ITIMER_VIRTUAL: return setitimer (ITIMER_VIRTUAL, itnew, itold);
    case ITIMER_PROF:    return setitimer (ITIMER_PROF, itnew, itold);
    default: abort (); return 0;
    }
#endif
}

#endif /* HAVE_SETITIMER */
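
/* A minimal sketch of the wrapping discipline the comment above asks for:
   any library call that may invoke alarm() internally should be bracketed
   by stop_interrupts () / start_interrupts (), so that the overridden
   alarm() above does not trip its assert.  The library call shown here is
   a made-up placeholder, not a real XEmacs dependency. */
#if 0
static void
example_call_library_that_uses_alarm (void)
{
  stop_interrupts ();            /* suspend SIGIO and the interval timer */
  some_library_function ();      /* hypothetical function that calls alarm() */
  start_interrupts ();           /* restore SIGIO and re-arm our timer */
}
#endif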

signal_handler_t
set_timeout_signal (int signal_number, signal_handler_t action)
{
#ifdef CYGWIN_BROKEN_SIGNALS
  return mswindows_sigset (signal_number, action);
#else
  return EMACS_SIGNAL (signal_number, action);
#endif
}

DEFUN ("waiting-for-user-input-p", Fwaiting_for_user_input_p, 0, 0, 0, /*
Return non-nil if XEmacs is waiting for input from the user.
This is intended for use by asynchronous timeout callbacks and by
asynchronous process output filters and sentinels (not yet implemented
in XEmacs).  It will always be nil if XEmacs is not inside of
an asynchronous timeout or process callback.
*/
       ())
{
  return waiting_for_user_input_p ? Qt : Qnil;
}


/**********************************************************************/
/*                    Enabling/disabling signals                      */
/**********************************************************************/

static int interrupts_initted;

void
stop_interrupts (void)
{
  if (!interrupts_initted)
    return;
#if defined(SIGIO) && !defined(BROKEN_SIGIO)
  unrequest_sigio ();
#endif
  stop_async_timeouts ();
}

void
start_interrupts (void)
{
  if (!interrupts_initted)
    return;
#if defined(SIGIO) && !defined(BROKEN_SIGIO)
  request_sigio ();
#endif
  start_async_timeouts ();
}


static void
establish_slow_interrupt_timer (void)
{
  EMACS_TIME thyme;

  EMACS_SET_SECS_USECS (thyme, SLOWED_DOWN_INTERRUPTS_SECS, 0);
  set_one_shot_timer (thyme);
}

/* Some functions don't like being interrupted with SIGALRM or SIGIO.
   Previously we were calling stop_interrupts() / start_interrupts(),
   but then if the program hangs in one of those functions, e.g.
   waiting for a connect(), we're really screwed.  So instead we
   just "slow them down".  We do this by disabling all interrupts
   and then installing a timer of length fairly large, like 5 or
   10 secs.  That way, any "legitimate" connections (which should
   take a fairly short amount of time) go through OK, but we can
   interrupt bogus ones. */

void
slow_down_interrupts (void)
{
  /* We have to set the flag *before* setting the slowed-down timer,
     to avoid a race condition -- if the signal occurs between the
     call to set_one_shot_timer() and the setting of this flag,
     async_timeout_happened will get set, which will be a Bad Thing if
     there were no timeouts on the queue. */
  interrupts_slowed_down++;
  if (interrupts_slowed_down == 1)
    {
      stop_interrupts ();
      establish_slow_interrupt_timer ();
    }
}

void
speed_up_interrupts (void)
{
  if (interrupts_slowed_down > 0)
    {
      start_interrupts ();
      /* Change this flag AFTER fiddling with interrupts, for the same
         race-condition reasons as above. */
      interrupts_slowed_down--;
    }
}
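
/* A minimal sketch of the intended use of the two functions above: bracket
   a system call that may block for a long time (the connect() case from the
   comment) so that quit checking keeps ticking, if only every
   SLOWED_DOWN_INTERRUPTS_SECS seconds.  The socket setup is hypothetical
   and elided; this is not code actually present in XEmacs. */
#if 0
static int
example_blocking_connect (int fd, const struct sockaddr *addr, socklen_t len)
{
  int retval;

  slow_down_interrupts ();
  retval = connect (fd, addr, len);   /* may block; the slow alarm still fires */
  speed_up_interrupts ();
  return retval;
}
#endif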


/**********************************************************************/
/*                  The mechanism that drives it all                  */
/**********************************************************************/

/* called from QUIT when something_happened gets set (as a result of
   a signal) */

void
check_what_happened (void)
{
  /* No GC can happen anywhere here.  handle_async_timeout_signal()
     prevents GC (from asynch timeout handler), so does check_quit()
     (from processing a message such as WM_INITMENU as a result of
     draining the message queue).  establish_slow_interrupt_timer() is
     too low-level to do anything that might invoke QUIT or call Lisp
     code. */

#ifdef ERROR_CHECK_TRAPPING_PROBLEMS
  assert_with_message
    (proper_redisplay_wrapping_in_place (),
     "QUIT called from within redisplay without being properly wrapped");

  /* When in a critical section, don't reset something_happened, so that
     every single QUIT will verify proper wrapping.  (something_happened
     was set by enter_redisplay_critical_section() and will be reset
     upon exit.) */
  if (!in_display)
#endif
    something_happened = 0;

  if (async_timeout_happened)
    {
      async_timeout_happened = 0;
      handle_async_timeout_signal ();
    }
  if (slowed_interrupt_timeout_happened)
    {
      slowed_interrupt_timeout_happened = 0;
      establish_slow_interrupt_timer ();
    }

  check_quit ();
}
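
/* A rough sketch (not the real macro, which lives in lisp.h) of how the
   signal handlers in this file connect to check_what_happened(): the
   handlers only set something_happened, and the next QUIT check in
   ordinary code notices the flag and does the real work outside of
   signal context. */
#if 0
#define EXAMPLE_QUIT_CHECK()                    \
  do {                                          \
    if (something_happened)                     \
      check_what_happened ();                   \
  } while (0)
#endif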

#ifdef SIGIO

/* Signal handler for SIGIO. */

static void
input_available_signal (int signo)
{
  something_happened = 1; /* tell QUIT to wake up */
  quit_check_signal_happened = 1;
  quit_check_signal_tick_count++;
  EMACS_REESTABLISH_SIGNAL (signo, input_available_signal);
  SIGRETURN;
}

#endif /* SIGIO */

/* Actual signal handler for SIGALRM.  Called when:

   -- asynchronous timeouts (added with `add-async-timeout') go off

   -- the poll-for-quit timer (used for C-g handling; more or
      less when SIGIO is unavailable or BROKEN_SIGIO is defined) or
      the poll-for-sigchld timer (used when BROKEN_SIGCHLD is defined)
      goes off.  These two timers, if set, normally go off every 1/4
      of a second -- see NORMAL_QUIT_CHECK_TIMEOUT_MSECS and
      NORMAL_SIGCHLD_CHECK_TIMEOUT_MSECS.  (Both of these timers are
      treated like other asynchronous timeouts, but special-cased
      in handle_async_timeout_signal().)

   -- we called slow_down_interrupts() and SLOWED_DOWN_INTERRUPTS_SECS
      (or a multiple of it) has elapsed.

   Note that under Windows, we have no working setitimer(), so we
   simulate it using the multimedia timeout functions,
   e.g. timeSetEvent().  See setitimer() in nt.c.

   Note also that we don't actually *do* anything here (except in the
   case of can_break_system_calls).  Instead, we just set various
   flags; next time QUIT is called, the flags will cause
   check_what_happened() to be called, at which point we do everything
   indicated by the flags.
*/

static SIGTYPE
alarm_signal (int signo)
{
  something_happened = 1; /* tell QUIT to wake up and call
                             check_what_happened() */

  if (interrupts_slowed_down)
    {
      /* we are in "slowed-down interrupts" mode; the only alarm
         happening here is the slowed-down quit-check alarm, so
         we set this flag.

         Do NOT set async_timeout_happened, because we don't want
         anyone looking at the timeout queue -- async timeouts
         are disabled. */
      quit_check_signal_happened = 1;
      quit_check_signal_tick_count++;
      /* make sure we establish the slow timer again. */
      slowed_interrupt_timeout_happened = 1;

      /* can_break_system_calls is set when we want to break out of
         non-interruptible system calls. */
      if (can_break_system_calls)
        {
          /* reset the flag for safety and such.  Do this *before*
             unblocking or reestablishing the signal to avoid potential
             race conditions. */
          can_break_system_calls = 0;
#ifndef WIN32_NATIVE
          /* #### I didn't add this WIN32_NATIVE check.  I'm not sure
             why it's here.  But then again, someone needs to review
             this can_break_system_calls stuff and see if it still
             makes sense. --ben */
          EMACS_UNBLOCK_SIGNAL (signo);
          EMACS_REESTABLISH_SIGNAL (signo, alarm_signal);
          LONGJMP (break_system_call_jump, 0);
#endif
        }
    }
  else
    {
      async_timeout_happened = 1;
      if (emacs_is_blocking)
        async_timeout_happened_while_emacs_was_blocking = 1;
      /* #### This is for QUITP.  When it is run, it may not be the
         place to do arbitrary stuff like run asynch. handlers, but
         it needs to know whether the poll-for-quit asynch. timeout
         went off.  Rather than put the code in to compute this
         specially, we just set this flag.  Should fix this. */
      quit_check_signal_happened = 1;

#ifdef HAVE_UNIXOID_EVENT_LOOP
      signal_fake_event ();
#endif
    }

  EMACS_REESTABLISH_SIGNAL (signo, alarm_signal);
  SIGRETURN;
}

/* Set this for debugging, to have a way to get out */
int stop_character; /* #### not currently implemented */

/* Signal handler for SIGINT and SIGQUIT.  On TTY's, one of these two
   signals will get generated in response to C-g.  (When running under
   X, C-g is handled using the SIGIO handler, which sets a flag
   telling the QUIT macro to scan the unread events for a ^G.)
*/

static SIGTYPE
interrupt_signal (int sig)
{
  /* This function can call lisp */
  /* #### we should NOT be calling lisp from a signal handler, boys
     and girls */
  /* Must preserve main program's value of errno. */
  int old_errno = errno;

  EMACS_REESTABLISH_SIGNAL (sig, interrupt_signal);

  if (sigint_happened && CONSOLEP (Vcontrolling_terminal) &&
      CONSOLE_LIVE_P (XCONSOLE (Vcontrolling_terminal)) &&
      !emacs_is_blocking)
    {
      /* #### this is inherited from GNU Emacs.  Do we really want this?
         --ben */
      char c;
      fflush (stdout);
      reset_initial_console ();
      EMACS_UNBLOCK_SIGNAL (sig);
#ifdef SIGTSTP /* Support possible in later USG versions */
      /*
       * On systems which can suspend the current process and return to
       * the original shell, this command causes the user to end up back
       * at the shell.  The "Auto-save" and "Abort" questions are not asked
       * until the user elects to return to emacs, at which point he can
       * save the current job and either dump core or continue.
       */
      sys_suspend ();
#else
      /* Perhaps should really fork an inferior shell?
         But that would not provide any way to get back
         to the original shell, ever. */
      stdout_out ("No support for stopping a process on this operating system;\n");
      stdout_out ("you can continue or abort.\n");
#endif /* not SIGTSTP */
      stdout_out ("Auto-save? (y or n) ");
      if (((c = getc (stdin)) & ~040) == 'Y')
        Fdo_auto_save (Qnil, Qnil);
      while (c != '\n')
        c = getc (stdin);
      stdout_out ("Abort (and dump core)? (y or n) ");
      if (((c = getc (stdin)) & ~040) == 'Y')
        abort ();
      while (c != '\n')
        c = getc (stdin);
      stdout_out ("Continuing...\n");
      reinit_initial_console ();
      MARK_FRAME_CHANGED (XFRAME (DEVICE_SELECTED_FRAME
                                  (XDEVICE (CONSOLE_SELECTED_DEVICE
                                            (XCONSOLE
                                             (Vcontrolling_terminal))))));
    }
  else
    {
      /* Else request quit when it's safe */
      Vquit_flag = Qt;
      sigint_happened = 1;
#ifdef HAVE_UNIXOID_EVENT_LOOP
      signal_fake_event ();
#endif
    }
  errno = old_errno;
  SIGRETURN;
}


/**********************************************************************/
/*                         Control-G checking                         */
/**********************************************************************/

/* Note: The code to handle QUIT is divided between lisp.h and signal.c.
   There is also some special-case code in the async timer code in
   event-stream.c to notice when the poll-for-quit (and poll-for-sigchld)
   timers have gone off. */

/* OK, here's an overview of how this convoluted stuff works:

   [1] Scattered throughout the XEmacs core code are calls to the macro QUIT.
       This macro checks to see whether a C-g has recently been pressed and
       not yet handled, and if so, it handles the C-g by calling signal_quit(),
       which invokes the standard Fsignal() code, with the error being Qquit.
       Lisp code can establish handlers for this (using condition-case), but
       normally there is no handler, and so execution is thrown back to the
       innermost enclosing event loop.  (One of the things that happens when
       entering an event loop is that a condition-case is established that
       catches *all* calls to `signal', including this one.)

   [2] How does the QUIT macro check to see whether C-g has been pressed?
       Obviously this needs to be extremely fast.  Now for some history.
       In early Lemacs as inherited from the FSF going back 15 years or
       more, there was a great fondness for using SIGIO (which is sent
       whenever there is I/O available on a given socket, tty, etc.).
       In fact, in GNU Emacs, perhaps even today, all reading of events
       from the X server occurs inside the SIGIO handler!  This is crazy,
       but not completely relevant.  What is relevant is that similar
       stuff happened inside the SIGIO handler for C-g: it searched
       through all the pending (i.e. not yet delivered to XEmacs)
       X events for one that matched C-g.  When it saw a match, it set
       Vquit_flag to Qt.  On TTY's, C-g is actually mapped to be the
       interrupt character (i.e. it generates SIGINT), and XEmacs's
       handler for this signal sets Vquit_flag to Qt.  Then, sometime
       later after the signal handlers finished and a QUIT macro was
       called, the macro noticed the setting of Vquit_flag and used
       this as an indication to call signal_quit().  What signal_quit()
       actually does is set Vquit_flag to Qnil (so that we won't get
       repeated interruptions from a single C-g press) and then calls
       the equivalent of (signal 'quit nil).

   [3] Another complication is introduced in that Vquit_flag is actually
       exported to Lisp as `quit-flag'.  This allows users some level of
       control over whether and when C-g is processed as quit, esp. in
       combination with `inhibit-quit'.  This is another Lisp variable,
       and if set to non-nil, it inhibits signal_quit() from getting
       called, meaning that the C-g gets essentially ignored.  But not
       completely: Because the resetting of `quit-flag' happens only
       in signal_quit(), which isn't getting called, the C-g press is
       still noticed, and as soon as `inhibit-quit' is set back to nil,
       a quit will be signalled at the next QUIT macro.  Thus, what
       `inhibit-quit' really does is defer quits until after the
       quit-inhibited period.

   [4] Another consideration, introduced by XEmacs, is critical quitting.
       If you press Control-Shift-G instead of just C-g, `quit-flag' is
       set to `critical' instead of to t.  When QUIT processes this value,
       it *ignores* the value of `inhibit-quit'.  This allows you to quit
       even out of a quit-inhibited section of code!  Furthermore, when
       signal_quit() notices that it was invoked as a result of a critical
       quit, it automatically invokes the debugger (which otherwise would
       only happen when `debug-on-quit' is set to t).

   [5] Well, I explained above about how `quit-flag' gets set correctly,
       but I began with a disclaimer stating that this was the old way
       of doing things.  What's done now?  Well, first of all, the SIGIO
       handler (which formerly checked all pending events to see if there's
       a C-g) now does nothing but set a flag -- or actually two flags,
       something_happened and quit_check_signal_happened.  There are two
       flags because the QUIT macro is now used for more than just handling
       QUIT; it's also used for running asynchronous timeout handlers that
       have recently expired, and perhaps other things.  The idea here is
       that the QUIT macros occur extremely often in the code, but only occur
       at places that are relatively safe -- in particular, if an error occurs,
       nothing will get completely trashed.

   [6] Now, let's look at QUIT again.

       UNFINISHED.  Note, however, that as of the point when this comment
       got committed to CVS (mid-2001), the interaction between reading
       C-g as an event and processing it as QUIT was overhauled to (for
       the first time) be understandable and actually work correctly.
       Now, the way things work is that if C-g is pressed while XEmacs is
       blocking at the top level, waiting for a user event, it will be
       read as an event; otherwise, it will cause QUIT.  (This includes
       times when XEmacs is blocking, but not waiting for a user event,
       e.g. accept-process-output and wait_delaying_user_events().)
       Formerly, this was supposed to happen, but didn't always due to a
       bizarre and broken scheme, documented in next_event_internal
       like this:

         If we read a ^G, then set quit-flag but do not discard the ^G.
         The callers of next_event_internal() will do one of two things:

         -- set Vquit_flag to Qnil. (next-event does this.) This will
            cause the ^G to be treated as a normal keystroke.
         -- not change Vquit_flag but attempt to enqueue the ^G, at
            which point it will be discarded.  The next time QUIT is
            called, it will notice that Vquit_flag was set.

       This required weirdness in enqueue_command_event_1 like this:

         put the event on the typeahead queue, unless
         the event is the quit char, in which case the `QUIT'
         which will occur on the next trip through this loop is
         all the processing we should do - leaving it on the queue
         would cause the quit to be processed twice.

       And further weirdness elsewhere, none of which made any sense,
       and didn't work, because (e.g.) it required that QUIT never
       happen anywhere inside next_event_internal() or any callers when
       C-g should be read as a user event, which was impossible to
       implement in practice.

       Now what we do is fairly simple.  Callers of next_event_internal()
       that want C-g read as a user event call begin_dont_check_for_quit().
       next_event_internal(), when it gets a C-g, simply sets Vquit_flag
       (just as when a C-g is detected during the operation of QUIT or
       QUITP), and then tries to QUIT.  This will fail if blocked by the
       previous call, at which point next_event_internal() will return
       the C-g as an event.  To unblock things, first set Vquit_flag to
       nil (it was set to t when the C-g was read, and if we don't reset
       it, the next call to QUIT will quit), and then unbind_to() the
       depth returned by begin_dont_check_for_quit().  It makes no
       difference if QUIT is called a zillion times in next_event_internal()
       or anywhere else, because it's blocked and will never signal.

   --ben
*/


/* Defer all checking or processing of C-g.  You can do this, for example,
   if you want to read C-g's as events.  (In that case, you should set
   Vquit_flag to Qnil just before you unbind, because it typically gets set
   as a result of reading C-g.) */

int
begin_dont_check_for_quit (void)
{
  int depth = specpdl_depth ();
  /* As an optimization in QUIT_FLAG_SAYS_SHOULD_QUIT, we bind inhibit-quit
     to t -- it has to be checked anyway, and by doing this, we only need
     to check dont_check_for_quit when quit-flag == `critical', which is
     rare. */
  specbind (Qinhibit_quit, Qt);
  internal_bind_int (&dont_check_for_quit, 1);

  return depth;
}
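
/* A minimal sketch of the calling pattern described in the comment above
   and in part [6] of the overview: defer quit handling, read events (so
   that a C-g comes back as an ordinary event), then clear quit-flag and
   unwind.  The event-reading call in the middle is a stand-in, not the
   real next_event_internal() signature. */
#if 0
static void
example_read_cg_as_event (void)
{
  int depth = begin_dont_check_for_quit ();

  read_some_events ();          /* hypothetical; a C-g only sets Vquit_flag */

  Vquit_flag = Qnil;            /* forget the C-g so QUIT won't fire later */
  unbind_to (depth);            /* undo the inhibit-quit bindings */
}
#endif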

/* If we're inside of a begin_dont_check_for_quit() section, but want
   to temporarily enable quit-checking, call this.  This is used in
   particular when processing menu filters -- some menu filters do
   antisocial things like load large amounts of Lisp code (custom in
   particular), and we obviously want a way of breaking out of any
   problems.  If you do use this, you should really be trapping the
   throw() that comes from the quitting (as does the code that handles
   menus popping up). */

int
begin_do_check_for_quit (void)
{
  int depth = specpdl_depth ();
  specbind (Qinhibit_quit, Qnil);
  internal_bind_int (&dont_check_for_quit, 0);
  /* #### should we set Vquit_flag to Qnil? */
  return depth;
}
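
/* A minimal sketch of how the two sections can nest, as the comment above
   suggests for menu filters: quit checking is deferred for the outer event
   handling, then re-enabled just around the potentially long-running filter
   call.  The filter function here is hypothetical, and a real caller would
   also trap the throw coming out of a quit. */
#if 0
static void
example_run_menu_filter (void)
{
  int outer = begin_dont_check_for_quit ();  /* reading events; defer C-g */
  int inner = begin_do_check_for_quit ();    /* but allow C-g in the filter */

  run_menu_filter ();                        /* hypothetical long-running call */

  unbind_to (inner);                         /* back to deferred C-g */
  unbind_to (outer);
}
#endif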

/* The effect of this function is to set Vquit_flag appropriately if the
   user pressed C-g or Sh-C-g.  After this function finishes, Vquit_flag
   will be Qt for C-g, Qcritical for Sh-C-g, and unchanged otherwise.
   The C-g or Sh-C-g is discarded, so it won't be noticed again.
*/

void
check_quit (void)
{
  /* dont_check_for_quit is set in three circumstances:

     (1) when we are in the process of changing the window
         configuration.  The frame might be in an inconsistent state,
         which will cause assertion failures if we check for QUIT.

     (2) when we are reading events, and want to read the C-g
         as an event.  The normal check for quit will discard the C-g,
         which would be bad.

     (3) when we're going down with a fatal error.  we're most likely
         in an inconsistent state, and we definitely don't want to be
         interrupted. */

  /* We should *not* conditionalize on Vinhibit_quit, or
     critical-quit (Control-Shift-G) won't work right. */

  /* WARNING: Even calling check_quit(), without actually dispatching
     a quit signal, can result in arbitrary Lisp code getting executed
     -- at least under Windows.  (Not to mention obvious Lisp
     invocations like asynchronous timer callbacks.)  Here's a sample
     stack trace to demonstrate:

     NTDLL! DbgBreakPoint@0 address 0x77f9eea9
     assert_failed(const char * 0x012d036c, int 4596, const char * 0x012d0354) line 3478
     re_match_2_internal(re_pattern_buffer * 0x012d6780, const unsigned char * 0x00000000, int 0, const unsigned char * 0x022f9328, int 34, int 0, re_registers * 0x012d53d0 search_regs, int 34) line 4596 + 41 bytes
     re_search_2(re_pattern_buffer * 0x012d6780, const char * 0x00000000, int 0, const char * 0x022f9328, int 34, int 0, int 34, re_registers * 0x012d53d0 search_regs, int 34) line 4269 + 37 bytes
     re_search(re_pattern_buffer * 0x012d6780, const char * 0x022f9328, int 34, int 0, int 34, re_registers * 0x012d53d0 search_regs) line 4031 + 37 bytes
     string_match_1(long 31222628, long 30282164, long 28377092, buffer * 0x022fde00, int 0) line 413 + 69 bytes
     Fstring_match(long 31222628, long 30282164, long 28377092, long 28377092) line 436 + 34 bytes
     Ffuncall(int 3, long * 0x008297f8) line 3488 + 168 bytes
     execute_optimized_program(const unsigned char * 0x020ddc50, int 6, long * 0x020ddf50) line 744 + 16 bytes
     funcall_compiled_function(long 34407748, int 1, long * 0x00829aec) line 516 + 53 bytes
     Ffuncall(int 2, long * 0x00829ae8) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x020ddc90, int 4, long * 0x020ddf90) line 744 + 16 bytes
     funcall_compiled_function(long 34407720, int 1, long * 0x00829e28) line 516 + 53 bytes
     Ffuncall(int 2, long * 0x00829e24) line 3523 + 17 bytes
     mapcar1(long 15, long * 0x00829e48, long 34447820, long 34187868) line 2929 + 11 bytes
     Fmapcar(long 34447820, long 34187868) line 3035 + 21 bytes
     Ffuncall(int 3, long * 0x00829f20) line 3488 + 93 bytes
     execute_optimized_program(const unsigned char * 0x020c2b70, int 7, long * 0x020dd010) line 744 + 16 bytes
     funcall_compiled_function(long 34407580, int 2, long * 0x0082a210) line 516 + 53 bytes
     Ffuncall(int 3, long * 0x0082a20c) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x020cf810, int 6, long * 0x020cfb10) line 744 + 16 bytes
     funcall_compiled_function(long 34407524, int 0, long * 0x0082a580) line 516 + 53 bytes
     Ffuncall(int 1, long * 0x0082a57c) line 3523 + 17 bytes
     run_hook_with_args_in_buffer(buffer * 0x022fde00, int 1, long * 0x0082a57c, int 0) line 3980 + 13 bytes
     run_hook_with_args(int 1, long * 0x0082a57c, int 0) line 3993 + 23 bytes
     Frun_hooks(int 1, long * 0x0082a57c) line 3847 + 19 bytes
     run_hook(long 34447484) line 4094 + 11 bytes
     unsafe_handle_wm_initmenu_1(frame * 0x01dbb000) line 736 + 11 bytes
     unsafe_handle_wm_initmenu(long 28377092) line 807 + 11 bytes
     condition_case_1(long 28377116, long (long)* 0x0101c827 unsafe_handle_wm_initmenu(long), long 28377092, long (long, long)* 0x01005fa4 mswindows_modal_loop_error_handler(long, long), long 28377092) line 1692 + 7 bytes
     mswindows_protect_modal_loop(long (long)* 0x0101c827 unsafe_handle_wm_initmenu(long), long 28377092) line 1194 + 32 bytes
     mswindows_handle_wm_initmenu(HMENU__ * 0x00010199, frame * 0x01dbb000) line 826 + 17 bytes
     mswindows_wnd_proc(HWND__ * 0x000501da, unsigned int 278, unsigned int 65945, long 0) line 3089 + 31 bytes
     USER32! UserCallWinProc@20 + 24 bytes
     USER32! DispatchClientMessage@20 + 47 bytes
     USER32! __fnDWORD@4 + 34 bytes
     NTDLL! KiUserCallbackDispatcher@12 + 19 bytes
     USER32! DispatchClientMessage@20 address 0x77e163cc
     USER32! DefWindowProcW@16 + 34 bytes
     qxeDefWindowProc(HWND__ * 0x000501da, unsigned int 274, unsigned int 61696, long 98) line 1188 + 22 bytes
     mswindows_wnd_proc(HWND__ * 0x000501da, unsigned int 274, unsigned int 61696, long 98) line 3362 + 21 bytes
     USER32! UserCallWinProc@20 + 24 bytes
     USER32! DispatchClientMessage@20 + 47 bytes
     USER32! __fnDWORD@4 + 34 bytes
     NTDLL! KiUserCallbackDispatcher@12 + 19 bytes
     USER32! DispatchClientMessage@20 address 0x77e163cc
     USER32! DefWindowProcW@16 + 34 bytes
     qxeDefWindowProc(HWND__ * 0x000501da, unsigned int 262, unsigned int 98, long 540016641) line 1188 + 22 bytes
     mswindows_wnd_proc(HWND__ * 0x000501da, unsigned int 262, unsigned int 98, long 540016641) line 3362 + 21 bytes
     USER32! UserCallWinProc@20 + 24 bytes
     USER32! DispatchMessageWorker@8 + 244 bytes
     USER32! DispatchMessageW@4 + 11 bytes
     qxeDispatchMessage(const tagMSG * 0x0082c684 {msg=0x00000106 wp=0x00000062 lp=0x20300001}) line 989 + 10 bytes
     mswindows_drain_windows_queue() line 1345 + 9 bytes
     emacs_mswindows_quit_p() line 3947
     event_stream_quit_p() line 666
     check_quit() line 686
     check_what_happened() line 437
     re_match_2_internal(re_pattern_buffer * 0x012d5a18, const unsigned char * 0x00000000, int 0, const unsigned char * 0x02235000, int 23486, int 14645, re_registers * 0x012d53d0 search_regs, int 23486) line 4717 + 14 bytes
     re_search_2(re_pattern_buffer * 0x012d5a18, const char * 0x02235000, int 23486, const char * 0x0223b38e, int 0, int 14645, int 8841, re_registers * 0x012d53d0 search_regs, int 23486) line 4269 + 37 bytes
     search_buffer(buffer * 0x022fde00, long 29077572, long 13789, long 23487, long 1, int 1, long 28377092, long 28377092, int 0) line 1224 + 89 bytes
     search_command(long 29077572, long 46975, long 28377116, long 28377092, long 28377092, int 1, int 1, int 0) line 1054 + 151 bytes
     Fre_search_forward(long 29077572, long 46975, long 28377116, long 28377092, long 28377092) line 2147 + 31 bytes
     Ffuncall(int 4, long * 0x0082ceb0) line 3488 + 216 bytes
     execute_optimized_program(const unsigned char * 0x02047810, int 13, long * 0x02080c10) line 744 + 16 bytes
     funcall_compiled_function(long 34187208, int 3, long * 0x0082d1b8) line 516 + 53 bytes
     Ffuncall(int 4, long * 0x0082d1b4) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x01e96a10, int 6, long * 0x020ae510) line 744 + 16 bytes
     funcall_compiled_function(long 34186676, int 3, long * 0x0082d4a0) line 516 + 53 bytes
     Ffuncall(int 4, long * 0x0082d49c) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x02156b50, int 4, long * 0x020c2db0) line 744 + 16 bytes
     funcall_compiled_function(long 34186564, int 2, long * 0x0082d780) line 516 + 53 bytes
     Ffuncall(int 3, long * 0x0082d77c) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x0082d964, int 3, long * 0x020c2d70) line 744 + 16 bytes
     Fbyte_code(long 29405156, long 34352480, long 7) line 2392 + 38 bytes
     Feval(long 34354440) line 3290 + 187 bytes
     condition_case_1(long 34354572, long (long)* 0x01087232 Feval(long), long 34354440, long (long, long)* 0x01084764 run_condition_case_handlers(long, long), long 28377092) line 1692 + 7 bytes
     condition_case_3(long 34354440, long 28377092, long 34354572) line 1779 + 27 bytes
     execute_rare_opcode(long * 0x0082dc7c, const unsigned char * 0x01b090af, int 143) line 1269 + 19 bytes
     execute_optimized_program(const unsigned char * 0x01b09090, int 6, long * 0x020ae590) line 654 + 17 bytes
     funcall_compiled_function(long 34186620, int 0, long * 0x0082df68) line 516 + 53 bytes
     Ffuncall(int 1, long * 0x0082df64) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x02195470, int 1, long * 0x020c2df0) line 744 + 16 bytes
     funcall_compiled_function(long 34186508, int 0, long * 0x0082e23c) line 516 + 53 bytes
     Ffuncall(int 1, long * 0x0082e238) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x01e5d410, int 6, long * 0x0207d410) line 744 + 16 bytes
     funcall_compiled_function(long 34186312, int 1, long * 0x0082e524) line 516 + 53 bytes
     Ffuncall(int 2, long * 0x0082e520) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x02108fb0, int 2, long * 0x020c2e30) line 744 + 16 bytes
     funcall_compiled_function(long 34186340, int 0, long * 0x0082e7fc) line 516 + 53 bytes
     Ffuncall(int 1, long * 0x0082e7f8) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x020fe150, int 2, long * 0x01e6f510) line 744 + 16 bytes
     funcall_compiled_function(long 31008124, int 0, long * 0x0082ebd8) line 516 + 53 bytes
     Ffuncall(int 1, long * 0x0082ebd4) line 3523 + 17 bytes
     run_hook_with_args_in_buffer(buffer * 0x022fde00, int 1, long * 0x0082ebd4, int 0) line 3980 + 13 bytes
     run_hook_with_args(int 1, long * 0x0082ebd4, int 0) line 3993 + 23 bytes
     Frun_hooks(int 1, long * 0x0082ebd4) line 3847 + 19 bytes
     Ffuncall(int 2, long * 0x0082ebd0) line 3509 + 14 bytes
     execute_optimized_program(const unsigned char * 0x01ef2210, int 5, long * 0x01da8e10) line 744 + 16 bytes
     funcall_compiled_function(long 31020440, int 2, long * 0x0082eeb8) line 516 + 53 bytes
     Ffuncall(int 3, long * 0x0082eeb4) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x0082f09c, int 3, long * 0x01d89390) line 744 + 16 bytes
     Fbyte_code(long 31102388, long 30970752, long 7) line 2392 + 38 bytes
     Feval(long 31087568) line 3290 + 187 bytes
     condition_case_1(long 30961240, long (long)* 0x01087232 Feval(long), long 31087568, long (long, long)* 0x01084764 run_condition_case_handlers(long, long), long 28510180) line 1692 + 7 bytes
     condition_case_3(long 31087568, long 28510180, long 30961240) line 1779 + 27 bytes
     execute_rare_opcode(long * 0x0082f450, const unsigned char * 0x01ef23ec, int 143) line 1269 + 19 bytes
     execute_optimized_program(const unsigned char * 0x01ef2310, int 6, long * 0x01da8f10) line 654 + 17 bytes
     funcall_compiled_function(long 31020412, int 1, long * 0x0082f740) line 516 + 53 bytes
     Ffuncall(int 2, long * 0x0082f73c) line 3523 + 17 bytes
     execute_optimized_program(const unsigned char * 0x020fe650, int 3, long * 0x01d8c490) line 744 + 16 bytes
     funcall_compiled_function(long 31020020, int 2, long * 0x0082fa14) line 516 + 53 bytes
     Ffuncall(int 3, long * 0x0082fa10) line 3523 + 17 bytes
     Fcall_interactively(long 29685180, long 28377092, long 28377092) line 1008 + 22 bytes
     Fcommand_execute(long 29685180, long 28377092, long 28377092) line 2929 + 17 bytes
     execute_command_event(command_builder * 0x01be1900, long 36626492) line 4048 + 25 bytes
     Fdispatch_event(long 36626492) line 4341 + 70 bytes
     Fcommand_loop_1() line 582 + 9 bytes
     command_loop_1(long 28377092) line 495
     condition_case_1(long 28377188, long (long)* 0x01064fb9 command_loop_1(long), long 28377092, long (long, long)* 0x010649d0 cmd_error(long, long), long 28377092) line 1692 + 7 bytes
     command_loop_3() line 256 + 35 bytes
     command_loop_2(long 28377092) line 269
     internal_catch(long 28457612, long (long)* 0x01064b20 command_loop_2(long), long 28377092, int * volatile 0x00000000) line 1317 + 7 bytes
     initial_command_loop(long 28377092) line 305 + 25 bytes
     STACK_TRACE_EYE_CATCHER(int 1, char * * 0x01b63ff0, char * * 0x01ca5300, int 0) line 2501
     main(int 1, char * * 0x01b63ff0, char * * 0x01ca5300) line 2938
     XEMACS! mainCRTStartup + 180 bytes
     _start() line 171
     KERNEL32! BaseProcessStart@4 + 115547 bytes

  */
  int specdepth;

  if (dont_check_for_quit)
    return;

  if (quit_check_signal_happened)
    {
      /* Since arbitrary Lisp code may be executed (e.g. through a menu
         filter, see backtrace directly above), GC might happen,
         which would majorly fuck a lot of things, e.g. re_match()
         [string gets relocated] and lots of other code that's not
         prepared to handle GC in QUIT. */
      specdepth = begin_gc_forbidden ();
      quit_check_signal_happened = 0;
      event_stream_quit_p ();
      unbind_to (specdepth);
    }
}



void
init_poll_for_quit (void)
{
#if !defined (SIGIO) && !defined (DONT_POLL_FOR_QUIT)
  /* Check for C-g every 1/4 of a second.

     #### This is just a guess.  Some investigation will have to be
     done to see what the best value is.  The best value is the
     smallest possible value that doesn't cause a significant amount
     of running time to be spent in C-g checking. */
  if (!poll_for_quit_id)
    poll_for_quit_id =
      event_stream_generate_wakeup (NORMAL_QUIT_CHECK_TIMEOUT_MSECS,
                                    NORMAL_QUIT_CHECK_TIMEOUT_MSECS,
                                    Qnil, Qnil, 1);
#endif /* not SIGIO and not DONT_POLL_FOR_QUIT */
}

#if 0 /* not used anywhere */

void
reset_poll_for_quit (void)
{
#if !defined (SIGIO) && !defined (DONT_POLL_FOR_QUIT)
  if (poll_for_quit_id)
    {
      event_stream_disable_wakeup (poll_for_quit_id, 1);
      poll_for_quit_id = 0;
    }
#endif /* not SIGIO and not DONT_POLL_FOR_QUIT */
}

#endif /* 0 */

#if defined (HAVE_UNIX_PROCESSES) && !defined (SIGCHLD)

static void
init_poll_for_sigchld (void)
{
  /* Check for terminated processes every 1/4 of a second.

     #### This is just a guess.  Some investigation will have to be
     done to see what the best value is.  The best value is the
     smallest possible value that doesn't cause a significant amount
     of running time to be spent in process-termination checking.
  */
  poll_for_sigchld_id =
    event_stream_generate_wakeup (NORMAL_SIGCHLD_CHECK_TIMEOUT_MSECS,
                                  NORMAL_SIGCHLD_CHECK_TIMEOUT_MSECS,
                                  Qnil, Qnil, 1);
}

#endif /* not SIGCHLD */


/************************************************************************/
/*                            initialization                            */
/************************************************************************/

/* If we've been nohup'ed, keep it that way.
   This allows `nohup xemacs &' to work.
   More generally, if a normally fatal signal has been redirected
   to SIG_IGN by our invocation environment, trust the environment.
   This keeps xemacs from being killed by a SIGQUIT intended for a
   different process after having been backgrounded under a
   non-job-control shell! */
static void
handle_signal_if_fatal (int signo)
{
  if (EMACS_SIGNAL (signo, fatal_error_signal) == SIG_IGN)
    EMACS_SIGNAL (signo, SIG_IGN);
}

void
init_signals_very_early (void)
{
  /* Catch all signals that would kill us.
     Don't catch these signals in batch mode if not initialized.
     On some machines, this sets static data that would make
     signal fail to work right when the dumped Emacs is run.  */
  if (noninteractive && !initialized)
    return;

  handle_signal_if_fatal (SIGILL);  /* ANSI */
  handle_signal_if_fatal (SIGABRT); /* ANSI */
  handle_signal_if_fatal (SIGFPE);  /* ANSI */
  handle_signal_if_fatal (SIGSEGV); /* ANSI */
  handle_signal_if_fatal (SIGTERM); /* ANSI */

#ifdef SIGHUP
  handle_signal_if_fatal (SIGHUP);  /* POSIX */
#endif
#ifdef SIGQUIT
  handle_signal_if_fatal (SIGQUIT); /* POSIX */
#endif
#ifdef SIGTRAP
  handle_signal_if_fatal (SIGTRAP); /* POSIX */
#endif
#ifdef SIGUSR1
  handle_signal_if_fatal (SIGUSR1); /* POSIX */
#endif
#ifdef SIGUSR2
  handle_signal_if_fatal (SIGUSR2); /* POSIX */
#endif
#ifdef SIGPIPE
  handle_signal_if_fatal (SIGPIPE); /* POSIX */
#endif
#ifdef SIGALRM
  /* This will get reset later, once we're
     capable of handling it properly. */
  handle_signal_if_fatal (SIGALRM); /* POSIX */
#endif

#ifdef SIGBUS
  handle_signal_if_fatal (SIGBUS);  /* XPG5 */
#endif
#ifdef SIGSYS
  handle_signal_if_fatal (SIGSYS);  /* XPG5 */
#endif
#ifdef SIGXCPU
  handle_signal_if_fatal (SIGXCPU); /* XPG5 */
#endif
#ifdef SIGXFSZ
  handle_signal_if_fatal (SIGXFSZ); /* XPG5 */
#endif
#ifdef SIGVTALRM
  handle_signal_if_fatal (SIGVTALRM); /* XPG5 */
#endif
#ifdef SIGPROF
  /* Messes up the REAL profiler */
  /* handle_signal_if_fatal (SIGPROF); */ /* XPG5 */
#endif

#ifdef SIGHWE
  handle_signal_if_fatal (SIGHWE);
#endif
#ifdef SIGPRE
  handle_signal_if_fatal (SIGPRE);
#endif
#ifdef SIGORE
  handle_signal_if_fatal (SIGORE);
#endif
#ifdef SIGUME
  handle_signal_if_fatal (SIGUME);
#endif
#ifdef SIGDLK
  handle_signal_if_fatal (SIGDLK);
#endif
#ifdef SIGCPULIM
  handle_signal_if_fatal (SIGCPULIM);
#endif
#ifdef SIGIOT
  handle_signal_if_fatal (SIGIOT);
#endif
#ifdef SIGEMT
  handle_signal_if_fatal (SIGEMT);
#endif
#ifdef SIGLOST
  handle_signal_if_fatal (SIGLOST);
#endif
#ifdef SIGSTKFLT /* coprocessor stack fault under Linux */
  handle_signal_if_fatal (SIGSTKFLT);
#endif
#ifdef SIGUNUSED /* exists under Linux, and will kill process! */
  handle_signal_if_fatal (SIGUNUSED);
#endif

#ifdef AIX
  /* 20 is SIGCHLD, 21 is SIGTTIN, 22 is SIGTTOU. */
#ifndef _I386
  handle_signal_if_fatal (SIGIOINT);
#endif
  handle_signal_if_fatal (SIGGRANT);
  handle_signal_if_fatal (SIGRETRACT);
  handle_signal_if_fatal (SIGSOUND);
  handle_signal_if_fatal (SIGMSG);
#endif /* AIX */

#ifdef SIGDANGER
  /* This just means available memory is getting low. */
  EMACS_SIGNAL (SIGDANGER, memory_warning_signal);
#endif
}

void
syms_of_signal (void)
{
  DEFSUBR (Fwaiting_for_user_input_p);
}

void
init_interrupts_late (void)
{
  if (!noninteractive)
    {
      EMACS_SIGNAL (SIGINT, interrupt_signal);
#ifdef HAVE_TERMIO
      /* On systems with TERMIO, C-g is set up for both SIGINT and SIGQUIT
         and we can't tell which one it will give us. */
      EMACS_SIGNAL (SIGQUIT, interrupt_signal);
#endif /* HAVE_TERMIO */
      init_async_timeouts ();
#ifdef SIGIO
      EMACS_SIGNAL (SIGIO, input_available_signal);
# ifdef SIGPOLL /* XPG5 */
      /* Some systems (e.g. Motorola SVR4) losingly have different
         values for SIGIO and SIGPOLL, and send SIGPOLL instead of
         SIGIO.  On those same systems, an uncaught SIGPOLL kills the
         process. */
      EMACS_SIGNAL (SIGPOLL, input_available_signal);
# endif
#elif !defined (DONT_POLL_FOR_QUIT)
      init_poll_for_quit ();
#endif
    }

#if defined (HAVE_UNIX_PROCESSES) && !defined (SIGCHLD)
  init_poll_for_sigchld ();
#endif

  EMACS_UNBLOCK_ALL_SIGNALS ();

  interrupts_initted = 1;
}