Mercurial > hg > xemacs-beta
diff src/lisp-disunion.h @ 207:e45d5e7c476e r20-4b2
Import from CVS: tag r20-4b2
author | cvs |
---|---|
date | Mon, 13 Aug 2007 10:03:52 +0200 |
parents | eb5470882647 |
children | 78f53ef88e17 |
line wrap: on
line diff
--- a/src/lisp-disunion.h Mon Aug 13 10:02:48 2007 +0200 +++ b/src/lisp-disunion.h Mon Aug 13 10:03:52 2007 +0200 @@ -19,59 +19,164 @@ Boston, MA 02111-1307, USA. */ /* Synched up with: FSF 19.30. Split out from lisp.h. */ +/* This file has diverged greatly from FSF Emacs. Syncing is no + longer desired or possible */ -/* If union type is not wanted, define Lisp_Object as just a number - and define the macros below to extract fields by shifting */ +/* + * Format of a non-union-type Lisp Object + * + * For the USE_MINIMAL_TAGBITS implementation: + * + * 3 2 1 0 + * bit 10987654321098765432109876543210 + * -------------------------------- + * VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVTT + * + * For the non-USE_MINIMAL_TAGBITS implementation: + * + * 3 2 1 0 + * bit 10987654321098765432109876543210 + * -------------------------------- + * TTTMVVVVVVVVVVVVVVVVVVVVVVVVVVVV + * + * V = value bits + * T = type bits + * M = mark bits + * + * For integral Lisp types, i.e. integers and characters, the value + * bits are the Lisp object. + * + * The object is obtained by masking off the type and mark + * bits. In the USE_MINIMAL_TAGBITS implementation, bit 1 is + * used as a value bit by splitting the Lisp integer type into + * two subtypes, Lisp_Type_Int_Even and Lisp_Type_Int_Odd. By + * this trickery we get 31 bits for integers instead of 30. + * + * In the non-USE_MINIMAL_TAGBITS world, Lisp integers are 28 + * bits, or more properly (LONGBITS - GCTYPEBITS - 1) bits. + * + * For non-integral types, the value bits of Lisp_Object contain a + * pointer to structure containing the object. The pointer is + * obtained by masking off the type and mark bits. + * + * In the USE_MINIMAL_TAGBITS implementation, all + * pointer-based types are coalesced under a single type called + * Lisp_Type_Record. The type bits for this type are required + * by the implementation to be 00, just like the least + * significant bits of word-aligned struct pointers on 32-bit + * hardware. 
Because of this, Lisp_Object pointers don't have + * to be masked and are full-sized. + * + * In the non-USE_MINIMAL_TAGBITS implementation, the type and + * mark bits must be masked off and pointers are limited to 28 + * bits (really LONGBITS - GCTYPEBITS - 1 bits). + */ -#define Qzero 0 +#ifdef USE_MINIMAL_TAGBITS +# define Qzero Lisp_Type_Int_Even +# define VALMASK (((1L << (VALBITS)) - 1L) << (GCTYPEBITS)) +#else +# define Qzero Lisp_Type_Int +# define VALMASK ((1L << (VALBITS)) - 1L) +# define GCTYPEMASK ((1L << (GCTYPEBITS)) - 1L) +#endif typedef EMACS_INT Lisp_Object; -#define VALMASK ((1L << (VALBITS)) - 1L) -#define GCTYPEMASK ((1L << (GCTYPEBITS)) - 1L) - -/* comment from FSFmacs (perhaps not accurate here): +#define Qnull_pointer 0 - This is set in the car of a cons and in the plist slot of a symbol - to indicate it is marked. Likewise in the plist slot of an interval, - the chain slot of a marker, the type slot of a float, and the name - slot of a buffer. +/* + * There are no mark bits in the USE_MINIMAL_TAGBITS implementation. + * Integers and characters don't need to be marked. All other types + * are lrecord-based, which means they get marked by incrementing + * their ->implementation pointer. + */ +#if GCMARKBITS > 0 + /* + * XMARKBIT accesses the markbit. Markbits are used only in particular + * slots of particular structure types. Other markbits are always + * zero. Outside of garbage collection, all mark bits are always zero. + */ +# define MARKBIT (1UL << (VALBITS)) +# define XMARKBIT(a) ((a) & (MARKBIT)) + +# define XMARK(a) ((void) ((a) |= (MARKBIT))) +# define XUNMARK(a) ((void) ((a) &= (~(MARKBIT)))) +#endif - In strings, this bit in the size field indicates that the string - is a "large" one, one which was separately malloc'd - rather than being part of a string block. */ - -#define MARKBIT (1UL << (VALBITS)) +/* + * Extract the type bits from a Lisp_Object. If using USE_MINIMAL_TAGBITS, + * the least significant two bits are the type bits. 
Otherwise the + * most significant GCTYPEBITS bits are the type bits. + * + * In the non-USE_MINIMAL_TAGBITS case, one needs to override this + * if there must be high bits set in data space. Masking the bits + * (doing the result of the below & ((1 << (GCTYPEBITS)) - 1) would + * work on all machines, but would penalize machines which don't + * need it) + */ +#ifdef USE_MINIMAL_TAGBITS +# define XTYPE(a) ((enum Lisp_Type) (((EMACS_UINT)(a)) & ~(VALMASK))) +#else +# define XTYPE(a) ((enum Lisp_Type) (((EMACS_UINT)(a)) >> ((VALBITS) + 1))) +#endif - -/* These macros extract various sorts of values from a Lisp_Object. - For example, if tem is a Lisp_Object whose type is Lisp_Type_Cons, - XCONS (tem) is the struct Lisp_Cons * pointing to the memory for that cons. */ - -/* One needs to override this if there must be high bits set in data space - (doing the result of the below & ((1 << (GCTYPE + 1)) - 1) would work - on all machines, but would penalize machines which don't need it) */ -#define XTYPE(a) ((enum Lisp_Type) (((EMACS_UINT)(a)) >> ((VALBITS) + 1))) +/* + * This applies only to the non-USE_MINIMAL_TAGBITS Lisp_Object. + * + * In the past, during garbage collection, XGCTYPE needed to be used + * for extracting types so that the mark bit was ignored. XGCTYPE + * did an extra & operation to remove the mark bit. But the mark + * bit has been since moved so that the type bits could be extracted + * with a single shift operation, making XGCTYPE no more expensive + * than XTYPE, so the two operations are now equivalent. + */ +#ifndef XGCTYPE +# define XGCTYPE(a) XTYPE(a) +#endif #define EQ(x,y) ((x) == (y)) -#define GC_EQ(x,y) (XGCTYPE (x) == XGCTYPE (y) && XPNTR (x) == XPNTR (y)) -/* Extract the value of a Lisp_Object as a signed integer. */ - -#ifndef XREALINT /* Some machines need to do this differently. 
*/ -# define XREALINT(a) (((a) << (LONGBITS-VALBITS)) >> (LONGBITS-VALBITS)) +#ifdef USE_MINIMAL_TAGBITS +# define GC_EQ(x,y) EQ(x,y) +#else +# define GC_EQ(x,y) (XGCTYPE (x) == XGCTYPE (y) && XPNTR (x) == XPNTR (y)) #endif -/* Extract the value as an unsigned integer. This is a basis - for extracting it as a pointer to a structure in storage. */ +/* + * Extract the value of a Lisp_Object as a signed integer. + * + * The right shifts below are non-portable because >> is allowed to + * sign extend or not sign extend signed integers depending on the + * compiler implementor's preference. But this right-shifting of + * signed ints has been here forever, so the apparent reality is + * that all compilers of any consequence do sign extension, which is + * what is needed here. + */ +#ifndef XREALINT /* Some machines need to do this differently. */ +# ifdef USE_MINIMAL_TAGBITS +# define XREALINT(a) ((a) >> (LONGBITS-VALBITS-1)) +# else +# define XREALINT(a) (((a) << (LONGBITS-VALBITS)) >> (LONGBITS-VALBITS)) +# endif +#endif -#define XUINT(a) ((a) & VALMASK) +/* + * Extract the pointer value bits of a pointer based type. + */ +#ifdef USE_MINIMAL_TAGBITS +# define XPNTRVAL(a) (a) /* This depends on Lisp_Type_Record == 0 */ +# define XCHARVAL(a) ((a) >> (LONGBITS-VALBITS)) +#else +# define XPNTRVAL(a) ((a) & VALMASK) +# define XCHARVAL(a) XPNTRVAL(a) #endif #ifdef HAVE_SHM /* In this representation, data is found in two widely separated segments. */ extern int pure_size; # define XPNTR(a) \ - (XUINT (a) | (XUINT (a) > pure_size ? DATA_SEG_BITS : PURE_SEG_BITS)) + (XPNTRVAL (a) | (XPNTRVAL (a) > pure_size ? DATA_SEG_BITS : PURE_SEG_BITS)) # else /* not HAVE_SHM */ # ifdef DATA_SEG_BITS /* This case is used for the rt-pc. @@ -79,38 +79,38 @@ and did not adjust it in that case. But I don't think that zero should ever be found in a Lisp object whose data type says it points to something. 
*/ -# define XPNTR(a) (XUINT (a) | DATA_SEG_BITS) +# define XPNTR(a) (XPNTRVAL (a) | DATA_SEG_BITS) # else -# define XPNTR(a) XUINT (a) +# define XPNTR(a) XPNTRVAL (a) # endif #endif /* not HAVE_SHM */ -#define XSETINT(a, b) XSETOBJ (a, Lisp_Type_Int, b) - -#define XSETCHAR(var, value) XSETOBJ (var, Lisp_Type_Char, value) - -/* XSETOBJ was formerly named XSET. The name change was made to catch - C code that attempts to use this macro. You should always use the - individual settor macros (XSETCONS, XSETBUFFER, etc.) instead. */ - -#define XSETOBJ(var, type_tag, value) \ - ((void) ((var) = (((EMACS_INT) (type_tag) << ((VALBITS) + 1)) \ - + ((EMACS_INT) (value) & VALMASK)))) +#ifdef USE_MINIMAL_TAGBITS -/* During garbage collection, XGCTYPE must be used for extracting types - so that the mark bit is ignored. XMARKBIT accesses the markbit. - Markbits are used only in particular slots of particular structure types. - Other markbits are always zero. - Outside of garbage collection, all mark bits are always zero. */ +/* XSETINT depends on Lisp_Type_Int_Even == 1 and Lisp_Type_Int_Odd == 3 */ +# define XSETINT(var, value) \ + ((void) ((var) = ((value) << (LONGBITS-VALBITS-1)) + 1)) +# define XSETCHAR(var, value) \ + ((void) ((var) = ((value) << (LONGBITS-VALBITS)) + Lisp_Type_Char)) +# define XSETOBJ(var, type_tag, value) \ + ((void) ((var) = ((EMACS_UINT) (value)))) -#ifndef XGCTYPE -# define XGCTYPE(a) XTYPE(a) -#endif +#else -# define XMARKBIT(a) ((a) & (MARKBIT)) - -# define XMARK(a) ((void) ((a) |= (MARKBIT))) -# define XUNMARK(a) ((void) ((a) &= (~(MARKBIT)))) +# define XSETINT(a, b) XSETOBJ (a, Lisp_Type_Int, b) +# define XSETCHAR(var, value) XSETOBJ (var, Lisp_Type_Char, value) +# define XSETOBJ(var, type_tag, value) \ + ((void) ((var) = (((EMACS_UINT) (type_tag) << ((VALBITS) + 1)) \ + + ((EMACS_INT) (value) & VALMASK)))) +#endif /* Use this for turning a (void *) into a Lisp_Object, as when the Lisp_Object is passed into a toolkit callback function */