/* Header file for the buffer manipulation primitives.
   Copyright (C) 1985, 1986, 1992, 1993, 1994, 1995
   Free Software Foundation, Inc.
   Copyright (C) 1995 Sun Microsystems, Inc.
   Copyright (C) 2001, 2002 Ben Wing.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* Synched up with: FSF 19.30. */

/* Authorship:

   FSF: long ago.
   JWZ: separated out bufslots.h, early in Lemacs.
   Ben Wing: almost completely rewritten for Mule, 19.12.
 */

#ifndef INCLUDED_buffer_h_
#define INCLUDED_buffer_h_

#include "casetab.h"
#include "chartab.h"

/************************************************************************/
/*                                                                      */
/*                  definition of Lisp buffer object                    */
/*                                                                      */
/************************************************************************/

/* Note: we keep both Bytebpos and Charbpos versions of some of the
   important buffer positions because they are accessed so much.
   If we didn't do this, we would constantly be invalidating the
   charbpos<->bytebpos cache under Mule.

   Note that under non-Mule, both versions will always be the
   same so we don't really need to keep track of them.  But it
   simplifies the logic to go ahead and do so all the time and
   the memory loss is insignificant. */

/* Formerly, it didn't much matter what went inside the struct buffer_text
   and what went outside it.  Now it does, with the advent of "indirect
   buffers" that share text with another buffer.  An indirect buffer
   shares the same *text* as another buffer, but has its own buffer-local
   variables, its own accessible region, and its own markers and extents.
   (Due to the nature of markers, it doesn't actually matter much whether
   we stick them inside or out of the struct buffer_text -- the user won't
   notice any difference -- but we go ahead and put them outside for
   consistency and overall saneness of algorithm.)

   FSFmacs gets away with not maintaining any "children" pointers from
   a buffer to the indirect buffers that refer to it by putting the
   markers inside of the struct buffer_text, using markers to keep track
   of BEGV and ZV in indirect buffers, and relying on the fact that
   all intervals (text properties and overlays) use markers for their
   start and end points.  We don't do this for extents (markers are
   inefficient anyway and take up space), so we have to maintain
   children pointers.  This is not terribly hard, though, and the
   code to maintain this is just like the code already present in
   extent-parent and extent-children.
   */

struct buffer_text
{
  Intbyte *beg;           /* Actual address of buffer contents. */
  Bytebpos gpt;           /* Index of gap in buffer. */
  Bytebpos z;             /* Index of end of buffer. */
  Charbpos bufz;          /* Equivalent as a Charbpos. */
  Bytecount gap_size;     /* Size of buffer's gap. */
  Bytecount end_gap_size; /* Size of buffer's end gap. */
  long modiff;            /* This counts buffer-modification events
                             for this buffer.  It is incremented for
                             each such event, and never otherwise
                             changed. */
  long save_modiff;       /* Previous value of modiff, as of last
                             time buffer visited or saved a file. */

#ifdef MULE
  /* We keep track of a "known" region for very fast access.  This
     information is text-only so it goes here.  We update this at each
     change to the buffer, so if it's entirely ASCII, these will always
     contain the minimum and maximum positions of the buffer. */
  Charbpos mule_bufmin, mule_bufmax;
  Bytebpos mule_bytmin, mule_bytmax;
  int mule_shifter, mule_three_p;

  /* And we also cache 16 positions for fairly fast access near those
     positions. */
  Charbpos mule_charbpos_cache[16];
  Bytebpos mule_bytebpos_cache[16];

  int entirely_ascii_p;
#endif

  /* Similar to the above, we keep track of positions for which line
     number has last been calculated.  See line-number.c. */
  Lisp_Object line_number_cache;

  /* Change data that goes with the text. */
  struct buffer_text_change_data *changes;
};

struct buffer
{
  struct lcrecord_header header;

  /* This structure holds the coordinates of the buffer contents
     in ordinary buffers.  In indirect buffers, this is not used. */
  struct buffer_text own_text;

  /* This points to the `struct buffer_text' that is used for this buffer.
     In an ordinary buffer, this is the own_text field above.
     In an indirect buffer, this is the own_text field of another buffer. */
  struct buffer_text *text;

  Bytebpos pt;          /* Position of point in buffer. */
  Charbpos bufpt;       /* Equivalent as a Charbpos. */
  Bytebpos begv;        /* Index of beginning of accessible range. */
  Charbpos bufbegv;     /* Equivalent as a Charbpos. */
  Bytebpos zv;          /* Index of end of accessible range. */
  Charbpos bufzv;       /* Equivalent as a Charbpos. */

  int face_change;      /* This is set when a change in how the text should
                           be displayed (e.g., font, color) is made. */

  /* Whether buffer specific face is specified. */
  int buffer_local_face_property;

  /* Change data indicating what portion of the text has changed
     since the last time this was reset.  Used by redisplay.
     Logically we should keep this with the text structure, but
     redisplay resets it for each buffer individually and we don't
     want interference between an indirect buffer and its base
     buffer. */
  struct each_buffer_change_data *changes;

#ifdef REGION_CACHE_NEEDS_WORK
  /* If the long line scan cache is enabled (i.e. the buffer-local
     variable cache-long-line-scans is non-nil), newline_cache
     points to the newline cache, and width_run_cache points to the
     width run cache.

     The newline cache records which stretches of the buffer are
     known *not* to contain newlines, so that they can be skipped
     quickly when we search for newlines.

     The width run cache records which stretches of the buffer are
     known to contain characters whose widths are all the same.  If
     the width run cache maps a character to a value > 0, that value
     is the character's width; if it maps a character to zero, we
     don't know what its width is.  This allows compute_motion to
     process such regions very quickly, using algebra instead of
     inspecting each character.  See also width_table, below. */
  struct region_cache *newline_cache;
  struct region_cache *width_run_cache;
#endif /* REGION_CACHE_NEEDS_WORK */

  /* The markers that refer to this buffer.  This is actually a single
     marker -- successive elements in its marker `chain' are the other
     markers referring to this buffer. */
  Lisp_Marker *markers;

  /* The buffer's extent info.  This is its own type, an extent-info
     object (done this way for ease in marking / finalizing). */
  Lisp_Object extent_info;

  /* ----------------------------------------------------------------- */
  /* All the stuff above this line is the responsibility of insdel.c,
     with some help from marker.c and extents.c.
     All the stuff below this line is the responsibility of buffer.c. */

  /* In an indirect buffer, this points to the base buffer.
     In an ordinary buffer, it is 0.
     We DO mark through this slot. */
  struct buffer *base_buffer;

  /* List of indirect buffers whose base is this buffer.
     If we are an indirect buffer, this will be nil.
     Do NOT mark through this. */
  Lisp_Object indirect_children;

  /* Flags saying which DEFVAR_PER_BUFFER variables
     are local to this buffer. */
  int local_var_flags;

  /* Set to the modtime of the visited file when read or written.
     -1 means visited file was nonexistent.
     0 means visited file modtime unknown; in no case complain
     about any mismatch on next save attempt. */
  int modtime;

  /* The value of text->modiff at the last auto-save. */
  long auto_save_modified;

  /* The time at which we detected a failure to auto-save,
     or -1 if we didn't have a failure. */
  int auto_save_failure_time;

  /* Position in buffer at which display started
     the last time this buffer was displayed. */
  int last_window_start;

  /* Everything from here down must be a Lisp_Object. */

#define MARKED_SLOT(x) Lisp_Object x
#include "bufslots.h"
#undef MARKED_SLOT
};

DECLARE_LRECORD (buffer, struct buffer);
#define XBUFFER(x) XRECORD (x, buffer, struct buffer)
#define XSETBUFFER(x, p) XSETRECORD (x, p, buffer)
#define wrap_buffer(p) wrap_record (p, buffer)
#define BUFFERP(x) RECORDP (x, buffer)
#define CHECK_BUFFER(x) CHECK_RECORD (x, buffer)
#define CONCHECK_BUFFER(x) CONCHECK_RECORD (x, buffer)

#define BUFFER_LIVE_P(b) (!NILP ((b)->name))

#define CHECK_LIVE_BUFFER(x) do {                       \
  CHECK_BUFFER (x);                                     \
  if (!BUFFER_LIVE_P (XBUFFER (x)))                     \
    dead_wrong_type_argument (Qbuffer_live_p, (x));     \
} while (0)

#define CONCHECK_LIVE_BUFFER(x) do {                    \
  CONCHECK_BUFFER (x);                                  \
  if (!BUFFER_LIVE_P (XBUFFER (x)))                     \
    x = wrong_type_argument (Qbuffer_live_p, (x));      \
} while (0)


#define BUFFER_BASE_BUFFER(b) ((b)->base_buffer ? (b)->base_buffer : (b))

/* Map over buffers sharing the same text as MPS_BUF.  MPS_BUFVAR is a
   variable that gets the buffer values (beginning with the base
   buffer, then the children), and MPS_BUFCONS should be a temporary
   Lisp_Object variable.  */
#define MAP_INDIRECT_BUFFERS(mps_buf, mps_bufvar, mps_bufcons)          \
for (mps_bufcons = Qunbound,                                            \
     mps_bufvar = BUFFER_BASE_BUFFER (mps_buf);                         \
     UNBOUNDP (mps_bufcons) ?                                           \
       (mps_bufcons = mps_bufvar->indirect_children,                    \
        1)                                                              \
       : (!NILP (mps_bufcons)                                           \
          && (mps_bufvar = XBUFFER (XCAR (mps_bufcons)), 1)             \
          && (mps_bufcons = XCDR (mps_bufcons), 1));                    \
     )

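/* A sketch of typical usage (the iteration variables and
   do_something_with() are hypothetical, not part of this header): to
   visit every buffer sharing BUF's text, one might write

     {
       struct buffer *other;
       Lisp_Object bufcons;

       MAP_INDIRECT_BUFFERS (buf, other, bufcons)
         {
           do_something_with (other);
         }
     }

   The base buffer is always visited first, then each indirect child. */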

/*----------------------------------------------------------------------*/
/*         Accessor macros for important positions in a buffer          */
/*----------------------------------------------------------------------*/

/* We put them here because some stuff below wants them before the
   place where we would normally put them. */

/* None of these are lvalues.  Use the settor macros below to change
   the positions. */

/* Beginning of buffer. */
#define BI_BUF_BEG(buf) ((Bytebpos) 1)
#define BUF_BEG(buf) ((Charbpos) 1)

/* Beginning of accessible range of buffer. */
#define BI_BUF_BEGV(buf) ((buf)->begv + 0)
#define BUF_BEGV(buf) ((buf)->bufbegv + 0)

/* End of accessible range of buffer. */
#define BI_BUF_ZV(buf) ((buf)->zv + 0)
#define BUF_ZV(buf) ((buf)->bufzv + 0)

/* End of buffer. */
#define BI_BUF_Z(buf) ((buf)->text->z + 0)
#define BUF_Z(buf) ((buf)->text->bufz + 0)

/* Point. */
#define BI_BUF_PT(buf) ((buf)->pt + 0)
#define BUF_PT(buf) ((buf)->bufpt + 0)

/*----------------------------------------------------------------------*/
/*             Converting between positions and addresses               */
/*----------------------------------------------------------------------*/

/* Convert the address of a byte in the buffer into a position. */
INLINE_HEADER Bytebpos BI_BUF_PTR_BYTE_POS (struct buffer *buf, Intbyte *ptr);
INLINE_HEADER Bytebpos
BI_BUF_PTR_BYTE_POS (struct buffer *buf, Intbyte *ptr)
{
  return (ptr - buf->text->beg + 1
          - ((ptr - buf->text->beg + 1) > buf->text->gpt
             ? buf->text->gap_size : 0));
}

#define BUF_PTR_BYTE_POS(buf, ptr) \
  bytebpos_to_charbpos (buf, BI_BUF_PTR_BYTE_POS (buf, ptr))

/* Address of byte at position POS in buffer. */
INLINE_HEADER Intbyte * BI_BUF_BYTE_ADDRESS (struct buffer *buf, Bytebpos pos);
INLINE_HEADER Intbyte *
BI_BUF_BYTE_ADDRESS (struct buffer *buf, Bytebpos pos)
{
  return (buf->text->beg +
          ((pos >= buf->text->gpt ? (pos + buf->text->gap_size) : pos)
           - 1));
}

#define BUF_BYTE_ADDRESS(buf, pos) \
  BI_BUF_BYTE_ADDRESS (buf, charbpos_to_bytebpos (buf, pos))

/* Address of byte before position POS in buffer. */
INLINE_HEADER Intbyte * BI_BUF_BYTE_ADDRESS_BEFORE (struct buffer *buf, Bytebpos pos);
INLINE_HEADER Intbyte *
BI_BUF_BYTE_ADDRESS_BEFORE (struct buffer *buf, Bytebpos pos)
{
  return (buf->text->beg +
          ((pos > buf->text->gpt ? (pos + buf->text->gap_size) : pos)
           - 2));
}

#define BUF_BYTE_ADDRESS_BEFORE(buf, pos) \
  BI_BUF_BYTE_ADDRESS_BEFORE (buf, charbpos_to_bytebpos (buf, pos))

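/* A small worked example of the gap arithmetic above (made-up numbers,
   not code from this file): suppose a buffer holds the six characters
   "foobar" (all ASCII), with gpt == 4 and gap_size == 2.  The memory
   layout, as offsets from text->beg, is

       offset:    0   1   2   3   4   5   6   7
       content:  'f' 'o' 'o' gap gap 'b' 'a' 'r'

   Byte position 3 (the second 'o') lies before the gap, so
   BI_BUF_BYTE_ADDRESS gives beg + 3 - 1 = beg + 2; byte position 4
   ('b') lies at or after the gap, so it gives beg + 4 + 2 - 1 = beg + 5.
   BI_BUF_PTR_BYTE_POS simply inverts this mapping. */
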
/*----------------------------------------------------------------------*/
/*         Converting between byte indices and memory indices           */
/*----------------------------------------------------------------------*/

INLINE_HEADER int valid_membpos_p (struct buffer *buf, Membpos x);
INLINE_HEADER int
valid_membpos_p (struct buffer *buf, Membpos x)
{
  return ((x >= 1 && x <= (Membpos) buf->text->gpt) ||
          (x > (Membpos) (buf->text->gpt + buf->text->gap_size) &&
           x <= (Membpos) (buf->text->z + buf->text->gap_size)));
}

INLINE_HEADER Membpos bytebpos_to_membpos (struct buffer *buf, Bytebpos x);
INLINE_HEADER Membpos
bytebpos_to_membpos (struct buffer *buf, Bytebpos x)
{
  return (Membpos) ((x > buf->text->gpt) ? (x + buf->text->gap_size) : x);
}


INLINE_HEADER Bytebpos membpos_to_bytebpos (struct buffer *buf, Membpos x);
INLINE_HEADER Bytebpos
membpos_to_bytebpos (struct buffer *buf, Membpos x)
{
#ifdef ERROR_CHECK_CHARBPOS
  assert (valid_membpos_p (buf, x));
#endif
  return (Bytebpos) ((x > (Membpos) buf->text->gpt) ?
                     x - buf->text->gap_size :
                     x);
}

#define membpos_to_charbpos(buf, x) \
  bytebpos_to_charbpos (buf, membpos_to_bytebpos (buf, x))
#define charbpos_to_membpos(buf, x) \
  bytebpos_to_membpos (buf, charbpos_to_bytebpos (buf, x))

/* These macros generalize many standard buffer-position functions to
   either a buffer or a string. */

/* Converting between Membposs and Bytebposs, for a buffer-or-string.
   For strings, this is a no-op.  For buffers, this resolves
   to the standard membpos<->bytebpos converters. */

#define buffer_or_string_bytebpos_to_membpos(obj, ind) \
  (BUFFERP (obj) ? bytebpos_to_membpos (XBUFFER (obj), ind) : (Membpos) ind)

#define buffer_or_string_membpos_to_bytebpos(obj, ind) \
  (BUFFERP (obj) ? membpos_to_bytebpos (XBUFFER (obj), ind) : (Bytebpos) ind)

/* Converting between Charbpos's and Bytebposs, for a buffer-or-string.
   For strings, this maps to the bytecount<->charcount converters. */

#define buffer_or_string_charbpos_to_bytebpos(obj, pos)         \
  (BUFFERP (obj) ? charbpos_to_bytebpos (XBUFFER (obj), pos) :  \
   (Bytebpos) XSTRING_INDEX_CHAR_TO_BYTE (obj, pos))

#define buffer_or_string_bytebpos_to_charbpos(obj, ind)         \
  (BUFFERP (obj) ? bytebpos_to_charbpos (XBUFFER (obj), ind) :  \
   (Charbpos) XSTRING_INDEX_BYTE_TO_CHAR (obj, ind))

/* Similar for Charbpos's and Membposs. */

#define buffer_or_string_charbpos_to_membpos(obj, pos)          \
  (BUFFERP (obj) ? charbpos_to_membpos (XBUFFER (obj), pos) :   \
   (Membpos) XSTRING_INDEX_CHAR_TO_BYTE (obj, pos))

#define buffer_or_string_membpos_to_charbpos(obj, ind)          \
  (BUFFERP (obj) ? membpos_to_charbpos (XBUFFER (obj), ind) :   \
   (Charbpos) XSTRING_INDEX_BYTE_TO_CHAR (obj, ind))

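/* For example (a sketch only; `obj' and `pos' are hypothetical), code
   that works on either a buffer or a string can convert a character
   position to a byte position in one expression:

     Bytebpos bpos = buffer_or_string_charbpos_to_bytebpos (obj, pos);

   If OBJ is a buffer this goes through the charbpos<->bytebpos
   machinery below; if OBJ is a string it is a plain char-to-byte index
   conversion on the string's data. */
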
/************************************************************************/
/*                                                                      */
/*                  working with buffer-level data                      */
/*                                                                      */
/************************************************************************/

/*

   (A) Working with byte indices:
   ------------------------------

   VALID_BYTEBPOS_P(buf, bi):
        Given a byte index, does it point to the beginning of a character?

   ASSERT_VALID_BYTEBPOS_UNSAFE(buf, bi):
        If error-checking is enabled, assert that the given byte index
        is within range and points to the beginning of a character
        or to the end of the buffer.  Otherwise, do nothing.

   ASSERT_VALID_BYTEBPOS_BACKWARD_UNSAFE(buf, bi):
        If error-checking is enabled, assert that the given byte index
        is within range and satisfies ASSERT_VALID_BYTEBPOS() and also
        does not refer to the beginning of the buffer.  (i.e. movement
        backwards is OK.)  Otherwise, do nothing.

   ASSERT_VALID_BYTEBPOS_FORWARD_UNSAFE(buf, bi):
        If error-checking is enabled, assert that the given byte index
        is within range and satisfies ASSERT_VALID_BYTEBPOS() and also
        does not refer to the end of the buffer.  (i.e. movement
        forwards is OK.)  Otherwise, do nothing.

   VALIDATE_BYTEBPOS_BACKWARD(buf, bi):
        Make sure that the given byte index is pointing to the beginning
        of a character.  If not, back up until this is the case.  Note
        that there are not too many places where it is legitimate to do
        this sort of thing.  It's an error if you're passed an "invalid"
        byte index.

   VALIDATE_BYTEBPOS_FORWARD(buf, bi):
        Make sure that the given byte index is pointing to the beginning
        of a character.  If not, move forward until this is the case.
        Note that there are not too many places where it is legitimate
        to do this sort of thing.  It's an error if you're passed an
        "invalid" byte index.

   INC_BYTEBPOS(buf, bi):
        Given a byte index (assumed to point at the beginning of a
        character), modify that value so it points to the beginning
        of the next character.

   DEC_BYTEBPOS(buf, bi):
        Given a byte index (assumed to point at the beginning of a
        character), modify that value so it points to the beginning
        of the previous character.  Unlike for DEC_CHARPTR(), we can
        do all the assert()s because there are sentinels at the
        beginning of the gap and the end of the buffer.

   BYTEBPOS_INVALID:
        A constant representing an invalid Bytebpos.  Valid Bytebposs
        can never have this value.


   (B) Converting between Charbpos's and Bytebposs:
   ------------------------------------------------

   charbpos_to_bytebpos(buf, bu):
        Given a Charbpos, return the equivalent Bytebpos.

   bytebpos_to_charbpos(buf, bi):
        Given a Bytebpos, return the equivalent Charbpos.

   make_charbpos(buf, bi):
        Given a Bytebpos, return the equivalent Charbpos as a Lisp Object.
 */


/*----------------------------------------------------------------------*/
/*                      working with byte indices                       */
/*----------------------------------------------------------------------*/

#ifdef MULE
# define VALID_BYTEBPOS_P(buf, x) \
  INTBYTE_FIRST_BYTE_P (*BI_BUF_BYTE_ADDRESS (buf, x))
#else
# define VALID_BYTEBPOS_P(buf, x) 1
#endif

#ifdef ERROR_CHECK_CHARBPOS

# define ASSERT_VALID_BYTEBPOS_UNSAFE(buf, x) do {              \
  assert (BUFFER_LIVE_P (buf));                                 \
  assert ((x) >= BI_BUF_BEG (buf) && x <= BI_BUF_Z (buf));      \
  assert (VALID_BYTEBPOS_P (buf, x));                           \
} while (0)
# define ASSERT_VALID_BYTEBPOS_BACKWARD_UNSAFE(buf, x) do {     \
  assert (BUFFER_LIVE_P (buf));                                 \
  assert ((x) > BI_BUF_BEG (buf) && x <= BI_BUF_Z (buf));       \
  assert (VALID_BYTEBPOS_P (buf, x));                           \
} while (0)
# define ASSERT_VALID_BYTEBPOS_FORWARD_UNSAFE(buf, x) do {      \
  assert (BUFFER_LIVE_P (buf));                                 \
  assert ((x) >= BI_BUF_BEG (buf) && x < BI_BUF_Z (buf));       \
  assert (VALID_BYTEBPOS_P (buf, x));                           \
} while (0)

#else /* not ERROR_CHECK_CHARBPOS */
# define ASSERT_VALID_BYTEBPOS_UNSAFE(buf, x)
# define ASSERT_VALID_BYTEBPOS_BACKWARD_UNSAFE(buf, x)
# define ASSERT_VALID_BYTEBPOS_FORWARD_UNSAFE(buf, x)

#endif /* not ERROR_CHECK_CHARBPOS */

/* Note that, although the Mule version will work fine for non-Mule
   as well (it should reduce down to nothing), we provide a separate
   version to avoid compilation warnings and possible non-optimal
   results with stupid compilers. */

#ifdef MULE
# define VALIDATE_BYTEBPOS_BACKWARD(buf, x) do {        \
  Intbyte *VBB_ptr = BI_BUF_BYTE_ADDRESS (buf, x);      \
  while (!INTBYTE_FIRST_BYTE_P (*VBB_ptr))              \
    VBB_ptr--, (x)--;                                   \
} while (0)
#else
# define VALIDATE_BYTEBPOS_BACKWARD(buf, x)
#endif

/* Note that, although the Mule version will work fine for non-Mule
   as well (it should reduce down to nothing), we provide a separate
   version to avoid compilation warnings and possible non-optimal
   results with stupid compilers. */

#ifdef MULE
# define VALIDATE_BYTEBPOS_FORWARD(buf, x) do {         \
  Intbyte *VBF_ptr = BI_BUF_BYTE_ADDRESS (buf, x);      \
  while (!INTBYTE_FIRST_BYTE_P (*VBF_ptr))              \
    VBF_ptr++, (x)++;                                   \
} while (0)
#else
# define VALIDATE_BYTEBPOS_FORWARD(buf, x)
#endif

/* Note that in the simplest case (no MULE, no ERROR_CHECK_CHARBPOS),
   this crap reduces down to simply (x)++. */

#define INC_BYTEBPOS(buf, x) do                         \
{                                                       \
  ASSERT_VALID_BYTEBPOS_FORWARD_UNSAFE (buf, x);        \
  /* Note that we do the increment first to             \
     make sure that the pointer in                      \
     VALIDATE_BYTEBPOS_FORWARD() ends up on             \
     the correct side of the gap */                     \
  (x)++;                                                \
  VALIDATE_BYTEBPOS_FORWARD (buf, x);                   \
} while (0)

/* Note that in the simplest case (no MULE, no ERROR_CHECK_CHARBPOS),
   this crap reduces down to simply (x)--. */

#define DEC_BYTEBPOS(buf, x) do                         \
{                                                       \
  ASSERT_VALID_BYTEBPOS_BACKWARD_UNSAFE (buf, x);       \
  /* Note that we do the decrement first to             \
     make sure that the pointer in                      \
     VALIDATE_BYTEBPOS_BACKWARD() ends up on            \
     the correct side of the gap */                     \
  (x)--;                                                \
  VALIDATE_BYTEBPOS_BACKWARD (buf, x);                  \
} while (0)

INLINE_HEADER Bytebpos prev_bytebpos (struct buffer *buf, Bytebpos x);
INLINE_HEADER Bytebpos
prev_bytebpos (struct buffer *buf, Bytebpos x)
{
  DEC_BYTEBPOS (buf, x);
  return x;
}

INLINE_HEADER Bytebpos next_bytebpos (struct buffer *buf, Bytebpos x);
INLINE_HEADER Bytebpos
next_bytebpos (struct buffer *buf, Bytebpos x)
{
  INC_BYTEBPOS (buf, x);
  return x;
}

#define BYTEBPOS_INVALID ((Bytebpos) -1)

/*----------------------------------------------------------------------*/
/*        Converting between buffer positions and byte indices          */
/*----------------------------------------------------------------------*/

#ifdef MULE

Bytebpos charbpos_to_bytebpos_func (struct buffer *buf, Charbpos x);
Charbpos bytebpos_to_charbpos_func (struct buffer *buf, Bytebpos x);

/* The basic algorithm we use is to keep track of a known region of
   characters in each buffer, all of which are of the same width.  We keep
   track of the boundaries of the region in both Charbpos and Bytebpos
   coordinates and also keep track of the char width, which is 1 - 4 bytes.
   If the position we're translating is not in the known region, then we
   invoke a function to update the known region to surround the position in
   question.  This assumes locality of reference, which is usually the
   case.

   Note that the function to update the known region can be simple or
   complicated depending on how much information we cache.  In addition to
   the known region, we always cache the correct conversions for point,
   BEGV, and ZV, and in addition to this we cache 16 positions where the
   conversion is known.  We only look in the cache or update it when we
   need to move the known region more than a certain amount (currently 50
   chars), and then we throw away a "random" value and replace it with the
   newly calculated value.

   Finally, we maintain an extra flag that tracks whether the buffer is
   entirely ASCII, to speed up the conversions even more.  This flag is
   actually of dubious value because in an entirely-ASCII buffer the known
   region will always span the entire buffer (in fact, we update the flag
   based on this fact), and so all we're saving is a few machine cycles.

   A potentially smarter method than what we do with known regions and
   cached positions would be to keep some sort of pseudo-extent layer over
   the buffer; maybe keep track of the charbpos/bytebpos correspondence at
   the beginning of each line, which would allow us to do a binary search
   over the pseudo-extents to narrow things down to the correct line, at
   which point you could use a linear movement method.  This would also
   mesh well with efficiently implementing a line-numbering scheme.
   However, you have to weigh the amount of time spent updating the cache
   vs. the savings that result from it.  In reality, we modify the buffer
   far less often than we access it, so a cache of this sort that provides
   guaranteed LOG (N) performance (or perhaps N * LOG (N), if we set a
   maximum on the cache size) would indeed be a win, particularly in very
   large buffers.  If we ever implement this, we should probably set a
   reasonably high minimum below which we use the old method, because the
   time spent updating the fancy cache would likely become dominant when
   making buffer modifications in smaller buffers.

   Note also that we have to multiply or divide by the char width in order
   to convert the positions.  We do some tricks to avoid ever actually
   having to do a multiply or divide, because that is typically an
   expensive operation (esp. divide).  Multiplying or dividing by 1, 2, or
   4 can be implemented simply as a shift left or shift right, and we keep
   track of a shifter value (0, 1, or 2) indicating how much to shift.
   Multiplying by 3 can be implemented by doubling and then adding the
   original value.  Dividing by 3, alas, cannot be implemented in any
   simple shift/subtract method, as far as I know; so we just do a table
   lookup.  For simplicity, we use a table of size 128K, which indexes the
   "divide-by-3" values for the first 64K non-negative numbers.  (Note that
   we can increase the size up to 384K, i.e. indexing the first 192K
   non-negative numbers, while still using shorts in the array.)  This also
   means that the size of the known region can be at most 64K for
   width-three characters.

   !!#### We should investigate the algorithm in GNU Emacs.  I think it
   does something similar, but it may differ in some details, and it's
   worth seeing if anything can be gleaned.
 */

extern short three_to_one_table[];

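/* A sketch (not code from this file; the real initialization lives
   elsewhere) of how such a divide-by-3 table might be filled in at
   startup time:

     {
       int i;

       for (i = 0; i < 65536; i++)
         three_to_one_table[i] = (short) (i / 3);
     }

   Afterwards, dividing a small non-negative byte offset by 3 is just an
   array reference, with no runtime division. */
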
INLINE_HEADER Bytebpos real_charbpos_to_bytebpos (struct buffer *buf, Charbpos x);
INLINE_HEADER Bytebpos
real_charbpos_to_bytebpos (struct buffer *buf, Charbpos x)
{
  if (buf->text->entirely_ascii_p)
    return (Bytebpos) x;
  if (x >= buf->text->mule_bufmin && x <= buf->text->mule_bufmax)
    return (buf->text->mule_bytmin +
            ((x - buf->text->mule_bufmin) << buf->text->mule_shifter) +
            (buf->text->mule_three_p ? (x - buf->text->mule_bufmin) : 0));
  else
    return charbpos_to_bytebpos_func (buf, x);
}

INLINE_HEADER Charbpos real_bytebpos_to_charbpos (struct buffer *buf, Bytebpos x);
INLINE_HEADER Charbpos
real_bytebpos_to_charbpos (struct buffer *buf, Bytebpos x)
{
  if (buf->text->entirely_ascii_p)
    return (Charbpos) x;
  if (x >= buf->text->mule_bytmin && x <= buf->text->mule_bytmax)
    return (buf->text->mule_bufmin +
            ((buf->text->mule_three_p
              ? three_to_one_table[x - buf->text->mule_bytmin]
              : (x - buf->text->mule_bytmin) >> buf->text->mule_shifter)));
  else
    return bytebpos_to_charbpos_func (buf, x);
}

#else /* not MULE */

# define real_charbpos_to_bytebpos(buf, x) ((Bytebpos) x)
# define real_bytebpos_to_charbpos(buf, x) ((Charbpos) x)

#endif /* not MULE */

#ifdef ERROR_CHECK_CHARBPOS

Bytebpos charbpos_to_bytebpos (struct buffer *buf, Charbpos x);
Charbpos bytebpos_to_charbpos (struct buffer *buf, Bytebpos x);

#else /* not ERROR_CHECK_CHARBPOS */

#define charbpos_to_bytebpos real_charbpos_to_bytebpos
#define bytebpos_to_charbpos real_bytebpos_to_charbpos

#endif /* not ERROR_CHECK_CHARBPOS */

#define make_charbpos(buf, ind) make_int (bytebpos_to_charbpos (buf, ind))

/*----------------------------------------------------------------------*/
/*        Converting between buffer bytes and Emacs characters          */
/*----------------------------------------------------------------------*/

/* The character at position POS in buffer. */
#define BI_BUF_FETCH_CHAR(buf, pos) \
  charptr_emchar (BI_BUF_BYTE_ADDRESS (buf, pos))
#define BUF_FETCH_CHAR(buf, pos) \
  BI_BUF_FETCH_CHAR (buf, charbpos_to_bytebpos (buf, pos))

/* The character at position POS in buffer, as a string.  This is
   equivalent to set_charptr_emchar (str, BUF_FETCH_CHAR (buf, pos))
   but is faster for Mule. */

# define BI_BUF_CHARPTR_COPY_CHAR(buf, pos, str) \
  charptr_copy_char (BI_BUF_BYTE_ADDRESS (buf, pos), str)
#define BUF_CHARPTR_COPY_CHAR(buf, pos, str) \
  BI_BUF_CHARPTR_COPY_CHAR (buf, charbpos_to_bytebpos (buf, pos), str)


/************************************************************************/
/*                                                                      */
/*              higher-level buffer-position functions                  */
/*                                                                      */
/************************************************************************/

/*----------------------------------------------------------------------*/
/*          Settor macros for important positions in a buffer           */
/*----------------------------------------------------------------------*/

/* Set beginning of accessible range of buffer. */
#define SET_BOTH_BUF_BEGV(buf, val, bival)      \
do                                              \
{                                               \
  (buf)->begv = (bival);                        \
  (buf)->bufbegv = (val);                       \
} while (0)

/* Set end of accessible range of buffer. */
#define SET_BOTH_BUF_ZV(buf, val, bival)        \
do                                              \
{                                               \
  (buf)->zv = (bival);                          \
  (buf)->bufzv = (val);                         \
} while (0)

/* Set point. */
/* Since BEGV and ZV are almost never set, it's reasonable to enforce
   the restriction that the Charbpos and Bytebpos values must both be
   specified.  However, point is set in lots and lots of places.  So
   we provide the ability to specify both (for efficiency) or just
   one. */
#define BOTH_BUF_SET_PT(buf, val, bival) set_buffer_point (buf, val, bival)
#define BI_BUF_SET_PT(buf, bival) \
  BOTH_BUF_SET_PT (buf, bytebpos_to_charbpos (buf, bival), bival)
#define BUF_SET_PT(buf, value) \
  BOTH_BUF_SET_PT (buf, value, charbpos_to_bytebpos (buf, value))

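/* A sketch of typical usage (with hypothetical position variables):
   when only the character position is at hand, use BUF_SET_PT and let
   the macro derive the byte position; when both are already known
   (e.g. in the middle of an insertion or deletion), use
   BOTH_BUF_SET_PT to avoid a redundant conversion:

     BUF_SET_PT (buf, new_charbpos);
     BOTH_BUF_SET_PT (buf, new_charbpos, new_bytebpos);
 */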

#if 0 /* FSFmacs */
/* These macros exist in FSFmacs because SET_PT() in FSFmacs incorrectly
   does too much stuff, such as moving out of invisible extents. */
#define TEMP_SET_PT(position) (temp_set_point ((position), current_buffer))
#define SET_BUF_PT(buf, value) ((buf)->pt = (value))
#endif /* FSFmacs */

/*----------------------------------------------------------------------*/
/*                    Miscellaneous buffer values                       */
/*----------------------------------------------------------------------*/

/* Number of characters in buffer. */
#define BUF_SIZE(buf) (BUF_Z (buf) - BUF_BEG (buf))

/* Is this buffer narrowed? */
#define BUF_NARROWED(buf)                       \
  ((BI_BUF_BEGV (buf) != BI_BUF_BEG (buf)) ||   \
   (BI_BUF_ZV (buf) != BI_BUF_Z (buf)))

/* Modification count. */
#define BUF_MODIFF(buf) ((buf)->text->modiff)

/* Saved modification count. */
#define BUF_SAVE_MODIFF(buf) ((buf)->text->save_modiff)

/* Face changed. */
#define BUF_FACECHANGE(buf) ((buf)->face_change)

#define POINT_MARKER_P(marker)                          \
  (XMARKER (marker)->buffer != 0 &&                     \
   EQ (marker, XMARKER (marker)->buffer->point_marker))

#define BUF_MARKERS(buf) ((buf)->markers)

/* WARNING:

   The new definitions of CEILING_OF() and FLOOR_OF() differ semantically
   from the old ones (in FSF Emacs and XEmacs 19.11 and before).
   Conversion is as follows:

   OLD_BI_CEILING_OF(n) = NEW_BI_CEILING_OF(n) - 1
   OLD_BI_FLOOR_OF(n) = NEW_BI_FLOOR_OF(n + 1)

   The definitions were changed because the new definitions are more
   consistent with the way everything else works in XEmacs.
 */

/* Properties of CEILING_OF and FLOOR_OF (also apply to BI_ variants):

   1) FLOOR_OF (CEILING_OF (n)) = n
      CEILING_OF (FLOOR_OF (n)) = n

   2) CEILING_OF (n) = n if and only if n = ZV
      FLOOR_OF (n) = n if and only if n = BEGV

   3) CEILING_OF (CEILING_OF (n)) = ZV
      FLOOR_OF (FLOOR_OF (n)) = BEGV

   4) The bytes in the regions

      [BYTE_ADDRESS (n), BYTE_ADDRESS_BEFORE (CEILING_OF (n))]

      and

      [BYTE_ADDRESS (FLOOR_OF (n)), BYTE_ADDRESS_BEFORE (n)]

      are contiguous.

   A typical loop using CEILING_OF to process contiguous ranges of text
   between [from, to) looks like this:

     {
       Bytebpos pos = from;

       while (pos < to)
         {
           Bytebpos ceil;

           ceil = BI_BUF_CEILING_OF (buf, pos);
           ceil = min (to, ceil);
           process_intbyte_string (BI_BUF_BYTE_ADDRESS (buf, pos), ceil - pos);
           pos = ceil;
         }
     }

   Currently there will be at most two iterations in the loop, but it is
   written in such a way that it will still work if the buffer
   representation is changed to have multiple gaps in it.
 */


/* Return the maximum index in the buffer it is safe to scan forwards
   past N to.  This is used to prevent buffer scans from running into
   the gap (e.g. search.c).  All characters between N and CEILING_OF(N)
   are located contiguous in memory.  Note that the character *at*
   CEILING_OF(N) is not contiguous in memory. */
#define BI_BUF_CEILING_OF(b, n)                                 \
  ((n) < (b)->text->gpt && (b)->text->gpt < BI_BUF_ZV (b) ?     \
   (b)->text->gpt : BI_BUF_ZV (b))
#define BUF_CEILING_OF(b, n)                                            \
  bytebpos_to_charbpos (b, BI_BUF_CEILING_OF (b, charbpos_to_bytebpos (b, n)))

/* Return the minimum index in the buffer it is safe to scan backwards
   past N to.  All characters between FLOOR_OF(N) and N are located
   contiguous in memory.  Note that the character *at* N may not be
   contiguous in memory. */
#define BI_BUF_FLOOR_OF(b, n)                                   \
  (BI_BUF_BEGV (b) < (b)->text->gpt && (b)->text->gpt < (n) ?   \
   (b)->text->gpt : BI_BUF_BEGV (b))
#define BUF_FLOOR_OF(b, n)                                              \
  bytebpos_to_charbpos (b, BI_BUF_FLOOR_OF (b, charbpos_to_bytebpos (b, n)))

#define BI_BUF_CEILING_OF_IGNORE_ACCESSIBLE(b, n)               \
  ((n) < (b)->text->gpt && (b)->text->gpt < BI_BUF_Z (b) ?      \
   (b)->text->gpt : BI_BUF_Z (b))
#define BUF_CEILING_OF_IGNORE_ACCESSIBLE(b, n)                          \
  bytebpos_to_charbpos                                                  \
    (b, BI_BUF_CEILING_OF_IGNORE_ACCESSIBLE (b, charbpos_to_bytebpos (b, n)))

#define BI_BUF_FLOOR_OF_IGNORE_ACCESSIBLE(b, n)                 \
  (BI_BUF_BEG (b) < (b)->text->gpt && (b)->text->gpt < (n) ?    \
   (b)->text->gpt : BI_BUF_BEG (b))
#define BUF_FLOOR_OF_IGNORE_ACCESSIBLE(b, n)                            \
  bytebpos_to_charbpos                                                  \
    (b, BI_BUF_FLOOR_OF_IGNORE_ACCESSIBLE (b, charbpos_to_bytebpos (b, n)))

/* This structure marks which slots in a buffer have corresponding
   default values in Vbuffer_defaults.
   Each such slot has a nonzero value in this structure.
   The value has only one nonzero bit.

   When a buffer has its own local value for a slot,
   the bit for that slot (found in the same slot in this structure)
   is turned on in the buffer's local_var_flags slot.

   If a slot in this structure is zero, then even though there may
   be a DEFVAR_BUFFER_LOCAL for the slot, there is no default value for it;
   and the corresponding slot in Vbuffer_defaults is not used. */

extern struct buffer buffer_local_flags;


/* Allocation of buffer data. */

#ifdef REL_ALLOC

char *r_alloc (unsigned char **, size_t);
char *r_re_alloc (unsigned char **, size_t);
void r_alloc_free (unsigned char **);

#define BUFFER_ALLOC(data, size) \
  ((Intbyte *) r_alloc ((unsigned char **) &data, (size) * sizeof (Intbyte)))
#define BUFFER_REALLOC(data, size) \
  ((Intbyte *) r_re_alloc ((unsigned char **) &data, (size) * sizeof (Intbyte)))
#define BUFFER_FREE(data) r_alloc_free ((unsigned char **) &(data))
#define R_ALLOC_DECLARE(var,data) r_alloc_declare (&(var), data)

#else /* !REL_ALLOC */

#define BUFFER_ALLOC(data, size) \
  (data = xnew_array (Intbyte, size))
#define BUFFER_REALLOC(data, size) \
  ((Intbyte *) xrealloc (data, (size) * sizeof (Intbyte)))
/* Avoid excess parentheses, or syntax errors may rear their heads. */
#define BUFFER_FREE(data) xfree (data)
#define R_ALLOC_DECLARE(var,data)

#endif /* !REL_ALLOC */

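/* A sketch of the intended usage of the allocation macros above
   (hypothetical variable and size; the real callers live in the buffer
   implementation):

     Intbyte *storage = 0;

     BUFFER_ALLOC (storage, 4000);    -- room for 4000 Intbytes
     ...
     BUFFER_FREE (storage);           -- release it again

   Note that both variants of BUFFER_ALLOC refer to the variable itself
   rather than a copy of the pointer: the plain version assigns to it,
   and the REL_ALLOC version passes its address so the allocator can
   store (and later relocate) the block. */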

/************************************************************************/
/*                           Case conversion                            */
/************************************************************************/

/* A "trt" table is a mapping from characters to other characters,
   typically used to convert between uppercase and lowercase.  For
   compatibility reasons, trt tables are currently in the form of
   a Lisp string of 256 characters, specifying the conversion for each
   of the first 256 Emacs characters (i.e. the 256 Latin-1 characters).
   This should be generalized at some point to support conversions for
   all of the allowable Mule characters.
   */

/* The _1 macros are named as such because they assume that you have
   already guaranteed that the character values are all in the range
   0 - 255.  Bad lossage will happen otherwise. */

#define MAKE_TRT_TABLE() Fmake_char_table (Qgeneric)
INLINE_HEADER Emchar TRT_TABLE_CHAR_1 (Lisp_Object table, Emchar c);
INLINE_HEADER Emchar
TRT_TABLE_CHAR_1 (Lisp_Object table, Emchar ch)
{
  Lisp_Object TRT_char;
  TRT_char = get_char_table (ch, XCHAR_TABLE (table));
  if (NILP (TRT_char))
    return ch;
  else
    return XCHAR (TRT_char);
}
#define SET_TRT_TABLE_CHAR_1(table, ch1, ch2) \
  Fput_char_table (make_char (ch1), make_char (ch2), table);

INLINE_HEADER Emchar TRT_TABLE_OF (Lisp_Object trt, Emchar c);
INLINE_HEADER Emchar
TRT_TABLE_OF (Lisp_Object trt, Emchar c)
{
  return TRT_TABLE_CHAR_1 (trt, c);
}

INLINE_HEADER Lisp_Object BUFFER_CASE_TABLE (struct buffer *buf);
INLINE_HEADER Lisp_Object
BUFFER_CASE_TABLE (struct buffer *buf)
{
  return buf ? buf->case_table : Vstandard_case_table;
}

/* Macros used below. */
#define DOWNCASE_TABLE_OF(buf, c) \
  TRT_TABLE_OF (XCASE_TABLE_DOWNCASE (BUFFER_CASE_TABLE (buf)), c)
#define UPCASE_TABLE_OF(buf, c) \
  TRT_TABLE_OF (XCASE_TABLE_UPCASE (BUFFER_CASE_TABLE (buf)), c)

/* 1 if CH is upper case. */

INLINE_HEADER int UPPERCASEP (struct buffer *buf, Emchar ch);
INLINE_HEADER int
UPPERCASEP (struct buffer *buf, Emchar ch)
{
  return DOWNCASE_TABLE_OF (buf, ch) != ch;
}

/* 1 if CH is lower case. */

INLINE_HEADER int LOWERCASEP (struct buffer *buf, Emchar ch);
INLINE_HEADER int
LOWERCASEP (struct buffer *buf, Emchar ch)
{
  return (UPCASE_TABLE_OF (buf, ch) != ch &&
          DOWNCASE_TABLE_OF (buf, ch) == ch);
}

/* 1 if CH is neither upper nor lower case. */

INLINE_HEADER int NOCASEP (struct buffer *buf, Emchar ch);
INLINE_HEADER int
NOCASEP (struct buffer *buf, Emchar ch)
{
  return UPCASE_TABLE_OF (buf, ch) == ch;
}

/* Upcase a character, or make no change if that cannot be done. */

INLINE_HEADER Emchar UPCASE (struct buffer *buf, Emchar ch);
INLINE_HEADER Emchar
UPCASE (struct buffer *buf, Emchar ch)
{
  return (DOWNCASE_TABLE_OF (buf, ch) == ch) ? UPCASE_TABLE_OF (buf, ch) : ch;
}

/* Upcase a character known to be not upper case.  Unused. */

#define UPCASE1(buf, ch) UPCASE_TABLE_OF (buf, ch)

/* Downcase a character, or make no change if that cannot be done. */

#define DOWNCASE(buf, ch) DOWNCASE_TABLE_OF (buf, ch)

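/* A sketch of typical usage (hypothetical character variables): a
   case-insensitive comparison of two characters relative to BUF's
   current case table might be written as

     if (DOWNCASE (buf, c1) == DOWNCASE (buf, c2))
       -- the characters match, ignoring case --

   while UPPERCASEP, LOWERCASEP and NOCASEP classify a single character
   against the same tables. */
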
#endif /* INCLUDED_buffer_h_ */