2 * Copyright (c) 2009, 2013 The FreeBSD Foundation
5 * This software was developed by Ed Schouten under sponsorship from the
8 * Portions of this software were developed by Oleksandr Rybalko
9 * under sponsorship from the FreeBSD Foundation.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
37 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/mutex.h>
41 #include <sys/systm.h>
43 #include <dev/vt/vt.h>
/* malloc(9) type used for all vt buffer allocations in this file. */
45 static MALLOC_DEFINE(M_VTBUF, "vtbuf", "vt buffer");
/* Spin-mutex wrappers protecting struct vt_buf state. */
47 #define VTBUF_LOCK(vb) mtx_lock_spin(&(vb)->vb_lock)
48 #define VTBUF_UNLOCK(vb) mtx_unlock_spin(&(vb)->vb_lock)
/*
 * Linearize a (column, row) pair into one comparable integer: row in the
 * high bits, column in the low 12 bits.  Assumes columns fit in 12 bits
 * (col < 4096) — TODO confirm against maximum terminal width.
 */
50 #define POS_INDEX(c, r) (((r) << 12) + (c))
/* Copy a term_pos_t (tp_col/tp_row) from s into d. */
51 #define POS_COPY(d, s) do { \
52 (d).tp_col = (s).tp_col; \
53 (d).tp_row = (s).tp_row; \
59 * line5 <--- curroffset (terminal output to that line)
61 * line1 <--- roffset (history display from that point)
/*
 * Move the scrollback view offset (vb_roffset) by 'offset' according to
 * 'whence'.  Returns nonzero when the visible region changed (redraw
 * needed), 0 otherwise.  Parts of the whence dispatch are elided from
 * this view — TODO confirm exact whence semantics against full source.
 */
66 vthistory_seek(struct vt_buf *vb, int offset, int whence)
68 int diff, top, bottom, roffset;
70 /* No scrolling if not enabled. */
71 if ((vb->vb_flags & VBF_SCROLL) == 0) {
72 if (vb->vb_roffset != vb->vb_curroffset) {
/* Not in scroll mode: snap the view back to the output position. */
73 vb->vb_roffset = vb->vb_curroffset;
76 return (0); /* No changes */
/*
 * Compute the clamp range: once the circular history has wrapped
 * (VBF_HISTORY_FULL) the oldest usable line sits one screen past
 * curroffset; before that, the top is the fixed history size.
 */
78 top = (vb->vb_flags & VBF_HISTORY_FULL)?
79 (vb->vb_curroffset + vb->vb_scr_size.tp_row):vb->vb_history_size;
80 bottom = vb->vb_curroffset + vb->vb_history_size;
83 * Operate on a copy of the offset value, since it can temporarily be
84 * bigger than the number of rows in the buffer.
86 roffset = vb->vb_roffset + vb->vb_history_size;
/* Presumably the absolute-seek case — TODO confirm (lines elided). */
89 roffset = offset + vb->vb_history_size;
95 /* Go to current offset. */
96 roffset = vb->vb_curroffset + vb->vb_history_size;
/* Clamp to [top, bottom], then fold back into the circular buffer. */
100 roffset = (roffset < top)?top:roffset;
101 roffset = (roffset > bottom)?bottom:roffset;
103 roffset %= vb->vb_history_size;
105 if (vb->vb_roffset != roffset) {
106 diff = vb->vb_roffset - roffset;
107 vb->vb_roffset = roffset;
109 * Offset changed, please update Nth lines on screen.
110 * +N - Nth lines at top;
111 * -N - Nth lines at bottom.
115 return (0); /* No changes */
/*
 * Advance the terminal output offset by 'offset' lines, clamped at zero
 * and wrapped within the circular history.  While not in scroll mode the
 * view offset follows the output position.
 */
119 vthistory_addlines(struct vt_buf *vb, int offset)
122 vb->vb_curroffset += offset;
123 if (vb->vb_curroffset < 0)
124 vb->vb_curroffset = 0;
125 vb->vb_curroffset %= vb->vb_history_size;
126 if ((vb->vb_flags & VBF_SCROLL) == 0) {
/* Not scrolled back: keep the view pinned to the output line. */
127 vb->vb_roffset = vb->vb_curroffset;
/* Report the current history view offset through *offset. */
132 vthistory_getpos(const struct vt_buf *vb, unsigned int *offset)
135 *offset = vb->vb_roffset;
138 #ifndef SC_NO_CUTPASTE /* Only mouse support uses it now. */
139 /* Translate current view (window) row number to history row. */
141 vtbuf_wth(struct vt_buf *vb, int row)
144 return ((vb->vb_roffset + row) % vb->vb_history_size);
148 /* Translate history row to current view (window) row number. */
150 vtbuf_htw(struct vt_buf *vb, int row)
155 * History offset roffset winrow
156 * 205 200 ((205 - 200 + 1000) % 1000) = 5
157 * 90 990 ((90 - 990 + 1000) % 1000) = 100
/* Adding vb_history_size keeps the dividend non-negative before %. */
159 return ((row - vb->vb_roffset + vb->vb_history_size) %
160 vb->vb_history_size);
/*
 * Return whether screen cell (row, col) should be drawn highlighted:
 * either it is the text cursor (cursor visible and not scrolled back),
 * or it lies inside the current cut/paste mark region.
 */
164 vtbuf_iscursor(struct vt_buf *vb, int row, int col)
166 int sc, sr, ec, er, tmp;
168 if ((vb->vb_flags & (VBF_CURSOR|VBF_SCROLL)) == VBF_CURSOR &&
169 (vb->vb_cursor.tp_row == row) && (vb->vb_cursor.tp_col == col))
172 /* Mark cut/paste region. */
175 * Luckily the screen view is not a circular buffer, so we can
176 * calculate in screen coordinates. Translate first.
178 sc = vb->vb_mark_start.tp_col;
179 sr = vtbuf_htw(vb, vb->vb_mark_start.tp_row);
180 ec = vb->vb_mark_end.tp_col;
181 er = vtbuf_htw(vb, vb->vb_mark_end.tp_row);
184 /* Swap start and end if start > end. */
185 if (POS_INDEX(sc, sr) > POS_INDEX(ec, er)) {
186 tmp = sc; sc = ec; ec = tmp;
187 tmp = sr; sr = er; er = tmp;
/* Half-open interval: start cell included, end cell excluded. */
190 if ((POS_INDEX(sc, sr) <= POS_INDEX(col, row)) &&
191 (POS_INDEX(col, row) < POS_INDEX(ec, er)))
/* Build a 64-bit dirty mask covering rows/columns [begin, end) mod 64. */
197 static inline uint64_t
198 vtbuf_dirty_axis(unsigned int begin, unsigned int end)
200 uint64_t left, right, mask;
203 * Mark all bits between begin % 64 and end % 64 dirty.
204 * This code is functionally equivalent to:
206 * for (i = begin; i < end; i++)
207 * mask |= (uint64_t)1 << (i % 64);
210 /* Obvious case. Mark everything dirty. */
211 if (end - begin >= 64)
214 /* 1....0; used bits on the left. */
215 left = VBM_DIRTY << begin % 64;
216 /* 0....1; used bits on the right. */
/* For unsigned end, -end % 64 == (64 - end % 64) % 64. */
217 right = VBM_DIRTY >> -end % 64;
220 * Only take the intersection. If the result of that is 0, it
221 * means that the selection crossed a 64 bit boundary along the
222 * way, which means we have to take the complement.
/*
 * Grow the accumulated dirty rectangle and the per-axis dirty bitmasks
 * to cover 'area'.  Locking appears to be handled around this body —
 * TODO confirm (lock calls elided from this view).
 */
231 vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area)
/* Extend each edge of the bounding rectangle only when area exceeds it. */
235 if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row)
236 vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row;
237 if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col)
238 vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col;
239 if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row)
240 vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row;
241 if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col)
242 vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col;
/* Also record the affected rows/columns in the 64-bit masks. */
243 vb->vb_dirtymask.vbm_row |=
244 vtbuf_dirty_axis(area->tr_begin.tp_row, area->tr_end.tp_row);
245 vb->vb_dirtymask.vbm_col |=
246 vtbuf_dirty_axis(area->tr_begin.tp_col, area->tr_end.tp_col);
/* Mark the single cell at *p dirty (a 1x1 area ending one past *p). */
251 vtbuf_dirty_cell(struct vt_buf *vb, const term_pos_t *p)
256 area.tr_end.tp_row = p->tp_row + 1;
257 area.tr_end.tp_col = p->tp_col + 1;
258 vtbuf_dirty(vb, &area);
/*
 * Reset dirty tracking to an empty region: begin past the screen and
 * end at the origin, so the next vtbuf_dirty() re-establishes a valid
 * rectangle; clear both axis masks.
 */
262 vtbuf_make_undirty(struct vt_buf *vb)
265 vb->vb_dirtyrect.tr_begin = vb->vb_scr_size;
266 vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0;
267 vb->vb_dirtymask.vbm_row = vb->vb_dirtymask.vbm_col = 0;
/* Copy out the accumulated dirty region and mask, then reset tracking. */
271 vtbuf_undirty(struct vt_buf *vb, term_rect_t *r, struct vt_bufmask *m)
275 *r = vb->vb_dirtyrect;
276 *m = vb->vb_dirtymask;
277 vtbuf_make_undirty(vb);
/*
 * Copy screen rectangle 'r' so its top-left lands on p2.  A full-width,
 * whole-screen upward copy is recognized as scrolling and handled by
 * rotating rows into history (vthistory_addlines) instead of moving
 * memory.  Otherwise rows are copied in an order that is safe for
 * overlapping source/destination.
 */
282 vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2)
284 const term_pos_t *p1 = &r->tr_begin;
286 unsigned int rows, cols;
289 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
290 ("vtbuf_copy begin.tp_row %d must be less than screen width %d",
291 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
292 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
293 ("vtbuf_copy begin.tp_col %d must be less than screen height %d",
294 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
296 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
297 ("vtbuf_copy end.tp_row %d must be less than screen width %d",
298 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
299 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
300 ("vtbuf_copy end.tp_col %d must be less than screen height %d",
301 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
303 KASSERT(p2->tp_row < vb->vb_scr_size.tp_row,
304 ("vtbuf_copy tp_row %d must be less than screen width %d",
305 p2->tp_row, vb->vb_scr_size.tp_row));
306 KASSERT(p2->tp_col < vb->vb_scr_size.tp_col,
307 ("vtbuf_copy tp_col %d must be less than screen height %d",
308 p2->tp_col, vb->vb_scr_size.tp_col));
/* rdiff > 0 means the rectangle moves up (toward lower row numbers). */
310 rows = r->tr_end.tp_row - r->tr_begin.tp_row;
311 rdiff = r->tr_begin.tp_row - p2->tp_row;
312 cols = r->tr_end.tp_col - r->tr_begin.tp_col;
313 if (r->tr_begin.tp_row > p2->tp_row && r->tr_begin.tp_col == 0 &&
314 r->tr_end.tp_col == vb->vb_scr_size.tp_col && /* Full row. */
315 (rows + rdiff) == vb->vb_scr_size.tp_row && /* Whole screen. */
316 rdiff > 0) { /* Only forward direction. Do not eat history. */
317 vthistory_addlines(vb, rdiff);
318 } else if (p2->tp_row < p1->tp_row) {
319 /* Handle overlapping copies of line segments. */
/* Moving up: copy top-to-bottom so sources are read before overwrite. */
321 for (pr = 0; pr < rows; pr++)
323 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
324 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
325 cols * sizeof(term_char_t));
327 /* Move data down. */
/* Moving down: copy bottom-to-top for the same overlap reason. */
328 for (pr = rows - 1; pr >= 0; pr--)
330 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
331 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
332 cols * sizeof(term_char_t));
/* Dirty the destination rectangle, clamped to the screen. */
336 area.tr_end.tp_row = MIN(p2->tp_row + rows, vb->vb_scr_size.tp_row);
337 area.tr_end.tp_col = MIN(p2->tp_col + cols, vb->vb_scr_size.tp_col);
338 vtbuf_dirty(vb, &area);
/*
 * Fill rectangle 'r' with character c.  Each screen row is resolved
 * through the circular row-pointer table relative to curroffset.
 */
342 vtbuf_fill(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
347 for (pr = r->tr_begin.tp_row; pr < r->tr_end.tp_row; pr++) {
348 row = vb->vb_rows[(vb->vb_curroffset + pr) %
349 VTBUF_MAX_HEIGHT(vb)];
350 for (pc = r->tr_begin.tp_col; pc < r->tr_end.tp_col; pc++) {
/*
 * Bounds-checked, locked wrapper around vtbuf_fill() (lock calls elided
 * from this view — TODO confirm against full source).
 */
357 vtbuf_fill_locked(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
359 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
360 ("vtbuf_fill_locked begin.tp_row %d must be < screen height %d",
361 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
362 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
363 ("vtbuf_fill_locked begin.tp_col %d must be < screen width %d",
364 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
366 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
367 ("vtbuf_fill_locked end.tp_row %d must be <= screen height %d",
368 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
369 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
370 ("vtbuf_fill_locked end.tp_col %d must be <= screen width %d",
371 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
374 vtbuf_fill(vb, r, c);
/*
 * (Re)build the row-pointer table: history must hold at least one full
 * screen of rows; each row points at a tp_col-sized slice of the flat
 * character buffer.
 */
381 vtbuf_init_rows(struct vt_buf *vb)
385 vb->vb_history_size = MAX(vb->vb_history_size, vb->vb_scr_size.tp_row);
387 for (r = 0; r < vb->vb_history_size; r++)
388 vb->vb_rows[r] = &vb->vb_buffer[r *
389 vb->vb_scr_size.tp_col];
/*
 * Early initialization (usable before full setup): cursor visible,
 * offsets and cut/paste mark zeroed, dirty state reset, and the spin
 * lock created exactly once (guarded by VBF_MTX_INIT).
 */
393 vtbuf_init_early(struct vt_buf *vb)
396 vb->vb_flags |= VBF_CURSOR;
398 vb->vb_curroffset = 0;
399 vb->vb_mark_start.tp_row = 0;
400 vb->vb_mark_start.tp_col = 0;
401 vb->vb_mark_end.tp_row = 0;
402 vb->vb_mark_end.tp_col = 0;
405 vtbuf_make_undirty(vb);
406 if ((vb->vb_flags & VBF_MTX_INIT) == 0) {
407 mtx_init(&vb->vb_lock, "vtbuf", NULL, MTX_SPIN);
408 vb->vb_flags |= VBF_MTX_INIT;
/*
 * Full initialization for a screen of size *p: record geometry, use the
 * default history depth, and (unless the buffer is statically backed)
 * allocate the character buffer and row-pointer table.
 */
413 vtbuf_init(struct vt_buf *vb, const term_pos_t *p)
417 vb->vb_scr_size = *p;
418 vb->vb_history_size = VBF_DEFAULT_HISTORY_SIZE;
420 if ((vb->vb_flags & VBF_STATIC) == 0) {
421 sz = vb->vb_history_size * p->tp_col * sizeof(term_char_t);
422 vb->vb_buffer = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
424 sz = vb->vb_history_size * sizeof(term_char_t *);
425 vb->vb_rows = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
428 vtbuf_init_early(vb);
/* Change history depth while keeping the current screen dimensions. */
432 vtbuf_sethistory_size(struct vt_buf *vb, int size)
437 p.tp_row = vb->vb_scr_size.tp_row;
438 p.tp_col = vb->vb_scr_size.tp_col;
439 vtbuf_grow(vb, &p, size);
/*
 * Resize the buffer to screen geometry *p with at least 'history_size'
 * rows of history.  Reallocates and copies when the new geometry grows
 * or the current buffer is static; otherwise only the recorded sizes
 * change.  Several lines (lock calls, row assignment, old-buffer free)
 * are elided from this view.
 */
443 vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, int history_size)
445 term_char_t *old, *new, **rows, **oldrows, **copyrows, *row;
446 int bufsize, rowssize, w, h, c, r;
/* History must be able to hold at least one full screen. */
449 history_size = MAX(history_size, p->tp_row);
451 /* If new screen/history size bigger or buffer is VBF_STATIC. */
452 if ((history_size > vb->vb_history_size) || (p->tp_col >
453 vb->vb_scr_size.tp_col) || (vb->vb_flags & VBF_STATIC)) {
454 /* Allocate new buffer. */
455 bufsize = history_size * p->tp_col * sizeof(term_char_t);
456 new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO);
/*
 * NOTE(review): rows is a term_char_t ** — sizeof(term_pos_t *) yields
 * the same pointer size, but sizeof(term_char_t *) (or sizeof *rows)
 * would be the consistent spelling here.
 */
457 rowssize = history_size * sizeof(term_pos_t *);
458 rows = malloc(rowssize, M_VTBUF, M_WAITOK | M_ZERO);
/* A static buffer has nothing to free; keep NULL so free() is a no-op. */
462 old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer;
463 oldrows = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_rows;
464 copyrows = vb->vb_rows;
465 w = vb->vb_scr_size.tp_col;
466 h = vb->vb_history_size;
468 vb->vb_history_size = history_size;
471 vb->vb_flags &= ~VBF_STATIC;
472 vb->vb_scr_size = *p;
475 /* Copy history and fill extra space. */
476 for (r = 0; r < history_size; r ++) {
478 if (r < h) { /* Copy. */
479 memmove(rows[r], copyrows[r],
480 MIN(p->tp_col, w) * sizeof(term_char_t));
/* Pad the widened part of a copied row with blanks. */
481 for (c = MIN(p->tp_col, w); c < p->tp_col;
483 row[c] = VTBUF_SPACE_CHAR;
485 } else { /* Just fill. */
486 rect.tr_begin.tp_col = 0;
487 rect.tr_begin.tp_row = r;
488 rect.tr_end.tp_col = p->tp_col;
489 rect.tr_end.tp_row = p->tp_row;
490 vtbuf_fill(vb, &rect, VTBUF_SPACE_CHAR);
494 vtbuf_make_undirty(vb);
496 /* Deallocate old buffer. */
498 free(oldrows, M_VTBUF);
500 /* Just update the size. */
501 vb->vb_scr_size = *p;
/*
 * Store character c at screen position *p.  The cell is only dirtied
 * (redraw scheduled) when the stored value actually changes.
 */
506 vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c)
510 KASSERT(p->tp_row < vb->vb_scr_size.tp_row,
511 ("vtbuf_putchar tp_row %d must be less than screen width %d",
512 p->tp_row, vb->vb_scr_size.tp_row));
513 KASSERT(p->tp_col < vb->vb_scr_size.tp_col,
514 ("vtbuf_putchar tp_col %d must be less than screen height %d",
515 p->tp_col, vb->vb_scr_size.tp_col));
/* Resolve the screen row through the circular row table. */
517 row = vb->vb_rows[(vb->vb_curroffset + p->tp_row) %
518 VTBUF_MAX_HEIGHT(vb)];
519 if (row[p->tp_col] != c) {
523 vtbuf_dirty_cell(vb, p);
/*
 * Move the text cursor to *p.  When the cursor is visible, both the old
 * and the new cursor cells are dirtied so each gets redrawn (the cursor
 * assignment itself is elided from this view).
 */
528 vtbuf_cursor_position(struct vt_buf *vb, const term_pos_t *p)
531 if (vb->vb_flags & VBF_CURSOR) {
532 vtbuf_dirty_cell(vb, &vb->vb_cursor);
534 vtbuf_dirty_cell(vb, &vb->vb_cursor);
540 #ifndef SC_NO_CUTPASTE
/*
 * Dirty a 3x3 neighborhood around the mouse cursor cell, clamped to the
 * screen, so the renderer repaints the pointer area.
 */
542 vtbuf_mouse_cursor_position(struct vt_buf *vb, int col, int row)
546 area.tr_begin.tp_row = MAX(row - 1, 0);
547 area.tr_begin.tp_col = MAX(col - 1, 0);
548 area.tr_end.tp_row = MIN(row + 2, vb->vb_scr_size.tp_row);
549 area.tr_end.tp_col = MIN(col + 2, vb->vb_scr_size.tp_col);
550 vtbuf_dirty(vb, &area);
/*
 * If a cut/paste mark is set (any nonzero coordinate), dirty every full
 * row that the mark spans in window space so the highlight is redrawn.
 */
554 vtbuf_flush_mark(struct vt_buf *vb)
559 /* Notify renderer to update marked region. */
560 if (vb->vb_mark_start.tp_col || vb->vb_mark_end.tp_col ||
561 vb->vb_mark_start.tp_row || vb->vb_mark_end.tp_row) {
/* Convert history-space mark rows to window rows before comparing. */
563 s = vtbuf_htw(vb, vb->vb_mark_start.tp_row);
564 e = vtbuf_htw(vb, vb->vb_mark_end.tp_row);
566 area.tr_begin.tp_col = 0;
567 area.tr_begin.tp_row = MIN(s, e);
569 area.tr_end.tp_col = vb->vb_scr_size.tp_col;
570 area.tr_end.tp_row = MAX(s, e) + 1;
572 vtbuf_dirty(vb, &area);
/*
 * Return the buffer size, in bytes, needed to extract the marked
 * region: the character span plus extra room for the newlines injected
 * between rows.
 */
577 vtbuf_get_marked_len(struct vt_buf *vb)
582 /* Swap according to window coordinates. */
583 if (POS_INDEX(vtbuf_htw(vb, vb->vb_mark_start.tp_row),
584 vb->vb_mark_start.tp_col) >
585 POS_INDEX(vtbuf_htw(vb, vb->vb_mark_end.tp_row),
586 vb->vb_mark_end.tp_col)) {
587 POS_COPY(e, vb->vb_mark_start);
588 POS_COPY(s, vb->vb_mark_end);
590 POS_COPY(s, vb->vb_mark_start);
591 POS_COPY(e, vb->vb_mark_end);
/* Linear indices of start and end within history-space rows. */
594 si = s.tp_row * vb->vb_scr_size.tp_col + s.tp_col;
595 ei = e.tp_row * vb->vb_scr_size.tp_col + e.tp_col;
597 /* Number of symbols plus number of rows, to inject \n per row. */
598 sz = ei - si + ((e.tp_row - s.tp_row) * 2) + 1;
600 return (sz * sizeof(term_char_t));
/*
 * Copy the marked region into buf, row by row, normalizing start/end to
 * window order first.  Newline insertion between rows is partly elided
 * from this view.
 */
604 vtbuf_extract_marked(struct vt_buf *vb, term_char_t *buf, int sz)
609 /* Swap according to window coordinates. */
610 if (POS_INDEX(vtbuf_htw(vb, vb->vb_mark_start.tp_row),
611 vb->vb_mark_start.tp_col) >
612 POS_INDEX(vtbuf_htw(vb, vb->vb_mark_end.tp_row),
613 vb->vb_mark_end.tp_col)) {
614 POS_COPY(e, vb->vb_mark_start);
615 POS_COPY(s, vb->vb_mark_end);
617 POS_COPY(s, vb->vb_mark_start);
618 POS_COPY(e, vb->vb_mark_end);
/* First row starts at s.tp_col, last row ends at e.tp_col. */
622 for (r = s.tp_row; r <= e.tp_row; r ++) {
623 cs = (r == s.tp_row)?s.tp_col:0;
624 ce = (r == e.tp_row)?e.tp_col:vb->vb_scr_size.tp_col;
625 for (c = cs; c < ce; c ++) {
626 buf[i++] = vb->vb_rows[r][c];
628 /* Add new line for all rows, but not for the last one. */
/*
 * Update the cut/paste mark for mouse event 'type' (VTB_MARK_*) at
 * window position (col, row).  Mark coordinates are stored in history
 * space via vtbuf_wth().  Word marking scans for spaces around col;
 * row marking selects the whole line.  Switch labels for some cases are
 * elided from this view.
 */
637 vtbuf_set_mark(struct vt_buf *vb, int type, int col, int row)
643 case VTB_MARK_END: /* B1 UP */
/* Ignore a button-up that does not terminate a drag. */
644 if (vb->vb_mark_last != VTB_MARK_MOVE)
648 case VTB_MARK_EXTEND:
649 vtbuf_flush_mark(vb); /* Clean old mark. */
650 vb->vb_mark_end.tp_col = col;
651 vb->vb_mark_end.tp_row = vtbuf_wth(vb, row);
/* Presumably VTB_MARK_START — new selection anchor. TODO confirm. */
654 vtbuf_flush_mark(vb); /* Clean old mark. */
655 vb->vb_mark_start.tp_col = col;
656 vb->vb_mark_start.tp_row = vtbuf_wth(vb, row);
657 /* Start again, so clear end point. */
658 vb->vb_mark_end.tp_col = col;
659 vb->vb_mark_end.tp_row = vtbuf_wth(vb, row);
/* Presumably VTB_MARK_WORD — select space-delimited word. TODO confirm. */
662 vtbuf_flush_mark(vb); /* Clean old mark. */
663 vb->vb_mark_start.tp_row = vb->vb_mark_end.tp_row =
665 r = vb->vb_rows[vb->vb_mark_start.tp_row];
/* Scan left from col for the preceding space (word start). */
666 for (i = col; i >= 0; i --) {
667 if (TCHAR_CHARACTER(r[i]) == ' ') {
668 vb->vb_mark_start.tp_col = i + 1;
/* Scan right from col for the following space (word end). */
672 for (i = col; i < vb->vb_scr_size.tp_col; i ++) {
673 if (TCHAR_CHARACTER(r[i]) == ' ') {
674 vb->vb_mark_end.tp_col = i;
/* Degenerate selection (click on a space): collapse to empty. */
678 if (vb->vb_mark_start.tp_col > vb->vb_mark_end.tp_col)
679 vb->vb_mark_start.tp_col = vb->vb_mark_end.tp_col;
/* Presumably VTB_MARK_ROW — select the whole line. TODO confirm. */
682 vtbuf_flush_mark(vb); /* Clean old mark. */
683 vb->vb_mark_start.tp_col = 0;
684 vb->vb_mark_end.tp_col = vb->vb_scr_size.tp_col;
685 vb->vb_mark_start.tp_row = vb->vb_mark_end.tp_row =
689 vb->vb_mark_last = type;
696 vb->vb_mark_last = type;
697 /* Draw new marked region. */
698 vtbuf_flush_mark(vb);
/*
 * Show (yes != 0) or hide the text cursor; dirty its cell only when the
 * visibility flag actually changed.
 */
704 vtbuf_cursor_visibility(struct vt_buf *vb, int yes)
709 oflags = vb->vb_flags;
711 vb->vb_flags |= VBF_CURSOR;
713 vb->vb_flags &= ~VBF_CURSOR;
714 nflags = vb->vb_flags;
717 if (oflags != nflags)
718 vtbuf_dirty_cell(vb, &vb->vb_cursor);
722 vtbuf_scroll_mode(struct vt_buf *vb, int yes)
727 oflags = vb->vb_flags;
729 vb->vb_flags |= VBF_SCROLL;
731 vb->vb_flags &= ~VBF_SCROLL;
732 nflags = vb->vb_flags;
735 if (oflags != nflags)
736 vtbuf_dirty_cell(vb, &vb->vb_cursor);