2 * Copyright (c) 2009, 2013 The FreeBSD Foundation
5 * This software was developed by Ed Schouten under sponsorship from the
8 * Portions of this software were developed by Oleksandr Rybalko
9 * under sponsorship from the FreeBSD Foundation.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
37 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/mutex.h>
41 #include <sys/systm.h>
43 #include <dev/vt/vt.h>
/* malloc(9) type tag for all terminal-buffer allocations in this file. */
45 static MALLOC_DEFINE(M_VTBUF, "vtbuf", "vt buffer");
/* Spin mutex protecting a struct vt_buf's offsets, marks and dirty state. */
47 #define VTBUF_LOCK(vb) mtx_lock_spin(&(vb)->vb_lock)
48 #define VTBUF_UNLOCK(vb) mtx_unlock_spin(&(vb)->vb_lock)
/*
 * Collapse a (column, row) pair into one scalar so screen positions can be
 * compared with a single integer comparison; valid while the column count
 * fits in 12 bits.
 */
50 #define POS_INDEX(c, r) (((r) << 12) + (c))
/* Copy a term_pos_t field by field (do/while(0) macro body). */
51 #define POS_COPY(d, s) do { \
52 (d).tp_col = (s).tp_col; \
53 (d).tp_row = (s).tp_row; \
59 * line5 <--- curroffset (terminal output to that line)
61 * line1 <--- roffset (history display from that point)
/*
 * Move the history view offset (vb_roffset) according to offset/whence.
 * Returns 0 when the visible view did not change; nonzero paths are
 * outside this excerpt.  Only meaningful while VBF_SCROLL is set.
 */
66 vthistory_seek(struct vt_buf *vb, int offset, int whence)
68 	int diff, top, bottom, roffset;
70 	/* No scrolling if not enabled. */
71 	if ((vb->vb_flags & VBF_SCROLL) == 0) {
72 		if (vb->vb_roffset != vb->vb_curroffset) {
73 			vb->vb_roffset = vb->vb_curroffset;
76 		return (0); /* No changes */
/*
 * Clamping range for the view: if the history ring has wrapped
 * (VBF_HISTORY_FULL) the oldest visible line sits one screen past the
 * write offset; otherwise it is simply the buffer size.
 */
78 	top = (vb->vb_flags & VBF_HISTORY_FULL)?
79 	    (vb->vb_curroffset + vb->vb_scr_size.tp_row):vb->vb_history_size;
80 	bottom = vb->vb_curroffset + vb->vb_history_size;
83 	 * Operate on a copy of the offset value, since it can temporarily be
84 	 * bigger than the number of rows in the buffer.
86 	roffset = vb->vb_roffset + vb->vb_history_size;
89 		roffset = offset + vb->vb_history_size;
95 		/* Go to current offset. */
96 		roffset = vb->vb_curroffset + vb->vb_history_size;
/* Clamp into [top, bottom], then fold back into the ring. */
100 	roffset = (roffset < top)?top:roffset;
101 	roffset = (roffset > bottom)?bottom:roffset;
103 	roffset %= vb->vb_history_size;
105 	if (vb->vb_roffset != roffset) {
106 		diff = vb->vb_roffset - roffset;
107 		vb->vb_roffset = roffset;
109 	 * Offset changed, please update Nth lines on screen.
110 	 * +N - Nth lines at top;
111 	 * -N - Nth lines at bottom.
115 	return (0);	/* No changes */
/*
 * Advance the history write offset (vb_curroffset) by 'offset' rows,
 * clamping at zero and wrapping modulo the ring size.  When the user is
 * not scrolling back, keep the view offset pinned to the live output.
 */
119 vthistory_addlines(struct vt_buf *vb, int offset)
122 	vb->vb_curroffset += offset;
123 	if (vb->vb_curroffset < 0)
124 		vb->vb_curroffset = 0;
125 	vb->vb_curroffset %= vb->vb_history_size;
126 	if ((vb->vb_flags & VBF_SCROLL) == 0) {
127 		vb->vb_roffset = vb->vb_curroffset;
/* Report the current history view offset through *offset. */
132 vthistory_getpos(const struct vt_buf *vb, unsigned int *offset)
135 	*offset = vb->vb_roffset;
138 #ifndef SC_NO_CUTPASTE /* Only mouse support uses it now. */
139 /* Translate current view row number to history row. */
141 vtbuf_wth(struct vt_buf *vb, int row)
144 	return ((vb->vb_roffset + row) % vb->vb_history_size);
148 /* Translate history row to current view row number. */
150 vtbuf_htw(struct vt_buf *vb, int row)
155  * History offset roffset winrow
156  * 205 200 ((205 - 200 + 1000) % 1000) = 5
157  * 90 990 ((90 - 990 + 1000) % 1000) = 100
/* Adding vb_history_size keeps the dividend non-negative before '%'. */
159 	return ((row - vb->vb_roffset + vb->vb_history_size) %
160 	    vb->vb_history_size);
/*
 * Decide whether screen cell (row, col) should be drawn highlighted:
 * either it is the text cursor (visible and not in scrollback mode), or
 * it falls inside the current cut/paste mark region.
 */
164 vtbuf_iscursor(struct vt_buf *vb, int row, int col)
166 	int sc, sr, ec, er, tmp;
/* Cursor is only shown when VBF_CURSOR is set and VBF_SCROLL is not. */
168 	if ((vb->vb_flags & (VBF_CURSOR|VBF_SCROLL)) == VBF_CURSOR &&
169 	    (vb->vb_cursor.tp_row == row) && (vb->vb_cursor.tp_col == col))
172 	/* Mark cut/paste region. */
175 	 * Luckily screen view is not like circular buffer, so we will
176 	 * calculate in screen coordinates. Translate first.
178 	sc = vb->vb_mark_start.tp_col;
179 	sr = vtbuf_htw(vb, vb->vb_mark_start.tp_row);
180 	ec = vb->vb_mark_end.tp_col;
181 	er = vtbuf_htw(vb, vb->vb_mark_end.tp_row);
184 	/* Swap start and end if start > end. */
185 	if (POS_INDEX(sc, sr) > POS_INDEX(ec, er)) {
186 		tmp = sc; sc = ec; ec = tmp;
187 		tmp = sr; sr = er; er = tmp;
/* Half-open interval: start is inclusive, end is exclusive. */
190 	if ((POS_INDEX(sc, sr) <= POS_INDEX(col, row)) &&
191 	    (POS_INDEX(col, row) < POS_INDEX(ec, er)))
/*
 * Build a 64-bit dirty mask for one axis covering rows/columns
 * [begin, end) modulo 64.
 */
197 static inline uint64_t
198 vtbuf_dirty_axis(unsigned int begin, unsigned int end)
200 	uint64_t left, right, mask;
203 	 * Mark all bits between begin % 64 and end % 64 dirty.
204 	 * This code is functionally equivalent to:
206 	 * for (i = begin; i < end; i++)
207 	 * 	mask |= (uint64_t)1 << (i % 64);
210 	/* Obvious case. Mark everything dirty. */
211 	if (end - begin >= 64)
214 	/* 1....0; used bits on the left. */
215 	left = VBM_DIRTY << begin % 64;
/* Note: -end % 64 is the unsigned complement, i.e. (64 - end % 64) % 64. */
216 	/* 0....1; used bits on the right. */
217 	right = VBM_DIRTY >> -end % 64;
220 	 * Only take the intersection. If the result of that is 0, it
221 	 * means that the selection crossed a 64 bit boundary along the
222 	 * way, which means we have to take the complement.
/*
 * Grow the dirty rectangle to enclose 'area' and OR the corresponding
 * row/column bits into the dirty masks.  Caller holds the buffer lock.
 */
231 vtbuf_dirty_locked(struct vt_buf *vb, const term_rect_t *area)
234 	if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row)
235 		vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row;
236 	if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col)
237 		vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col;
238 	if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row)
239 		vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row;
240 	if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col)
241 		vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col;
242 	vb->vb_dirtymask.vbm_row |=
243 	    vtbuf_dirty_axis(area->tr_begin.tp_row, area->tr_end.tp_row);
244 	vb->vb_dirtymask.vbm_col |=
245 	    vtbuf_dirty_axis(area->tr_begin.tp_col, area->tr_end.tp_col);
/* Locked wrapper around vtbuf_dirty_locked(). */
249 vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area)
253 	vtbuf_dirty_locked(vb, area);
/* Mark the single cell at *p dirty (1x1 rectangle; end is exclusive). */
258 vtbuf_dirty_cell_locked(struct vt_buf *vb, const term_pos_t *p)
263 	area.tr_end.tp_row = p->tp_row + 1;
264 	area.tr_end.tp_col = p->tp_col + 1;
265 	vtbuf_dirty_locked(vb, &area);
/*
 * Reset dirty tracking to "nothing dirty": begin past the screen, end at
 * zero, so any future vtbuf_dirty_locked() call re-seeds the rectangle.
 */
269 vtbuf_make_undirty(struct vt_buf *vb)
272 	vb->vb_dirtyrect.tr_begin = vb->vb_scr_size;
273 	vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0;
274 	vb->vb_dirtymask.vbm_row = vb->vb_dirtymask.vbm_col = 0;
/*
 * Hand the accumulated dirty rectangle and masks to the caller (the
 * renderer) and atomically reset the tracking state.
 */
278 vtbuf_undirty(struct vt_buf *vb, term_rect_t *r, struct vt_bufmask *m)
282 	*r = vb->vb_dirtyrect;
283 	*m = vb->vb_dirtymask;
284 	vtbuf_make_undirty(vb);
/*
 * Copy the rectangle *r to the position *p2 (its new top-left corner).
 * A full-width, whole-screen upward copy is recognized as a scroll and
 * handled by rotating the history ring instead of moving characters.
 * The destination area is marked dirty at the end.
 */
289 vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2)
291 	const term_pos_t *p1 = &r->tr_begin;
293 	unsigned int rows, cols;
296 	KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
297 	    ("vtbuf_copy begin.tp_row %d must be less than screen width %d",
298 	    r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
299 	KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
300 	    ("vtbuf_copy begin.tp_col %d must be less than screen height %d",
301 	    r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
303 	KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
304 	    ("vtbuf_copy end.tp_row %d must be less than screen width %d",
305 	    r->tr_end.tp_row, vb->vb_scr_size.tp_row));
306 	KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
307 	    ("vtbuf_copy end.tp_col %d must be less than screen height %d",
308 	    r->tr_end.tp_col, vb->vb_scr_size.tp_col));
310 	KASSERT(p2->tp_row < vb->vb_scr_size.tp_row,
311 	    ("vtbuf_copy tp_row %d must be less than screen width %d",
312 	    p2->tp_row, vb->vb_scr_size.tp_row));
313 	KASSERT(p2->tp_col < vb->vb_scr_size.tp_col,
314 	    ("vtbuf_copy tp_col %d must be less than screen height %d",
315 	    p2->tp_col, vb->vb_scr_size.tp_col));
/* rdiff > 0 means the block moves up by rdiff rows. */
317 	rows = r->tr_end.tp_row - r->tr_begin.tp_row;
318 	rdiff = r->tr_begin.tp_row - p2->tp_row;
319 	cols = r->tr_end.tp_col - r->tr_begin.tp_col;
320 	if (r->tr_begin.tp_row > p2->tp_row && r->tr_begin.tp_col == 0 &&
321 	    r->tr_end.tp_col == vb->vb_scr_size.tp_col && /* Full row. */
322 	    (rows + rdiff) == vb->vb_scr_size.tp_row && /* Whole screen. */
323 	    rdiff > 0) { /* Only forward direction. Do not eat history. */
324 		vthistory_addlines(vb, rdiff);
325 	} else if (p2->tp_row < p1->tp_row) {
326 		/* Handle overlapping copies of line segments. */
/* Moving up: iterate top-down so the source is read before overwrite. */
328 		for (pr = 0; pr < rows; pr++)
330 			    &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
331 			    &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
332 			    cols * sizeof(term_char_t));
334 		/* Move data down. */
/* Moving down: iterate bottom-up for the same overlap reason. */
335 		for (pr = rows - 1; pr >= 0; pr--)
337 			    &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
338 			    &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
339 			    cols * sizeof(term_char_t));
/* Mark destination dirty, clipped to the screen bounds. */
343 	area.tr_end.tp_row = MIN(p2->tp_row + rows, vb->vb_scr_size.tp_row);
344 	area.tr_end.tp_col = MIN(p2->tp_col + cols, vb->vb_scr_size.tp_col);
345 	vtbuf_dirty(vb, &area);
/*
 * Fill every cell inside rectangle *r with character c.  Rows are
 * addressed through the circular row table relative to vb_curroffset.
 */
349 vtbuf_fill(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
354 	for (pr = r->tr_begin.tp_row; pr < r->tr_end.tp_row; pr++) {
355 		row = vb->vb_rows[(vb->vb_curroffset + pr) %
356 		    VTBUF_MAX_HEIGHT(vb)];
357 		for (pc = r->tr_begin.tp_col; pc < r->tr_end.tp_col; pc++) {
/*
 * Bounds-checked, locked wrapper: fill rectangle *r with c and record
 * the area as dirty.
 */
364 vtbuf_fill_locked(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
366 	KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
367 	    ("vtbuf_fill_locked begin.tp_row %d must be < screen width %d",
368 	    r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
369 	KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
370 	    ("vtbuf_fill_locked begin.tp_col %d must be < screen height %d",
371 	    r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
373 	KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
374 	    ("vtbuf_fill_locked end.tp_row %d must be <= screen width %d",
375 	    r->tr_end.tp_row, vb->vb_scr_size.tp_row));
376 	KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
377 	    ("vtbuf_fill_locked end.tp_col %d must be <= screen height %d",
378 	    r->tr_end.tp_col, vb->vb_scr_size.tp_col));
381 	vtbuf_fill(vb, r, c);
382 	vtbuf_dirty_locked(vb, r);
/*
 * (Re)build the row-pointer table: history must hold at least one full
 * screen, and each entry points at a row-sized slice of vb_buffer.
 */
387 vtbuf_init_rows(struct vt_buf *vb)
391 	vb->vb_history_size = MAX(vb->vb_history_size, vb->vb_scr_size.tp_row);
393 	for (r = 0; r < vb->vb_history_size; r++)
394 		vb->vb_rows[r] = &vb->vb_buffer[r *
395 		    vb->vb_scr_size.tp_col];
/*
 * Early (pre-malloc) initialization: reset offsets and mark points,
 * clear dirty state, and initialize the spin lock exactly once
 * (guarded by VBF_MTX_INIT so re-entry is safe).
 */
399 vtbuf_init_early(struct vt_buf *vb)
402 	vb->vb_flags |= VBF_CURSOR;
404 	vb->vb_curroffset = 0;
405 	vb->vb_mark_start.tp_row = 0;
406 	vb->vb_mark_start.tp_col = 0;
407 	vb->vb_mark_end.tp_row = 0;
408 	vb->vb_mark_end.tp_col = 0;
411 	vtbuf_make_undirty(vb);
412 	if ((vb->vb_flags & VBF_MTX_INIT) == 0) {
413 		mtx_init(&vb->vb_lock, "vtbuf", NULL, MTX_SPIN);
414 		vb->vb_flags |= VBF_MTX_INIT;
/*
 * Full initialization for screen size *p: allocate the character buffer
 * and row table unless the buffer is static (VBF_STATIC, i.e. supplied
 * at compile time), then run the early setup.
 */
419 vtbuf_init(struct vt_buf *vb, const term_pos_t *p)
423 	vb->vb_scr_size = *p;
424 	vb->vb_history_size = VBF_DEFAULT_HISTORY_SIZE;
426 	if ((vb->vb_flags & VBF_STATIC) == 0) {
427 		sz = vb->vb_history_size * p->tp_col * sizeof(term_char_t);
428 		vb->vb_buffer = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
430 		sz = vb->vb_history_size * sizeof(term_char_t *);
431 		vb->vb_rows = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
434 	vtbuf_init_early(vb);
/* Resize history depth only: regrow with the current screen dimensions. */
438 vtbuf_sethistory_size(struct vt_buf *vb, int size)
443 	p.tp_row = vb->vb_scr_size.tp_row;
444 	p.tp_col = vb->vb_scr_size.tp_col;
445 	vtbuf_grow(vb, &p, size);
/*
 * Grow the terminal buffer to screen size *p with at least history_size
 * rows of scrollback.  When either the history depth or the column count
 * increases, a new character buffer and row table are allocated, old
 * contents are copied over (new cells padded with VTBUF_SPACE_CHAR), and
 * the previous dynamic allocations are released.  A static (compile-time)
 * buffer is never freed; VBF_STATIC is cleared after the switch.
 */
449 vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, int history_size)
451 	term_char_t *old, *new, **rows, **oldrows, **copyrows, *row;
452 	int bufsize, rowssize, w, h, c, r;
/* History must hold at least one full screen of rows. */
455 	history_size = MAX(history_size, p->tp_row);
457 	if (history_size > vb->vb_history_size || p->tp_col >
458 	    vb->vb_scr_size.tp_col) {
459 		/* Allocate new buffer. */
460 		bufsize = history_size * p->tp_col * sizeof(term_char_t);
461 		new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO);
/* Fixed: 'rows' is term_char_t **, so size by term_char_t *, not term_pos_t *. */
462 		rowssize = history_size * sizeof(term_char_t *);
463 		rows = malloc(rowssize, M_VTBUF, M_WAITOK | M_ZERO);
/* Remember old allocations; static buffers must never be freed. */
467 		old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer;
468 		oldrows = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_rows;
469 		copyrows = vb->vb_rows;
470 		w = vb->vb_scr_size.tp_col;
471 		h = vb->vb_history_size;
473 		vb->vb_history_size = history_size;
/* From now on the buffer is dynamically allocated. */
476 		vb->vb_flags &= ~VBF_STATIC;
477 		vb->vb_scr_size = *p;
480 		/* Copy history and fill extra space. */
481 		for (r = 0; r < history_size; r ++) {
483 			if (r < h) { /* Copy. */
484 				memmove(rows[r], copyrows[r],
485 				    MIN(p->tp_col, w) * sizeof(term_char_t));
/* Pad the widened part of an existing row with blanks. */
486 				for (c = MIN(p->tp_col, w); c < p->tp_col;
488 					row[c] = VTBUF_SPACE_CHAR;
490 			} else { /* Just fill. */
491 				rect.tr_begin.tp_col = 0;
492 				rect.tr_begin.tp_row = r;
493 				rect.tr_end.tp_col = p->tp_col;
494 				rect.tr_end.tp_row = p->tp_row;
495 				vtbuf_fill(vb, &rect, VTBUF_SPACE_CHAR);
/* Everything was rewritten; renderer must repaint from scratch. */
499 		vtbuf_make_undirty(vb);
501 		/* Deallocate old buffer. */
503 		free(oldrows, M_VTBUF);
/*
 * Store character c at position *p.  The write and the dirty-cell update
 * are skipped entirely when the cell already holds c, avoiding needless
 * repaints.
 */
508 vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c)
512 	KASSERT(p->tp_row < vb->vb_scr_size.tp_row,
513 	    ("vtbuf_putchar tp_row %d must be less than screen width %d",
514 	    p->tp_row, vb->vb_scr_size.tp_row));
515 	KASSERT(p->tp_col < vb->vb_scr_size.tp_col,
516 	    ("vtbuf_putchar tp_col %d must be less than screen height %d",
517 	    p->tp_col, vb->vb_scr_size.tp_col));
/* Row is found through the circular table relative to the write offset. */
519 	row = vb->vb_rows[(vb->vb_curroffset + p->tp_row) %
520 	    VTBUF_MAX_HEIGHT(vb)];
521 	if (row[p->tp_col] != c) {
524 		vtbuf_dirty_cell_locked(vb, p);
/*
 * Move the text cursor to *p.  When the cursor is visible, both the old
 * and the new cell are dirtied so the renderer erases and redraws it.
 */
530 vtbuf_cursor_position(struct vt_buf *vb, const term_pos_t *p)
533 	if (vb->vb_flags & VBF_CURSOR) {
/* Dirty old position (before the move) ... */
535 		vtbuf_dirty_cell_locked(vb, &vb->vb_cursor);
/* ... and the new position (after vb_cursor is updated, elided here). */
537 		vtbuf_dirty_cell_locked(vb, &vb->vb_cursor);
544 #ifndef SC_NO_CUTPASTE
/*
 * Dirty a 3x3 cell neighborhood around the mouse pointer at (col, row),
 * clamped to the screen, so the pointer overlay gets redrawn.
 */
546 vtbuf_mouse_cursor_position(struct vt_buf *vb, int col, int row)
550 	area.tr_begin.tp_row = MAX(row - 1, 0);
551 	area.tr_begin.tp_col = MAX(col - 1, 0);
552 	area.tr_end.tp_row = MIN(row + 2, vb->vb_scr_size.tp_row);
553 	area.tr_end.tp_col = MIN(col + 2, vb->vb_scr_size.tp_col);
554 	vtbuf_dirty(vb, &area);
/*
 * Dirty the full-width band of screen rows spanned by the current mark
 * region (if any mark is set), so the renderer redraws the selection.
 */
558 vtbuf_flush_mark(struct vt_buf *vb)
563 	/* Notify renderer to update marked region. */
564 	if (vb->vb_mark_start.tp_col || vb->vb_mark_end.tp_col ||
565 	    vb->vb_mark_start.tp_row || vb->vb_mark_end.tp_row) {
/* Mark rows are stored as history rows; convert to view rows first. */
567 		s = vtbuf_htw(vb, vb->vb_mark_start.tp_row);
568 		e = vtbuf_htw(vb, vb->vb_mark_end.tp_row);
570 		area.tr_begin.tp_col = 0;
571 		area.tr_begin.tp_row = MIN(s, e);
573 		area.tr_end.tp_col = vb->vb_scr_size.tp_col;
574 		area.tr_end.tp_row = MAX(s, e) + 1;
576 		vtbuf_dirty(vb, &area);
/*
 * Return the buffer size in bytes needed by vtbuf_extract_marked() for
 * the current selection: the cell count plus room for injected newlines
 * and a terminator, scaled by sizeof(term_char_t).
 */
581 vtbuf_get_marked_len(struct vt_buf *vb)
586 	/* Swap according to window coordinates. */
587 	if (POS_INDEX(vtbuf_htw(vb, vb->vb_mark_start.tp_row),
588 	    vb->vb_mark_start.tp_col) >
589 	    POS_INDEX(vtbuf_htw(vb, vb->vb_mark_end.tp_row),
590 	    vb->vb_mark_end.tp_col)) {
591 		POS_COPY(e, vb->vb_mark_start);
592 		POS_COPY(s, vb->vb_mark_end);
594 		POS_COPY(s, vb->vb_mark_start);
595 		POS_COPY(e, vb->vb_mark_end);
/* Linearize both endpoints to count the cells in between. */
598 	si = s.tp_row * vb->vb_scr_size.tp_col + s.tp_col;
599 	ei = e.tp_row * vb->vb_scr_size.tp_col + e.tp_col;
601 	/* Number of symbols plus space for a newline per spanned row. */
602 	sz = ei - si + ((e.tp_row - s.tp_row) * 2) + 1;
604 	return (sz * sizeof(term_char_t));
/*
 * Copy the characters of the current selection into buf (sized per
 * vtbuf_get_marked_len()), walking row by row; the first row starts at
 * the mark's start column and the last row ends at the end column.
 */
608 vtbuf_extract_marked(struct vt_buf *vb, term_char_t *buf, int sz)
613 	/* Swap according to window coordinates. */
614 	if (POS_INDEX(vtbuf_htw(vb, vb->vb_mark_start.tp_row),
615 	    vb->vb_mark_start.tp_col) >
616 	    POS_INDEX(vtbuf_htw(vb, vb->vb_mark_end.tp_row),
617 	    vb->vb_mark_end.tp_col)) {
618 		POS_COPY(e, vb->vb_mark_start);
619 		POS_COPY(s, vb->vb_mark_end);
621 		POS_COPY(s, vb->vb_mark_start);
622 		POS_COPY(e, vb->vb_mark_end);
626 	for (r = s.tp_row; r <= e.tp_row; r ++) {
/* Interior rows span the full width; edge rows are clipped to the mark. */
627 		cs = (r == s.tp_row)?s.tp_col:0;
628 		ce = (r == e.tp_row)?e.tp_col:vb->vb_scr_size.tp_col;
629 		for (c = cs; c < ce; c ++) {
630 			buf[i++] = vb->vb_rows[r][c];
632 		/* Add a newline for every row except the last one. */
/*
 * Update the cut/paste mark from a mouse event of the given type
 * (start/end/extend/word/row selection).  Each case first flushes the
 * old mark so its on-screen highlight is erased, then records the new
 * endpoints in history-row coordinates (via vtbuf_wth) and finally
 * flushes again to draw the new region.
 */
641 vtbuf_set_mark(struct vt_buf *vb, int type, int col, int row)
647 	case VTB_MARK_END: /* B1 UP */
/* Only complete a selection that a MOVE actually started. */
648 		if (vb->vb_mark_last != VTB_MARK_MOVE)
652 	case VTB_MARK_EXTEND:
653 		vtbuf_flush_mark(vb); /* Clean old mark. */
654 		vb->vb_mark_end.tp_col = col;
655 		vb->vb_mark_end.tp_row = vtbuf_wth(vb, row);
658 		vtbuf_flush_mark(vb); /* Clean old mark. */
659 		vb->vb_mark_start.tp_col = col;
660 		vb->vb_mark_start.tp_row = vtbuf_wth(vb, row);
661 		/* Start again, so clear end point. */
662 		vb->vb_mark_end.tp_col = col;
663 		vb->vb_mark_end.tp_row = vtbuf_wth(vb, row);
/* Word selection: expand left and right until a space is found. */
666 		vtbuf_flush_mark(vb); /* Clean old mark. */
667 		vb->vb_mark_start.tp_row = vb->vb_mark_end.tp_row =
669 		r = vb->vb_rows[vb->vb_mark_start.tp_row];
670 		for (i = col; i >= 0; i --) {
671 			if (TCHAR_CHARACTER(r[i]) == ' ') {
672 				vb->vb_mark_start.tp_col = i + 1;
676 		for (i = col; i < vb->vb_scr_size.tp_col; i ++) {
677 			if (TCHAR_CHARACTER(r[i]) == ' ') {
678 				vb->vb_mark_end.tp_col = i;
/* Degenerate (all-space) case: collapse to an empty selection. */
682 		if (vb->vb_mark_start.tp_col > vb->vb_mark_end.tp_col)
683 			vb->vb_mark_start.tp_col = vb->vb_mark_end.tp_col;
/* Row selection: mark one full-width row. */
686 		vtbuf_flush_mark(vb); /* Clean old mark. */
687 		vb->vb_mark_start.tp_col = 0;
688 		vb->vb_mark_end.tp_col = vb->vb_scr_size.tp_col;
689 		vb->vb_mark_start.tp_row = vb->vb_mark_end.tp_row =
693 		vb->vb_mark_last = type;
/* Remember the event type and repaint the new region. */
700 	vb->vb_mark_last = type;
701 	/* Draw new marked region. */
702 	vtbuf_flush_mark(vb);
/*
 * Show (yes != 0) or hide the text cursor; the cursor cell is dirtied
 * only when the visibility flag actually changed.
 */
708 vtbuf_cursor_visibility(struct vt_buf *vb, int yes)
713 	oflags = vb->vb_flags;
715 		vb->vb_flags |= VBF_CURSOR;
717 		vb->vb_flags &= ~VBF_CURSOR;
718 	nflags = vb->vb_flags;
720 	if (oflags != nflags)
721 		vtbuf_dirty_cell_locked(vb, &vb->vb_cursor);
/*
 * Enter (yes != 0) or leave scrollback mode; dirty the cursor cell only
 * on an actual state change, since VBF_SCROLL suppresses cursor drawing.
 */
726 vtbuf_scroll_mode(struct vt_buf *vb, int yes)
731 	oflags = vb->vb_flags;
733 		vb->vb_flags |= VBF_SCROLL;
735 		vb->vb_flags &= ~VBF_SCROLL;
736 	nflags = vb->vb_flags;
738 	if (oflags != nflags)
739 		vtbuf_dirty_cell_locked(vb, &vb->vb_cursor);