/*-
 * Copyright (c) 2009, 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Ed Schouten under sponsorship from the
 * FreeBSD Foundation.
 *
 * Portions of this software were developed by Oleksandr Rybalko
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
37 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/mutex.h>
41 #include <sys/reboot.h>
42 #include <sys/systm.h>
44 #include <dev/vt/vt.h>
46 static MALLOC_DEFINE(M_VTBUF, "vtbuf", "vt buffer");
48 #define VTBUF_LOCK(vb) mtx_lock_spin(&(vb)->vb_lock)
49 #define VTBUF_UNLOCK(vb) mtx_unlock_spin(&(vb)->vb_lock)
51 #define POS_INDEX(c, r) (((r) << 12) + (c))
52 #define POS_COPY(d, s) do { \
53 (d).tp_col = (s).tp_col; \
54 (d).tp_row = (s).tp_row; \
60 * line5 <--- curroffset (terminal output to that line)
62 * line1 <--- roffset (history display from that point)
67 vthistory_seek(struct vt_buf *vb, int offset, int whence)
69 int diff, top, bottom, roffset;
71 /* No scrolling if not enabled. */
72 if ((vb->vb_flags & VBF_SCROLL) == 0) {
73 if (vb->vb_roffset != vb->vb_curroffset) {
74 vb->vb_roffset = vb->vb_curroffset;
77 return (0); /* No changes */
79 top = (vb->vb_flags & VBF_HISTORY_FULL)?
80 (vb->vb_curroffset + vb->vb_scr_size.tp_row):vb->vb_history_size;
81 bottom = vb->vb_curroffset + vb->vb_history_size;
84 * Operate on copy of offset value, since it temporary can be bigger
85 * than amount of rows in buffer.
87 roffset = vb->vb_roffset + vb->vb_history_size;
90 roffset = offset + vb->vb_history_size;
96 /* Go to current offset. */
97 roffset = vb->vb_curroffset + vb->vb_history_size;
101 roffset = (roffset < top)?top:roffset;
102 roffset = (roffset > bottom)?bottom:roffset;
104 roffset %= vb->vb_history_size;
106 if (vb->vb_roffset != roffset) {
107 diff = vb->vb_roffset - roffset;
108 vb->vb_roffset = roffset;
110 * Offset changed, please update Nth lines on sceen.
111 * +N - Nth lines at top;
112 * -N - Nth lines at bottom.
116 return (0); /* No changes */
120 vthistory_addlines(struct vt_buf *vb, int offset)
123 vb->vb_curroffset += offset;
124 if (vb->vb_curroffset < 0)
125 vb->vb_curroffset = 0;
126 vb->vb_curroffset %= vb->vb_history_size;
127 if ((vb->vb_flags & VBF_SCROLL) == 0) {
128 vb->vb_roffset = vb->vb_curroffset;
133 vthistory_getpos(const struct vt_buf *vb, unsigned int *offset)
136 *offset = vb->vb_roffset;
139 #ifndef SC_NO_CUTPASTE /* Only mouse support use it now. */
140 /* Translate current view row number to history row. */
142 vtbuf_wth(struct vt_buf *vb, int row)
145 return ((vb->vb_roffset + row) % vb->vb_history_size);
149 /* Translate history row to current view row number. */
151 vtbuf_htw(struct vt_buf *vb, int row)
156 * History offset roffset winrow
157 * 205 200 ((205 - 200 + 1000) % 1000) = 5
158 * 90 990 ((90 - 990 + 1000) % 1000) = 100
160 return ((row - vb->vb_roffset + vb->vb_history_size) %
161 vb->vb_history_size);
165 vtbuf_iscursor(struct vt_buf *vb, int row, int col)
167 int sc, sr, ec, er, tmp;
169 if ((vb->vb_flags & (VBF_CURSOR|VBF_SCROLL)) == VBF_CURSOR &&
170 (vb->vb_cursor.tp_row == row) && (vb->vb_cursor.tp_col == col))
173 /* Mark cut/paste region. */
176 * Luckily screen view is not like circular buffer, so we will
177 * calculate in screen coordinates. Translate first.
179 sc = vb->vb_mark_start.tp_col;
180 sr = vtbuf_htw(vb, vb->vb_mark_start.tp_row);
181 ec = vb->vb_mark_end.tp_col;
182 er = vtbuf_htw(vb, vb->vb_mark_end.tp_row);
185 /* Swap start and end if start > end. */
186 if (POS_INDEX(sc, sr) > POS_INDEX(ec, er)) {
187 tmp = sc; sc = ec; ec = tmp;
188 tmp = sr; sr = er; er = tmp;
191 if ((POS_INDEX(sc, sr) <= POS_INDEX(col, row)) &&
192 (POS_INDEX(col, row) < POS_INDEX(ec, er)))
198 static inline uint64_t
199 vtbuf_dirty_axis(unsigned int begin, unsigned int end)
201 uint64_t left, right, mask;
204 * Mark all bits between begin % 64 and end % 64 dirty.
205 * This code is functionally equivalent to:
207 * for (i = begin; i < end; i++)
208 * mask |= (uint64_t)1 << (i % 64);
211 /* Obvious case. Mark everything dirty. */
212 if (end - begin >= 64)
215 /* 1....0; used bits on the left. */
216 left = VBM_DIRTY << begin % 64;
217 /* 0....1; used bits on the right. */
218 right = VBM_DIRTY >> -end % 64;
221 * Only take the intersection. If the result of that is 0, it
222 * means that the selection crossed a 64 bit boundary along the
223 * way, which means we have to take the complement.
232 vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area)
236 if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row)
237 vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row;
238 if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col)
239 vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col;
240 if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row)
241 vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row;
242 if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col)
243 vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col;
244 vb->vb_dirtymask.vbm_row |=
245 vtbuf_dirty_axis(area->tr_begin.tp_row, area->tr_end.tp_row);
246 vb->vb_dirtymask.vbm_col |=
247 vtbuf_dirty_axis(area->tr_begin.tp_col, area->tr_end.tp_col);
252 vtbuf_dirty_cell(struct vt_buf *vb, const term_pos_t *p)
257 area.tr_end.tp_row = p->tp_row + 1;
258 area.tr_end.tp_col = p->tp_col + 1;
259 vtbuf_dirty(vb, &area);
263 vtbuf_make_undirty(struct vt_buf *vb)
266 vb->vb_dirtyrect.tr_begin = vb->vb_scr_size;
267 vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0;
268 vb->vb_dirtymask.vbm_row = vb->vb_dirtymask.vbm_col = 0;
272 vtbuf_undirty(struct vt_buf *vb, term_rect_t *r, struct vt_bufmask *m)
276 *r = vb->vb_dirtyrect;
277 *m = vb->vb_dirtymask;
278 vtbuf_make_undirty(vb);
283 vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2)
285 const term_pos_t *p1 = &r->tr_begin;
287 unsigned int rows, cols;
290 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
291 ("vtbuf_copy begin.tp_row %d must be less than screen width %d",
292 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
293 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
294 ("vtbuf_copy begin.tp_col %d must be less than screen height %d",
295 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
297 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
298 ("vtbuf_copy end.tp_row %d must be less than screen width %d",
299 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
300 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
301 ("vtbuf_copy end.tp_col %d must be less than screen height %d",
302 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
304 KASSERT(p2->tp_row < vb->vb_scr_size.tp_row,
305 ("vtbuf_copy tp_row %d must be less than screen width %d",
306 p2->tp_row, vb->vb_scr_size.tp_row));
307 KASSERT(p2->tp_col < vb->vb_scr_size.tp_col,
308 ("vtbuf_copy tp_col %d must be less than screen height %d",
309 p2->tp_col, vb->vb_scr_size.tp_col));
311 rows = r->tr_end.tp_row - r->tr_begin.tp_row;
312 rdiff = r->tr_begin.tp_row - p2->tp_row;
313 cols = r->tr_end.tp_col - r->tr_begin.tp_col;
314 if (r->tr_begin.tp_row > p2->tp_row && r->tr_begin.tp_col == 0 &&
315 r->tr_end.tp_col == vb->vb_scr_size.tp_col && /* Full row. */
316 (rows + rdiff) == vb->vb_scr_size.tp_row && /* Whole screen. */
317 rdiff > 0) { /* Only forward dirrection. Do not eat history. */
318 vthistory_addlines(vb, rdiff);
319 } else if (p2->tp_row < p1->tp_row) {
320 /* Handle overlapping copies of line segments. */
322 for (pr = 0; pr < rows; pr++)
324 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
325 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
326 cols * sizeof(term_char_t));
328 /* Move data down. */
329 for (pr = rows - 1; pr >= 0; pr--)
331 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
332 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
333 cols * sizeof(term_char_t));
337 area.tr_end.tp_row = MIN(p2->tp_row + rows, vb->vb_scr_size.tp_row);
338 area.tr_end.tp_col = MIN(p2->tp_col + cols, vb->vb_scr_size.tp_col);
339 vtbuf_dirty(vb, &area);
343 vtbuf_fill(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
348 for (pr = r->tr_begin.tp_row; pr < r->tr_end.tp_row; pr++) {
349 row = vb->vb_rows[(vb->vb_curroffset + pr) %
350 VTBUF_MAX_HEIGHT(vb)];
351 for (pc = r->tr_begin.tp_col; pc < r->tr_end.tp_col; pc++) {
358 vtbuf_fill_locked(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
360 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
361 ("vtbuf_fill_locked begin.tp_row %d must be < screen height %d",
362 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
363 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
364 ("vtbuf_fill_locked begin.tp_col %d must be < screen width %d",
365 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
367 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
368 ("vtbuf_fill_locked end.tp_row %d must be <= screen height %d",
369 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
370 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
371 ("vtbuf_fill_locked end.tp_col %d must be <= screen width %d",
372 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
375 vtbuf_fill(vb, r, c);
382 vtbuf_init_rows(struct vt_buf *vb)
386 vb->vb_history_size = MAX(vb->vb_history_size, vb->vb_scr_size.tp_row);
388 for (r = 0; r < vb->vb_history_size; r++)
389 vb->vb_rows[r] = &vb->vb_buffer[r * vb->vb_scr_size.tp_col];
393 vtbuf_init_early(struct vt_buf *vb)
397 vb->vb_flags |= VBF_CURSOR;
399 vb->vb_curroffset = 0;
400 vb->vb_mark_start.tp_row = 0;
401 vb->vb_mark_start.tp_col = 0;
402 vb->vb_mark_end.tp_row = 0;
403 vb->vb_mark_end.tp_col = 0;
406 rect.tr_begin.tp_row = rect.tr_begin.tp_col = 0;
407 rect.tr_end = vb->vb_scr_size;
408 vtbuf_fill(vb, &rect, VTBUF_SPACE_CHAR((boothowto & RB_MUTE) == 0 ?
409 TERMINAL_KERN_ATTR : TERMINAL_NORM_ATTR));
410 vtbuf_make_undirty(vb);
411 if ((vb->vb_flags & VBF_MTX_INIT) == 0) {
412 mtx_init(&vb->vb_lock, "vtbuf", NULL, MTX_SPIN);
413 vb->vb_flags |= VBF_MTX_INIT;
418 vtbuf_init(struct vt_buf *vb, const term_pos_t *p)
422 vb->vb_scr_size = *p;
423 vb->vb_history_size = VBF_DEFAULT_HISTORY_SIZE;
425 if ((vb->vb_flags & VBF_STATIC) == 0) {
426 sz = vb->vb_history_size * p->tp_col * sizeof(term_char_t);
427 vb->vb_buffer = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
429 sz = vb->vb_history_size * sizeof(term_char_t *);
430 vb->vb_rows = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
433 vtbuf_init_early(vb);
437 vtbuf_sethistory_size(struct vt_buf *vb, int size)
442 p.tp_row = vb->vb_scr_size.tp_row;
443 p.tp_col = vb->vb_scr_size.tp_col;
444 vtbuf_grow(vb, &p, size);
448 vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, int history_size)
450 term_char_t *old, *new, **rows, **oldrows, **copyrows, *row;
451 int bufsize, rowssize, w, h, c, r;
454 history_size = MAX(history_size, p->tp_row);
456 /* If new screen/history size bigger or buffer is VBF_STATIC. */
457 if ((history_size > vb->vb_history_size) || (p->tp_col >
458 vb->vb_scr_size.tp_col) || (vb->vb_flags & VBF_STATIC)) {
459 /* Allocate new buffer. */
460 bufsize = history_size * p->tp_col * sizeof(term_char_t);
461 new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO);
462 rowssize = history_size * sizeof(term_pos_t *);
463 rows = malloc(rowssize, M_VTBUF, M_WAITOK | M_ZERO);
467 old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer;
468 oldrows = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_rows;
469 copyrows = vb->vb_rows;
470 w = vb->vb_scr_size.tp_col;
471 h = vb->vb_history_size;
473 vb->vb_history_size = history_size;
476 vb->vb_flags &= ~VBF_STATIC;
477 vb->vb_scr_size = *p;
480 /* Copy history and fill extra space. */
481 for (r = 0; r < history_size; r ++) {
483 * XXX VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR) will
484 * extended lines of kernel text using the wrong
488 if (r < h) { /* Copy. */
489 memmove(rows[r], copyrows[r],
490 MIN(p->tp_col, w) * sizeof(term_char_t));
491 for (c = MIN(p->tp_col, w); c < p->tp_col;
493 row[c] = VTBUF_SPACE_CHAR(
496 } else { /* Just fill. */
497 rect.tr_begin.tp_col = 0;
498 rect.tr_begin.tp_row = r;
499 rect.tr_end.tp_col = p->tp_col;
500 rect.tr_end.tp_row = p->tp_row;
501 vtbuf_fill(vb, &rect,
502 VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR));
506 vtbuf_make_undirty(vb);
508 /* Deallocate old buffer. */
510 free(oldrows, M_VTBUF);
512 /* Just update the size. */
513 vb->vb_scr_size = *p;
518 vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c)
522 KASSERT(p->tp_row < vb->vb_scr_size.tp_row,
523 ("vtbuf_putchar tp_row %d must be less than screen width %d",
524 p->tp_row, vb->vb_scr_size.tp_row));
525 KASSERT(p->tp_col < vb->vb_scr_size.tp_col,
526 ("vtbuf_putchar tp_col %d must be less than screen height %d",
527 p->tp_col, vb->vb_scr_size.tp_col));
529 row = vb->vb_rows[(vb->vb_curroffset + p->tp_row) %
530 VTBUF_MAX_HEIGHT(vb)];
531 if (row[p->tp_col] != c) {
535 vtbuf_dirty_cell(vb, p);
540 vtbuf_cursor_position(struct vt_buf *vb, const term_pos_t *p)
543 if (vb->vb_flags & VBF_CURSOR) {
544 vtbuf_dirty_cell(vb, &vb->vb_cursor);
546 vtbuf_dirty_cell(vb, &vb->vb_cursor);
552 #ifndef SC_NO_CUTPASTE
554 vtbuf_mouse_cursor_position(struct vt_buf *vb, int col, int row)
558 area.tr_begin.tp_row = MAX(row - 1, 0);
559 area.tr_begin.tp_col = MAX(col - 1, 0);
560 area.tr_end.tp_row = MIN(row + 2, vb->vb_scr_size.tp_row);
561 area.tr_end.tp_col = MIN(col + 2, vb->vb_scr_size.tp_col);
562 vtbuf_dirty(vb, &area);
566 vtbuf_flush_mark(struct vt_buf *vb)
571 /* Notify renderer to update marked region. */
572 if (vb->vb_mark_start.tp_col || vb->vb_mark_end.tp_col ||
573 vb->vb_mark_start.tp_row || vb->vb_mark_end.tp_row) {
575 s = vtbuf_htw(vb, vb->vb_mark_start.tp_row);
576 e = vtbuf_htw(vb, vb->vb_mark_end.tp_row);
578 area.tr_begin.tp_col = 0;
579 area.tr_begin.tp_row = MIN(s, e);
581 area.tr_end.tp_col = vb->vb_scr_size.tp_col;
582 area.tr_end.tp_row = MAX(s, e) + 1;
584 vtbuf_dirty(vb, &area);
589 vtbuf_get_marked_len(struct vt_buf *vb)
594 /* Swap according to window coordinates. */
595 if (POS_INDEX(vtbuf_htw(vb, vb->vb_mark_start.tp_row),
596 vb->vb_mark_start.tp_col) >
597 POS_INDEX(vtbuf_htw(vb, vb->vb_mark_end.tp_row),
598 vb->vb_mark_end.tp_col)) {
599 POS_COPY(e, vb->vb_mark_start);
600 POS_COPY(s, vb->vb_mark_end);
602 POS_COPY(s, vb->vb_mark_start);
603 POS_COPY(e, vb->vb_mark_end);
606 si = s.tp_row * vb->vb_scr_size.tp_col + s.tp_col;
607 ei = e.tp_row * vb->vb_scr_size.tp_col + e.tp_col;
609 /* Number symbols and number of rows to inject \n */
610 sz = ei - si + ((e.tp_row - s.tp_row) * 2) + 1;
612 return (sz * sizeof(term_char_t));
616 vtbuf_extract_marked(struct vt_buf *vb, term_char_t *buf, int sz)
621 /* Swap according to window coordinates. */
622 if (POS_INDEX(vtbuf_htw(vb, vb->vb_mark_start.tp_row),
623 vb->vb_mark_start.tp_col) >
624 POS_INDEX(vtbuf_htw(vb, vb->vb_mark_end.tp_row),
625 vb->vb_mark_end.tp_col)) {
626 POS_COPY(e, vb->vb_mark_start);
627 POS_COPY(s, vb->vb_mark_end);
629 POS_COPY(s, vb->vb_mark_start);
630 POS_COPY(e, vb->vb_mark_end);
634 for (r = s.tp_row; r <= e.tp_row; r ++) {
635 cs = (r == s.tp_row)?s.tp_col:0;
636 ce = (r == e.tp_row)?e.tp_col:vb->vb_scr_size.tp_col;
637 for (c = cs; c < ce; c ++) {
638 buf[i++] = vb->vb_rows[r][c];
640 /* Add new line for all rows, but not for last one. */
649 vtbuf_set_mark(struct vt_buf *vb, int type, int col, int row)
655 case VTB_MARK_END: /* B1 UP */
656 if (vb->vb_mark_last != VTB_MARK_MOVE)
660 case VTB_MARK_EXTEND:
661 vtbuf_flush_mark(vb); /* Clean old mark. */
662 vb->vb_mark_end.tp_col = col;
663 vb->vb_mark_end.tp_row = vtbuf_wth(vb, row);
666 vtbuf_flush_mark(vb); /* Clean old mark. */
667 vb->vb_mark_start.tp_col = col;
668 vb->vb_mark_start.tp_row = vtbuf_wth(vb, row);
669 /* Start again, so clear end point. */
670 vb->vb_mark_end.tp_col = col;
671 vb->vb_mark_end.tp_row = vtbuf_wth(vb, row);
674 vtbuf_flush_mark(vb); /* Clean old mark. */
675 vb->vb_mark_start.tp_row = vb->vb_mark_end.tp_row =
677 r = vb->vb_rows[vb->vb_mark_start.tp_row];
678 for (i = col; i >= 0; i --) {
679 if (TCHAR_CHARACTER(r[i]) == ' ') {
680 vb->vb_mark_start.tp_col = i + 1;
684 for (i = col; i < vb->vb_scr_size.tp_col; i ++) {
685 if (TCHAR_CHARACTER(r[i]) == ' ') {
686 vb->vb_mark_end.tp_col = i;
690 if (vb->vb_mark_start.tp_col > vb->vb_mark_end.tp_col)
691 vb->vb_mark_start.tp_col = vb->vb_mark_end.tp_col;
694 vtbuf_flush_mark(vb); /* Clean old mark. */
695 vb->vb_mark_start.tp_col = 0;
696 vb->vb_mark_end.tp_col = vb->vb_scr_size.tp_col;
697 vb->vb_mark_start.tp_row = vb->vb_mark_end.tp_row =
701 vb->vb_mark_last = type;
708 vb->vb_mark_last = type;
709 /* Draw new marked region. */
710 vtbuf_flush_mark(vb);
716 vtbuf_cursor_visibility(struct vt_buf *vb, int yes)
721 oflags = vb->vb_flags;
723 vb->vb_flags |= VBF_CURSOR;
725 vb->vb_flags &= ~VBF_CURSOR;
726 nflags = vb->vb_flags;
729 if (oflags != nflags)
730 vtbuf_dirty_cell(vb, &vb->vb_cursor);
734 vtbuf_scroll_mode(struct vt_buf *vb, int yes)
739 oflags = vb->vb_flags;
741 vb->vb_flags |= VBF_SCROLL;
743 vb->vb_flags &= ~VBF_SCROLL;
744 nflags = vb->vb_flags;
747 if (oflags != nflags)
748 vtbuf_dirty_cell(vb, &vb->vb_cursor);