/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Ed Schouten under sponsorship from the
 * FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <dev/vt/vt.h>
42 static MALLOC_DEFINE(M_VTBUF, "vtbuf", "vt buffer");
44 #define VTBUF_LOCK(vb) mtx_lock_spin(&(vb)->vb_lock)
45 #define VTBUF_UNLOCK(vb) mtx_unlock_spin(&(vb)->vb_lock)
48 * line5 <--- curroffset (terminal output to that line)
50 * line1 <--- roffset (history display from that point)
55 vthistory_seek(struct vt_buf *vb, int offset, int whence)
57 int top, bottom, roffset;
59 /* No scrolling if not enabled. */
60 if ((vb->vb_flags & VBF_SCROLL) == 0) {
61 if (vb->vb_roffset != vb->vb_curroffset) {
62 vb->vb_roffset = vb->vb_curroffset;
65 return (0); /* No changes */
67 top = (vb->vb_flags & VBF_HISTORY_FULL)?
68 (vb->vb_curroffset + vb->vb_scr_size.tp_row):vb->vb_history_size;
69 bottom = vb->vb_curroffset + vb->vb_history_size;
72 * Operate on copy of offset value, since it temporary can be bigger
73 * than amount of rows in buffer.
75 roffset = vb->vb_roffset + vb->vb_history_size;
84 /* Go to current offset. */
85 roffset = vb->vb_curroffset;
89 roffset = (roffset < top)?top:roffset;
90 roffset = (roffset > bottom)?bottom:roffset;
92 roffset %= vb->vb_history_size;
94 if (vb->vb_roffset != roffset) {
95 vb->vb_roffset = roffset;
96 return (1); /* Offset changed, please update sceen. */
98 return (0); /* No changes */
102 vthistory_addlines(struct vt_buf *vb, int offset)
105 vb->vb_curroffset += offset;
106 if (vb->vb_curroffset < 0)
107 vb->vb_curroffset = 0;
108 vb->vb_curroffset %= vb->vb_history_size;
109 if ((vb->vb_flags & VBF_SCROLL) == 0) {
110 vb->vb_roffset = vb->vb_curroffset;
115 vthistory_getpos(const struct vt_buf *vb, unsigned int *offset)
118 *offset = vb->vb_roffset;
121 static inline uint64_t
122 vtbuf_dirty_axis(unsigned int begin, unsigned int end)
124 uint64_t left, right, mask;
127 * Mark all bits between begin % 64 and end % 64 dirty.
128 * This code is functionally equivalent to:
130 * for (i = begin; i < end; i++)
131 * mask |= (uint64_t)1 << (i % 64);
134 /* Obvious case. Mark everything dirty. */
135 if (end - begin >= 64)
138 /* 1....0; used bits on the left. */
139 left = VBM_DIRTY << begin % 64;
140 /* 0....1; used bits on the right. */
141 right = VBM_DIRTY >> -end % 64;
144 * Only take the intersection. If the result of that is 0, it
145 * means that the selection crossed a 64 bit boundary along the
146 * way, which means we have to take the complement.
155 vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area)
159 if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row)
160 vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row;
161 if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col)
162 vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col;
163 if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row)
164 vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row;
165 if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col)
166 vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col;
167 vb->vb_dirtymask.vbm_row |=
168 vtbuf_dirty_axis(area->tr_begin.tp_row, area->tr_end.tp_row);
169 vb->vb_dirtymask.vbm_col |=
170 vtbuf_dirty_axis(area->tr_begin.tp_col, area->tr_end.tp_col);
175 vtbuf_dirty_cell(struct vt_buf *vb, const term_pos_t *p)
180 area.tr_end.tp_row = p->tp_row + 1;
181 area.tr_end.tp_col = p->tp_col + 1;
182 vtbuf_dirty(vb, &area);
186 vtbuf_make_undirty(struct vt_buf *vb)
189 vb->vb_dirtyrect.tr_begin = vb->vb_scr_size;
190 vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0;
191 vb->vb_dirtymask.vbm_row = vb->vb_dirtymask.vbm_col = 0;
195 vtbuf_undirty(struct vt_buf *vb, term_rect_t *r, struct vt_bufmask *m)
199 *r = vb->vb_dirtyrect;
200 *m = vb->vb_dirtymask;
201 vtbuf_make_undirty(vb);
206 vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2)
208 const term_pos_t *p1 = &r->tr_begin;
210 unsigned int rows, cols;
213 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
214 ("vtbuf_copy begin.tp_row %d must be less than screen width %d",
215 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
216 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
217 ("vtbuf_copy begin.tp_col %d must be less than screen height %d",
218 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
220 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
221 ("vtbuf_copy end.tp_row %d must be less than screen width %d",
222 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
223 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
224 ("vtbuf_copy end.tp_col %d must be less than screen height %d",
225 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
227 KASSERT(p2->tp_row < vb->vb_scr_size.tp_row,
228 ("vtbuf_copy tp_row %d must be less than screen width %d",
229 p2->tp_row, vb->vb_scr_size.tp_row));
230 KASSERT(p2->tp_col < vb->vb_scr_size.tp_col,
231 ("vtbuf_copy tp_col %d must be less than screen height %d",
232 p2->tp_col, vb->vb_scr_size.tp_col));
234 rows = r->tr_end.tp_row - r->tr_begin.tp_row;
235 rdiff = r->tr_begin.tp_row - p2->tp_row;
236 cols = r->tr_end.tp_col - r->tr_begin.tp_col;
237 if (r->tr_begin.tp_row > p2->tp_row && r->tr_begin.tp_col == 0 &&
238 r->tr_end.tp_col == vb->vb_scr_size.tp_col && /* Full row. */
239 (rows + rdiff) == vb->vb_scr_size.tp_row && /* Whole screen. */
240 rdiff > 0) { /* Only forward dirrection. Do not eat history. */
241 vthistory_addlines(vb, rdiff);
242 } else if (p2->tp_row < p1->tp_row) {
243 /* Handle overlapping copies of line segments. */
245 for (pr = 0; pr < rows; pr++)
247 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
248 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
249 cols * sizeof(term_char_t));
251 /* Move data down. */
252 for (pr = rows - 1; pr >= 0; pr--)
254 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
255 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
256 cols * sizeof(term_char_t));
260 area.tr_end.tp_row = MIN(p2->tp_row + rows, vb->vb_scr_size.tp_row);
261 area.tr_end.tp_col = MIN(p2->tp_col + cols, vb->vb_scr_size.tp_col);
262 vtbuf_dirty(vb, &area);
266 vtbuf_fill(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
271 for (pr = r->tr_begin.tp_row; pr < r->tr_end.tp_row; pr++) {
272 row = vb->vb_rows[(vb->vb_curroffset + pr) %
273 VTBUF_MAX_HEIGHT(vb)];
274 for (pc = r->tr_begin.tp_col; pc < r->tr_end.tp_col; pc++) {
281 vtbuf_fill_locked(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
283 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
284 ("vtbuf_fill_locked begin.tp_row %d must be < screen width %d",
285 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
286 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
287 ("vtbuf_fill_locked begin.tp_col %d must be < screen height %d",
288 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
290 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
291 ("vtbuf_fill_locked end.tp_row %d must be <= screen width %d",
292 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
293 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
294 ("vtbuf_fill_locked end.tp_col %d must be <= screen height %d",
295 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
298 vtbuf_fill(vb, r, c);
305 vtbuf_init_rows(struct vt_buf *vb)
309 vb->vb_history_size = MAX(vb->vb_history_size, vb->vb_scr_size.tp_row);
311 for (r = 0; r < vb->vb_history_size; r++)
312 vb->vb_rows[r] = &vb->vb_buffer[r *
313 vb->vb_scr_size.tp_col];
317 vtbuf_init_early(struct vt_buf *vb)
320 vb->vb_flags |= VBF_CURSOR;
322 vb->vb_curroffset = 0;
325 vtbuf_make_undirty(vb);
326 if ((vb->vb_flags & VBF_MTX_INIT) == 0) {
327 mtx_init(&vb->vb_lock, "vtbuf", NULL, MTX_SPIN);
328 vb->vb_flags |= VBF_MTX_INIT;
333 vtbuf_init(struct vt_buf *vb, const term_pos_t *p)
337 vb->vb_scr_size = *p;
338 vb->vb_history_size = VBF_DEFAULT_HISTORY_SIZE;
340 if ((vb->vb_flags & VBF_STATIC) == 0) {
341 sz = vb->vb_history_size * p->tp_col * sizeof(term_char_t);
342 vb->vb_buffer = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
344 sz = vb->vb_history_size * sizeof(term_char_t *);
345 vb->vb_rows = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
348 vtbuf_init_early(vb);
352 vtbuf_sethistory_size(struct vt_buf *vb, int size)
357 p.tp_row = vb->vb_scr_size.tp_row;
358 p.tp_col = vb->vb_scr_size.tp_col;
359 vtbuf_grow(vb, &p, size);
363 vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, int history_size)
365 term_char_t *old, *new, **rows, **oldrows, **copyrows, *row;
366 int bufsize, rowssize, w, h, c, r;
369 history_size = MAX(history_size, p->tp_row);
371 if (history_size > vb->vb_history_size || p->tp_col >
372 vb->vb_scr_size.tp_col) {
373 /* Allocate new buffer. */
374 bufsize = history_size * p->tp_col * sizeof(term_char_t);
375 new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO);
376 rowssize = history_size * sizeof(term_pos_t *);
377 rows = malloc(rowssize, M_VTBUF, M_WAITOK | M_ZERO);
381 old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer;
382 oldrows = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_rows;
383 copyrows = vb->vb_rows;
384 w = vb->vb_scr_size.tp_col;
385 h = vb->vb_history_size;
387 vb->vb_history_size = history_size;
390 vb->vb_flags &= ~VBF_STATIC;
391 vb->vb_scr_size = *p;
394 /* Copy history and fill extra space. */
395 for (r = 0; r < history_size; r ++) {
397 if (r < h) { /* Copy. */
398 memmove(rows[r], copyrows[r],
399 MIN(p->tp_col, w) * sizeof(term_char_t));
400 for (c = MIN(p->tp_col, w); c < p->tp_col;
402 row[c] = VTBUF_SPACE_CHAR;
404 } else { /* Just fill. */
405 rect.tr_begin.tp_col = 0;
406 rect.tr_begin.tp_row = r;
407 rect.tr_end.tp_col = p->tp_col;
408 rect.tr_end.tp_row = p->tp_row;
409 vtbuf_fill(vb, &rect, VTBUF_SPACE_CHAR);
413 vtbuf_make_undirty(vb);
415 /* Deallocate old buffer. */
417 free(oldrows, M_VTBUF);
422 vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c)
426 KASSERT(p->tp_row < vb->vb_scr_size.tp_row,
427 ("vtbuf_putchar tp_row %d must be less than screen width %d",
428 p->tp_row, vb->vb_scr_size.tp_row));
429 KASSERT(p->tp_col < vb->vb_scr_size.tp_col,
430 ("vtbuf_putchar tp_col %d must be less than screen height %d",
431 p->tp_col, vb->vb_scr_size.tp_col));
433 row = vb->vb_rows[(vb->vb_curroffset + p->tp_row) %
434 VTBUF_MAX_HEIGHT(vb)];
435 if (row[p->tp_col] != c) {
439 vtbuf_dirty_cell(vb, p);
444 vtbuf_cursor_position(struct vt_buf *vb, const term_pos_t *p)
447 if (vb->vb_flags & VBF_CURSOR) {
448 vtbuf_dirty_cell(vb, &vb->vb_cursor);
450 vtbuf_dirty_cell(vb, &vb->vb_cursor);
457 vtbuf_cursor_visibility(struct vt_buf *vb, int yes)
462 oflags = vb->vb_flags;
464 vb->vb_flags |= VBF_CURSOR;
466 vb->vb_flags &= ~VBF_CURSOR;
467 nflags = vb->vb_flags;
470 if (oflags != nflags)
471 vtbuf_dirty_cell(vb, &vb->vb_cursor);