2 * Copyright (c) 2009 The FreeBSD Foundation
5 * This software was developed by Ed Schouten under sponsorship from the
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/systm.h>
40 #include <dev/vt/vt.h>
42 static MALLOC_DEFINE(M_VTBUF, "vtbuf", "vt buffer");
44 #define VTBUF_LOCK(vb) mtx_lock_spin(&(vb)->vb_lock)
45 #define VTBUF_UNLOCK(vb) mtx_unlock_spin(&(vb)->vb_lock)
/*
 * The buffer is a ring of history lines; terminal output and the
 * history view track two offsets into it, e.g.:
 *
 *	line5 <--- curroffset (terminal output to that line)
 *	line0
 *	line1 <--- roffset (history display from that point)
 *	...
 */
55 vthistory_seek(struct vt_buf *vb, int offset, int whence)
57 int diff, top, bottom, roffset;
59 /* No scrolling if not enabled. */
60 if ((vb->vb_flags & VBF_SCROLL) == 0) {
61 if (vb->vb_roffset != vb->vb_curroffset) {
62 vb->vb_roffset = vb->vb_curroffset;
65 return (0); /* No changes */
67 top = (vb->vb_flags & VBF_HISTORY_FULL)?
68 (vb->vb_curroffset + vb->vb_scr_size.tp_row):vb->vb_history_size;
69 bottom = vb->vb_curroffset + vb->vb_history_size;
72 * Operate on copy of offset value, since it temporary can be bigger
73 * than amount of rows in buffer.
75 roffset = vb->vb_roffset + vb->vb_history_size;
78 roffset = offset + vb->vb_history_size;
84 /* Go to current offset. */
85 roffset = vb->vb_curroffset + vb->vb_history_size;
89 roffset = (roffset < top)?top:roffset;
90 roffset = (roffset > bottom)?bottom:roffset;
92 roffset %= vb->vb_history_size;
94 if (vb->vb_roffset != roffset) {
95 diff = vb->vb_roffset - roffset;
96 vb->vb_roffset = roffset;
98 * Offset changed, please update Nth lines on sceen.
99 * +N - Nth lines at top;
100 * -N - Nth lines at bottom.
104 return (0); /* No changes */
108 vthistory_addlines(struct vt_buf *vb, int offset)
111 vb->vb_curroffset += offset;
112 if (vb->vb_curroffset < 0)
113 vb->vb_curroffset = 0;
114 vb->vb_curroffset %= vb->vb_history_size;
115 if ((vb->vb_flags & VBF_SCROLL) == 0) {
116 vb->vb_roffset = vb->vb_curroffset;
121 vthistory_getpos(const struct vt_buf *vb, unsigned int *offset)
124 *offset = vb->vb_roffset;
127 static inline uint64_t
128 vtbuf_dirty_axis(unsigned int begin, unsigned int end)
130 uint64_t left, right, mask;
133 * Mark all bits between begin % 64 and end % 64 dirty.
134 * This code is functionally equivalent to:
136 * for (i = begin; i < end; i++)
137 * mask |= (uint64_t)1 << (i % 64);
140 /* Obvious case. Mark everything dirty. */
141 if (end - begin >= 64)
144 /* 1....0; used bits on the left. */
145 left = VBM_DIRTY << begin % 64;
146 /* 0....1; used bits on the right. */
147 right = VBM_DIRTY >> -end % 64;
150 * Only take the intersection. If the result of that is 0, it
151 * means that the selection crossed a 64 bit boundary along the
152 * way, which means we have to take the complement.
161 vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area)
165 if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row)
166 vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row;
167 if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col)
168 vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col;
169 if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row)
170 vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row;
171 if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col)
172 vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col;
173 vb->vb_dirtymask.vbm_row |=
174 vtbuf_dirty_axis(area->tr_begin.tp_row, area->tr_end.tp_row);
175 vb->vb_dirtymask.vbm_col |=
176 vtbuf_dirty_axis(area->tr_begin.tp_col, area->tr_end.tp_col);
181 vtbuf_dirty_cell(struct vt_buf *vb, const term_pos_t *p)
186 area.tr_end.tp_row = p->tp_row + 1;
187 area.tr_end.tp_col = p->tp_col + 1;
188 vtbuf_dirty(vb, &area);
192 vtbuf_make_undirty(struct vt_buf *vb)
195 vb->vb_dirtyrect.tr_begin = vb->vb_scr_size;
196 vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0;
197 vb->vb_dirtymask.vbm_row = vb->vb_dirtymask.vbm_col = 0;
201 vtbuf_undirty(struct vt_buf *vb, term_rect_t *r, struct vt_bufmask *m)
205 *r = vb->vb_dirtyrect;
206 *m = vb->vb_dirtymask;
207 vtbuf_make_undirty(vb);
212 vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2)
214 const term_pos_t *p1 = &r->tr_begin;
216 unsigned int rows, cols;
219 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
220 ("vtbuf_copy begin.tp_row %d must be less than screen width %d",
221 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
222 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
223 ("vtbuf_copy begin.tp_col %d must be less than screen height %d",
224 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
226 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
227 ("vtbuf_copy end.tp_row %d must be less than screen width %d",
228 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
229 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
230 ("vtbuf_copy end.tp_col %d must be less than screen height %d",
231 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
233 KASSERT(p2->tp_row < vb->vb_scr_size.tp_row,
234 ("vtbuf_copy tp_row %d must be less than screen width %d",
235 p2->tp_row, vb->vb_scr_size.tp_row));
236 KASSERT(p2->tp_col < vb->vb_scr_size.tp_col,
237 ("vtbuf_copy tp_col %d must be less than screen height %d",
238 p2->tp_col, vb->vb_scr_size.tp_col));
240 rows = r->tr_end.tp_row - r->tr_begin.tp_row;
241 rdiff = r->tr_begin.tp_row - p2->tp_row;
242 cols = r->tr_end.tp_col - r->tr_begin.tp_col;
243 if (r->tr_begin.tp_row > p2->tp_row && r->tr_begin.tp_col == 0 &&
244 r->tr_end.tp_col == vb->vb_scr_size.tp_col && /* Full row. */
245 (rows + rdiff) == vb->vb_scr_size.tp_row && /* Whole screen. */
246 rdiff > 0) { /* Only forward dirrection. Do not eat history. */
247 vthistory_addlines(vb, rdiff);
248 } else if (p2->tp_row < p1->tp_row) {
249 /* Handle overlapping copies of line segments. */
251 for (pr = 0; pr < rows; pr++)
253 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
254 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
255 cols * sizeof(term_char_t));
257 /* Move data down. */
258 for (pr = rows - 1; pr >= 0; pr--)
260 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
261 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
262 cols * sizeof(term_char_t));
266 area.tr_end.tp_row = MIN(p2->tp_row + rows, vb->vb_scr_size.tp_row);
267 area.tr_end.tp_col = MIN(p2->tp_col + cols, vb->vb_scr_size.tp_col);
268 vtbuf_dirty(vb, &area);
272 vtbuf_fill(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
277 for (pr = r->tr_begin.tp_row; pr < r->tr_end.tp_row; pr++) {
278 row = vb->vb_rows[(vb->vb_curroffset + pr) %
279 VTBUF_MAX_HEIGHT(vb)];
280 for (pc = r->tr_begin.tp_col; pc < r->tr_end.tp_col; pc++) {
287 vtbuf_fill_locked(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
289 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
290 ("vtbuf_fill_locked begin.tp_row %d must be < screen width %d",
291 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
292 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
293 ("vtbuf_fill_locked begin.tp_col %d must be < screen height %d",
294 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
296 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
297 ("vtbuf_fill_locked end.tp_row %d must be <= screen width %d",
298 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
299 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
300 ("vtbuf_fill_locked end.tp_col %d must be <= screen height %d",
301 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
304 vtbuf_fill(vb, r, c);
311 vtbuf_init_rows(struct vt_buf *vb)
315 vb->vb_history_size = MAX(vb->vb_history_size, vb->vb_scr_size.tp_row);
317 for (r = 0; r < vb->vb_history_size; r++)
318 vb->vb_rows[r] = &vb->vb_buffer[r *
319 vb->vb_scr_size.tp_col];
323 vtbuf_init_early(struct vt_buf *vb)
326 vb->vb_flags |= VBF_CURSOR;
328 vb->vb_curroffset = 0;
331 vtbuf_make_undirty(vb);
332 if ((vb->vb_flags & VBF_MTX_INIT) == 0) {
333 mtx_init(&vb->vb_lock, "vtbuf", NULL, MTX_SPIN);
334 vb->vb_flags |= VBF_MTX_INIT;
339 vtbuf_init(struct vt_buf *vb, const term_pos_t *p)
343 vb->vb_scr_size = *p;
344 vb->vb_history_size = VBF_DEFAULT_HISTORY_SIZE;
346 if ((vb->vb_flags & VBF_STATIC) == 0) {
347 sz = vb->vb_history_size * p->tp_col * sizeof(term_char_t);
348 vb->vb_buffer = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
350 sz = vb->vb_history_size * sizeof(term_char_t *);
351 vb->vb_rows = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO);
354 vtbuf_init_early(vb);
358 vtbuf_sethistory_size(struct vt_buf *vb, int size)
363 p.tp_row = vb->vb_scr_size.tp_row;
364 p.tp_col = vb->vb_scr_size.tp_col;
365 vtbuf_grow(vb, &p, size);
369 vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, int history_size)
371 term_char_t *old, *new, **rows, **oldrows, **copyrows, *row;
372 int bufsize, rowssize, w, h, c, r;
375 history_size = MAX(history_size, p->tp_row);
377 if (history_size > vb->vb_history_size || p->tp_col >
378 vb->vb_scr_size.tp_col) {
379 /* Allocate new buffer. */
380 bufsize = history_size * p->tp_col * sizeof(term_char_t);
381 new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO);
382 rowssize = history_size * sizeof(term_pos_t *);
383 rows = malloc(rowssize, M_VTBUF, M_WAITOK | M_ZERO);
387 old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer;
388 oldrows = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_rows;
389 copyrows = vb->vb_rows;
390 w = vb->vb_scr_size.tp_col;
391 h = vb->vb_history_size;
393 vb->vb_history_size = history_size;
396 vb->vb_flags &= ~VBF_STATIC;
397 vb->vb_scr_size = *p;
400 /* Copy history and fill extra space. */
401 for (r = 0; r < history_size; r ++) {
403 if (r < h) { /* Copy. */
404 memmove(rows[r], copyrows[r],
405 MIN(p->tp_col, w) * sizeof(term_char_t));
406 for (c = MIN(p->tp_col, w); c < p->tp_col;
408 row[c] = VTBUF_SPACE_CHAR;
410 } else { /* Just fill. */
411 rect.tr_begin.tp_col = 0;
412 rect.tr_begin.tp_row = r;
413 rect.tr_end.tp_col = p->tp_col;
414 rect.tr_end.tp_row = p->tp_row;
415 vtbuf_fill(vb, &rect, VTBUF_SPACE_CHAR);
419 vtbuf_make_undirty(vb);
421 /* Deallocate old buffer. */
423 free(oldrows, M_VTBUF);
428 vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c)
432 KASSERT(p->tp_row < vb->vb_scr_size.tp_row,
433 ("vtbuf_putchar tp_row %d must be less than screen width %d",
434 p->tp_row, vb->vb_scr_size.tp_row));
435 KASSERT(p->tp_col < vb->vb_scr_size.tp_col,
436 ("vtbuf_putchar tp_col %d must be less than screen height %d",
437 p->tp_col, vb->vb_scr_size.tp_col));
439 row = vb->vb_rows[(vb->vb_curroffset + p->tp_row) %
440 VTBUF_MAX_HEIGHT(vb)];
441 if (row[p->tp_col] != c) {
445 vtbuf_dirty_cell(vb, p);
450 vtbuf_cursor_position(struct vt_buf *vb, const term_pos_t *p)
453 if (vb->vb_flags & VBF_CURSOR) {
454 vtbuf_dirty_cell(vb, &vb->vb_cursor);
456 vtbuf_dirty_cell(vb, &vb->vb_cursor);
463 vtbuf_cursor_visibility(struct vt_buf *vb, int yes)
468 oflags = vb->vb_flags;
470 vb->vb_flags |= VBF_CURSOR;
472 vb->vb_flags &= ~VBF_CURSOR;
473 nflags = vb->vb_flags;
476 if (oflags != nflags)
477 vtbuf_dirty_cell(vb, &vb->vb_cursor);