2 * Copyright (c) 2009 The FreeBSD Foundation
5 * This software was developed by Ed Schouten under sponsorship from the
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/systm.h>
40 #include <dev/vt/vt.h>
/* malloc(9) tag for all vt buffer allocations in this file. */
42 static MALLOC_DEFINE(M_VTBUF, "vtbuf", "vt buffer");
/*
 * Buffer state is protected by vb_lock, a spin mutex (see vtbuf_init_early,
 * which initializes it with MTX_SPIN).
 */
44 #define VTBUF_LOCK(vb) mtx_lock_spin(&(vb)->vb_lock)
45 #define VTBUF_UNLOCK(vb) mtx_unlock_spin(&(vb)->vb_lock)
48 * line5 <--- curroffset (terminal output continues at that line)
50 * line1 <--- roffset (history display starts from that point)
/*
 * Reposition the history viewport (vb_roffset) by `offset` relative to
 * `whence`.  Returns 1 when the viewport moved and the screen must be
 * redrawn, 0 when nothing changed.
 * NOTE(review): this extraction is missing lines (function braces and the
 * switch on `whence`); do not modify without the complete source.
 */
55 vthistory_seek(struct vt_buf *vb, int offset, int whence)
59 /* No scrolling if not enabled. */
60 if ((vb->vb_flags & VBF_SCROLL) == 0) {
/* Scrolling disabled: snap the viewport back to the live output line. */
61 if (vb->vb_roffset != vb->vb_curroffset) {
62 vb->vb_roffset = vb->vb_curroffset;
65 return (0); /* No changes */
68 * Operate on a copy of the offset value, since it can temporarily be bigger
69 * than the amount of rows in the buffer.
71 roffset = vb->vb_roffset;
80 /* Go to current offset. */
81 roffset = vb->vb_curroffset;
/* Wrap the candidate offset back into the ring buffer's row range. */
87 if (roffset >= vb->vb_history_size)
88 /* Still have screen_height rows. */
89 roffset %= VTBUF_MAX_HEIGHT(vb);
91 if (vb->vb_roffset != roffset) {
92 vb->vb_roffset = roffset;
93 return (1); /* Offset changed, please update screen. */
95 return (0); /* No changes */
/*
 * Advance the live output position (vb_curroffset) by `offset` rows,
 * clamping at zero and wrapping within the history ring.  When the user is
 * not scrolling, the viewport follows the live position.
 * NOTE(review): closing braces are missing from this extraction.
 */
99 vthistory_addlines(struct vt_buf *vb, int offset)
102 vb->vb_curroffset += offset;
103 if (vb->vb_curroffset < 0)
104 vb->vb_curroffset = 0;
105 vb->vb_curroffset %= vb->vb_history_size;
106 if ((vb->vb_flags & VBF_SCROLL) == 0) {
/* Not in scrollback mode: keep the viewport glued to live output. */
107 vb->vb_roffset = vb->vb_curroffset;
/* Report the current history viewport offset through *offset. */
112 vthistory_getpos(const struct vt_buf *vb, unsigned int *offset)
115 *offset = vb->vb_roffset;
/*
 * Build a 64-bit dirty mask for one axis: set every bit i%64 for
 * i in [begin, end).  Used to coarsely track which rows/columns changed.
 * NOTE(review): the return statements are missing from this extraction;
 * consult the full source before editing.
 */
118 static inline uint64_t
119 vtbuf_dirty_axis(unsigned int begin, unsigned int end)
121 uint64_t left, right, mask;
124 * Mark all bits between begin % 64 and end % 64 dirty.
125 * This code is functionally equivalent to:
127 * for (i = begin; i < end; i++)
128 * mask |= (uint64_t)1 << (i % 64);
131 /* Obvious case. Mark everything dirty. */
132 if (end - begin >= 64)
135 /* 1....0; used bits on the left. */
136 left = VBM_DIRTY << begin % 64;
137 /* 0....1; used bits on the right. */
138 right = VBM_DIRTY >> -end % 64;
141 * Only take the intersection. If the result of that is 0, it
142 * means that the selection crossed a 64 bit boundary along the
143 * way, which means we have to take the complement.
/*
 * Grow the buffer's dirty rectangle to cover `area` and OR the per-axis
 * dirty bitmasks, so the renderer knows which region to repaint.
 */
152 vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area)
/* Expand each edge of vb_dirtyrect only when `area` extends past it. */
156 if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row)
157 vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row;
158 if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col)
159 vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col;
160 if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row)
161 vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row;
162 if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col)
163 vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col;
/* Record the touched rows/columns in the coarse 64-bit masks. */
164 vb->vb_dirtymask.vbm_row |=
165 vtbuf_dirty_axis(area->tr_begin.tp_row, area->tr_end.tp_row);
166 vb->vb_dirtymask.vbm_col |=
167 vtbuf_dirty_axis(area->tr_begin.tp_col, area->tr_end.tp_col);
/*
 * Mark the single cell at position `p` dirty by building a 1x1 rectangle
 * and delegating to vtbuf_dirty().
 * NOTE(review): the lines initializing area.tr_begin are missing from this
 * extraction.
 */
172 vtbuf_dirty_cell(struct vt_buf *vb, const term_pos_t *p)
177 area.tr_end.tp_row = p->tp_row + 1;
178 area.tr_end.tp_col = p->tp_col + 1;
179 vtbuf_dirty(vb, &area);
/*
 * Reset dirty tracking to "nothing dirty": begin past end (begin at screen
 * size, end at 0) so any future vtbuf_dirty() call re-establishes a valid
 * rectangle, and clear both axis bitmasks.
 */
183 vtbuf_make_undirty(struct vt_buf *vb)
186 vb->vb_dirtyrect.tr_begin = vb->vb_scr_size;
187 vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0;
188 vb->vb_dirtymask.vbm_row = vb->vb_dirtymask.vbm_col = 0;
/*
 * Hand the accumulated dirty rectangle and masks to the caller (renderer)
 * and reset the buffer's dirty state for the next accumulation cycle.
 */
192 vtbuf_undirty(struct vt_buf *vb, term_rect_t *r, struct vt_bufmask *m)
196 *r = vb->vb_dirtyrect;
197 *m = vb->vb_dirtymask;
198 vtbuf_make_undirty(vb);
/*
 * Copy the rectangle `r` of the screen to top-left position `p2`.  A
 * full-width, whole-screen upward copy is recognized as a scroll and is
 * turned into a history advance instead of a data move; otherwise the rows
 * are memmove'd in an order that is safe for overlapping regions.  The
 * destination area is marked dirty at the end.
 * NOTE(review): several lines (memmove call heads, area.tr_begin setup,
 * closing braces) are missing from this extraction.
 */
203 vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2)
205 const term_pos_t *p1 = &r->tr_begin;
207 unsigned int rows, cols;
/* Source rectangle must start on-screen and end within bounds. */
210 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
211 ("vtbuf_copy begin.tp_row %d must be less than screen width %d",
212 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
213 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
214 ("vtbuf_copy begin.tp_col %d must be less than screen height %d",
215 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
217 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
218 ("vtbuf_copy end.tp_row %d must be less than screen width %d",
219 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
220 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
221 ("vtbuf_copy end.tp_col %d must be less than screen height %d",
222 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
/* Destination corner must be on-screen. */
224 KASSERT(p2->tp_row < vb->vb_scr_size.tp_row,
225 ("vtbuf_copy tp_row %d must be less than screen width %d",
226 p2->tp_row, vb->vb_scr_size.tp_row));
227 KASSERT(p2->tp_col < vb->vb_scr_size.tp_col,
228 ("vtbuf_copy tp_col %d must be less than screen height %d",
229 p2->tp_col, vb->vb_scr_size.tp_col));
231 rows = r->tr_end.tp_row - r->tr_begin.tp_row;
232 rdiff = r->tr_begin.tp_row - p2->tp_row;
233 cols = r->tr_end.tp_col - r->tr_begin.tp_col;
234 if (r->tr_begin.tp_row > p2->tp_row && r->tr_begin.tp_col == 0 &&
235 r->tr_end.tp_col == vb->vb_scr_size.tp_col && /* Full row. */
236 (rows + rdiff) == vb->vb_scr_size.tp_row && /* Whole screen. */
237 rdiff > 0) { /* Only forward direction. Do not eat history. */
/* Whole-screen scroll-up: just advance the history ring. */
238 vthistory_addlines(vb, rdiff);
239 } else if (p2->tp_row < p1->tp_row) {
240 /* Handle overlapping copies of line segments. */
/* Destination above source: copy top-down so rows are read before
 * being overwritten. */
242 for (pr = 0; pr < rows; pr++)
244 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
245 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
246 cols * sizeof(term_char_t));
248 /* Move data down. */
/* Destination below source: copy bottom-up for the same reason. */
249 for (pr = rows - 1; pr >= 0; pr--)
251 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col),
252 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col),
253 cols * sizeof(term_char_t));
/* Mark the destination rectangle (clamped to the screen) dirty. */
257 area.tr_end.tp_row = MIN(p2->tp_row + rows, vb->vb_scr_size.tp_row);
258 area.tr_end.tp_col = MIN(p2->tp_col + cols, vb->vb_scr_size.tp_col);
259 vtbuf_dirty(vb, &area);
/*
 * Fill every cell of rectangle `r` with character `c`.  Rows are resolved
 * through the history ring (vb_curroffset modulo the buffer height).
 * NOTE(review): the inner-loop body and closing braces are missing from
 * this extraction.
 */
263 vtbuf_fill(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
268 for (pr = r->tr_begin.tp_row; pr < r->tr_end.tp_row; pr++) {
/* Translate the on-screen row to its backing row in the ring buffer. */
269 row = vb->vb_rows[(vb->vb_curroffset + pr) %
270 VTBUF_MAX_HEIGHT(vb)];
271 for (pc = r->tr_begin.tp_col; pc < r->tr_end.tp_col; pc++) {
/*
 * Bounds-checked wrapper around vtbuf_fill().  Asserts that the rectangle
 * lies within the screen, then performs the fill; the name suggests the
 * fill is done under the buffer lock, but the lock/unlock lines are
 * missing from this extraction — confirm against the full source.
 */
278 vtbuf_fill_locked(struct vt_buf *vb, const term_rect_t *r, term_char_t c)
280 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row,
281 ("vtbuf_fill_locked begin.tp_row %d must be < screen width %d",
282 r->tr_begin.tp_row, vb->vb_scr_size.tp_row));
283 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col,
284 ("vtbuf_fill_locked begin.tp_col %d must be < screen height %d",
285 r->tr_begin.tp_col, vb->vb_scr_size.tp_col));
287 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row,
288 ("vtbuf_fill_locked end.tp_row %d must be <= screen width %d",
289 r->tr_end.tp_row, vb->vb_scr_size.tp_row));
290 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col,
291 ("vtbuf_fill_locked end.tp_col %d must be <= screen height %d",
292 r->tr_end.tp_col, vb->vb_scr_size.tp_col));
295 vtbuf_fill(vb, r, c);
/*
 * Point each vb_rows[] entry at its backing slice of the flat vb_buffer
 * array (one slice per row, tp_col characters wide).  The history size is
 * first clamped to at least one screen of rows.
 */
302 vtbuf_init_rows(struct vt_buf *vb)
306 vb->vb_history_size = MAX(vb->vb_history_size, vb->vb_scr_size.tp_row);
308 for (r = 0; r < vb->vb_history_size; r++)
309 vb->vb_rows[r] = &vb->vb_buffer[r *
310 vb->vb_scr_size.tp_col];
/*
 * Early/common initialization: enable the cursor, reset offsets, clear the
 * dirty state, and initialize the spin mutex exactly once (guarded by
 * VBF_MTX_INIT so repeated calls are safe).
 */
314 vtbuf_init_early(struct vt_buf *vb)
317 vb->vb_flags |= VBF_CURSOR;
319 vb->vb_curroffset = 0;
322 vtbuf_make_undirty(vb);
323 if ((vb->vb_flags & VBF_MTX_INIT) == 0) {
324 mtx_init(&vb->vb_lock, "vtbuf", NULL, MTX_SPIN);
325 vb->vb_flags |= VBF_MTX_INIT;
/*
 * Full initialization for screen size `p`: record the size and default
 * history depth, allocate the character buffer and row-pointer array
 * (unless the buffer is statically provided, VBF_STATIC), then finish via
 * vtbuf_init_early().  M_WAITOK: may sleep, must not be called from an
 * interrupt context.
 */
330 vtbuf_init(struct vt_buf *vb, const term_pos_t *p)
334 vb->vb_scr_size = *p;
335 vb->vb_history_size = VBF_DEFAULT_HISTORY_SIZE;
337 if ((vb->vb_flags & VBF_STATIC) == 0) {
338 sz = vb->vb_history_size * p->tp_col * sizeof(term_char_t);
339 vb->vb_buffer = malloc(sz, M_VTBUF, M_WAITOK);
341 sz = vb->vb_history_size * sizeof(term_char_t *);
342 vb->vb_rows = malloc(sz, M_VTBUF, M_WAITOK);
345 vtbuf_init_early(vb);
/*
 * Change only the history depth: regrow the buffer at the current screen
 * dimensions with the new `size`.
 */
349 vtbuf_sethistory_size(struct vt_buf *vb, int size)
354 p.tp_row = vb->vb_scr_size.tp_row;
355 p.tp_col = vb->vb_scr_size.tp_col;
356 vtbuf_grow(vb, &p, size);
/*
 * Resize the buffer to screen size `p` with at least `history_size` rows
 * of history.  When growth is needed, allocate a fresh character buffer
 * and row-pointer array, copy the old history row by row (padding new
 * columns with VTBUF_SPACE_CHAR), fill rows beyond the old history, and
 * free the previous allocations (NULL for static buffers, which free(9)
 * tolerates).
 * NOTE(review): lines are missing from this extraction (row setup inside
 * the copy loop, pointer swap into vb, free of `old`, closing braces).
 */
360 vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, int history_size)
362 term_char_t *old, *new, **rows, **oldrows, **copyrows, *row;
363 int bufsize, rowssize, w, h, c, r;
/* Never keep fewer history rows than fit on one screen. */
366 history_size = MAX(history_size, p->tp_row);
/* Only reallocate when the history deepens or rows get wider. */
368 if (history_size > vb->vb_history_size || p->tp_col >
369 vb->vb_scr_size.tp_col) {
370 /* Allocate new buffer. */
371 bufsize = history_size * p->tp_col * sizeof(term_char_t);
372 new = malloc(bufsize, M_VTBUF, M_WAITOK|M_ZERO);
/* NOTE(review): sizeof(term_pos_t *) looks wrong for an array of
 * term_char_t * row pointers — same pointer size in practice, but
 * presumably sizeof(term_char_t *) was intended; verify upstream. */
373 rowssize = history_size * sizeof(term_pos_t *);
374 rows = malloc(rowssize, M_VTBUF, M_WAITOK|M_ZERO);
/* Static buffers are not ours to free: pass NULL to free(9) later. */
378 old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer;
379 oldrows = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_rows;
380 copyrows = vb->vb_rows;
381 w = vb->vb_scr_size.tp_col;
382 h = vb->vb_history_size;
384 vb->vb_history_size = history_size;
387 vb->vb_flags &= ~VBF_STATIC;
388 vb->vb_scr_size = *p;
391 /* Copy history and fill extra space. */
392 for (r = 0; r < history_size; r ++) {
394 if (r < h) { /* Copy. */
395 memmove(rows[r], copyrows[r],
396 MIN(p->tp_col, w) * sizeof(term_char_t));
/* Pad newly gained columns with blanks. */
397 for (c = MIN(p->tp_col, w); c < p->tp_col;
399 row[c] = VTBUF_SPACE_CHAR;
401 } else { /* Just fill. */
402 rect.tr_begin.tp_col = 0;
403 rect.tr_begin.tp_row = r;
404 rect.tr_end.tp_col = p->tp_col;
405 rect.tr_end.tp_row = p->tp_row;
406 vtbuf_fill(vb, &rect, VTBUF_SPACE_CHAR);
410 vtbuf_make_undirty(vb);
412 /* Deallocate old buffer. */
414 free(oldrows, M_VTBUF);
/*
 * Store character `c` at screen position `p`.  The cell is only written —
 * and only marked dirty — when the stored value actually changes, avoiding
 * needless repaints.
 * NOTE(review): the assignment line inside the if-block is missing from
 * this extraction.
 */
419 vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c)
423 KASSERT(p->tp_row < vb->vb_scr_size.tp_row,
424 ("vtbuf_putchar tp_row %d must be less than screen width %d",
425 p->tp_row, vb->vb_scr_size.tp_row));
426 KASSERT(p->tp_col < vb->vb_scr_size.tp_col,
427 ("vtbuf_putchar tp_col %d must be less than screen height %d",
428 p->tp_col, vb->vb_scr_size.tp_col));
/* Resolve the on-screen row through the history ring. */
430 row = vb->vb_rows[(vb->vb_curroffset + p->tp_row) %
431 VTBUF_MAX_HEIGHT(vb)];
432 if (row[p->tp_col] != c) {
436 vtbuf_dirty_cell(vb, p);
/*
 * Move the cursor to position `p`.  When the cursor is visible
 * (VBF_CURSOR), both the old and the new cursor cell are marked dirty so
 * the renderer erases the old glyph and draws the new one.
 * NOTE(review): the assignment of vb_cursor between the two dirty calls is
 * missing from this extraction — the two vtbuf_dirty_cell() calls bracket
 * that update in the full source; confirm before editing.
 */
441 vtbuf_cursor_position(struct vt_buf *vb, const term_pos_t *p)
444 if (vb->vb_flags & VBF_CURSOR) {
445 vtbuf_dirty_cell(vb, &vb->vb_cursor);
447 vtbuf_dirty_cell(vb, &vb->vb_cursor);
/*
 * Show (yes != 0) or hide the cursor by toggling VBF_CURSOR.  The cursor
 * cell is marked dirty only when the flag actually changed, so a no-op
 * call triggers no repaint.
 */
454 vtbuf_cursor_visibility(struct vt_buf *vb, int yes)
459 oflags = vb->vb_flags;
461 vb->vb_flags |= VBF_CURSOR;
463 vb->vb_flags &= ~VBF_CURSOR;
464 nflags = vb->vb_flags;
467 if (oflags != nflags)
468 vtbuf_dirty_cell(vb, &vb->vb_cursor);