1 /*-
2  * Copyright (c) 2018 VMware, Inc.
3  *
4  * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
5  */
6
7 /* This file implements defines and helper functions for the VMCI driver. */
8
9 #include <sys/cdefs.h>
10 __FBSDID("$FreeBSD$");
11
12 #include <sys/malloc.h>
13 #include <sys/proc.h>
14 #include <sys/uio.h>
15
16 #include <machine/bus.h>
17
18 #include "vmci.h"
19 #include "vmci_defs.h"
20 #include "vmci_kernel_defs.h"
21 #include "vmci_kernel_if.h"
22 #include "vmci_queue.h"
23
24 struct vmci_queue_kernel_if {
25         size_t                  num_pages;      /* Num pages incl. header. */
26         struct vmci_dma_alloc   *dmas;          /* For dma alloc. */
27 };
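/*
 * vmci_alloc_queue() below carves this structure and its dmas array out of
 * the same allocation as the queue itself:
 *
 *	[struct vmci_queue][struct vmci_queue_kernel_if][dmas[0 .. num_pages-1]]
 *
 * dmas[0] backs the queue header page; dmas[1 ..] back the data pages.
 */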
28
29 /*
30  *------------------------------------------------------------------------------
31  *
32  * vmci_init_lock
33  *
34  *     Initializes the lock. Must be called before use.
35  *
36  * Results:
37  *     Always VMCI_SUCCESS.
38  *
39  * Side effects:
40  *     Thread can block.
41  *
42  *------------------------------------------------------------------------------
43  */
44
45 int
46 vmci_init_lock(vmci_lock *lock, char *name)
47 {
48
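        /*
         * MTX_DEF creates a regular (blocking) mutex; MTX_NOWITNESS exempts
         * it from witness(4) lock-order checking.
         */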
49         mtx_init(lock, name, NULL, MTX_DEF | MTX_NOWITNESS);
50         return (VMCI_SUCCESS);
51 }
52
53 /*
54  *------------------------------------------------------------------------------
55  *
56  * vmci_cleanup_lock
57  *
58  *     Cleanup the lock. Must be called before deallocating lock.
59  *
60  * Results:
61  *     None
62  *
63  * Side effects:
64  *     Deletes kernel lock state
65  *
66  *------------------------------------------------------------------------------
67  */
68
69 void
70 vmci_cleanup_lock(vmci_lock *lock)
71 {
72
73         if (mtx_initialized(lock))
74                 mtx_destroy(lock);
75 }
76
77 /*
78  *------------------------------------------------------------------------------
79  *
80  * vmci_grab_lock
81  *
82  *     Grabs the given lock.
83  *
84  * Results:
85  *      None
86  *
87  * Side effects:
88  *      Thread can block.
89  *
90  *------------------------------------------------------------------------------
91  */
92
93 void
94 vmci_grab_lock(vmci_lock *lock)
95 {
96
97         mtx_lock(lock);
98 }
99
100 /*
101  *------------------------------------------------------------------------------
102  *
103  * vmci_release_lock
104  *
105  *     Releases the given lock.
106  *
107  * Results:
108  *     None
109  *
110  * Side effects:
111  *     A thread blocked on this lock may wake up.
112  *
113  *------------------------------------------------------------------------------
114  */
115
116 void
117 vmci_release_lock(vmci_lock *lock)
118 {
119
120         mtx_unlock(lock);
121 }
122
123 /*
124  *------------------------------------------------------------------------------
125  *
126  * vmci_grab_lock_bh
127  *
128  *     Grabs the given lock.
129  *
130  * Results:
131  *     None
132  *
133  * Side effects:
134  *     None.
135  *
136  *------------------------------------------------------------------------------
137  */
138
139 void
140 vmci_grab_lock_bh(vmci_lock *lock)
141 {
142
143         mtx_lock(lock);
144 }
145
146 /*
147  *------------------------------------------------------------------------------
148  *
149  * vmci_release_lock_bh
150  *
151  *     Releases the given lock.
152  *
153  * Results:
154  *     None
155  *
156  * Side effects:
157  *     None.
158  *
159  *------------------------------------------------------------------------------
160  */
161
162 void
163 vmci_release_lock_bh(vmci_lock *lock)
164 {
165
166         mtx_unlock(lock);
167 }
168
169 /*
170  *------------------------------------------------------------------------------
171  *
172  * vmci_initialized_lock
173  *
174  *     Returns whether a lock has been initialized.
175  *
176  * Results:
177  *     Return 1 if initialized or 0 if uninitialized.
178  *
179  * Side effects:
180  *     None
181  *
182  *------------------------------------------------------------------------------
183  */
184
185 int
186 vmci_initialized_lock(vmci_lock *lock)
187 {
188
189         return (mtx_initialized(lock));
190 }
191
192 /*
193  *------------------------------------------------------------------------------
194  *
195  * vmci_alloc_kernel_mem
196  *
197  *     Allocate physically contiguous memory for the VMCI driver.
198  *
199  * Results:
200  *     The address allocated or NULL on error.
201  *
202  *
203  * Side effects:
204  *     Memory may be allocated.
205  *
206  *------------------------------------------------------------------------------
207  */
208
209 void *
210 vmci_alloc_kernel_mem(size_t size, int flags)
211 {
212         void *ptr;
213
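        /*
         * contigmalloc(9) arguments below: physically contiguous memory in
         * the range 0 .. 0xFFFFFFFF (below 4 GiB), 8-byte aligned, with no
         * allocation crossing a 1 MiB boundary.  VMCI_MEMORY_ATOMIC callers
         * cannot sleep, so they use M_NOWAIT; other callers use M_WAITOK and
         * may block until memory is available.
         */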
214         if ((flags & VMCI_MEMORY_ATOMIC) != 0)
215                 ptr = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xFFFFFFFF,
216                     8, 1024 * 1024);
217         else
218                 ptr = contigmalloc(size, M_DEVBUF, M_WAITOK, 0, 0xFFFFFFFF,
219                     8, 1024 * 1024);
220
221         return (ptr);
222 }
223
224 /*
225  *------------------------------------------------------------------------------
226  *
227  * vmci_free_kernel_mem
228  *
229  *     Free kernel memory allocated for the VMCI driver.
230  *
231  * Results:
232  *     None.
233  *
234  * Side effects:
235  *     Memory is freed.
236  *
237  *------------------------------------------------------------------------------
238  */
239
240 void
241 vmci_free_kernel_mem(void *ptr, size_t size)
242 {
243
244         contigfree(ptr, size, M_DEVBUF);
245 }
246
247 /*
248  *------------------------------------------------------------------------------
249  *
250  * vmci_can_schedule_delayed_work --
251  *
252  *     Checks to see if the given platform supports delayed work callbacks.
253  *
254  * Results:
255  *     True if it does, false otherwise.
256  *
257  * Side effects:
258  *     None.
259  *
260  *------------------------------------------------------------------------------
261  */
262
263 bool
264 vmci_can_schedule_delayed_work(void)
265 {
266
267         return (true);
268 }
269
270 /*
271  *------------------------------------------------------------------------------
272  *
273  * vmci_schedule_delayed_work --
274  *
275  *     Schedule the specified callback.
276  *
277  * Results:
278  *     Zero on success, error code otherwise.
279  *
280  * Side effects:
281  *     None.
282  *
283  *------------------------------------------------------------------------------
284  */
285
286 int
287 vmci_schedule_delayed_work(vmci_work_fn *work_fn, void *data)
288 {
289
290         return (vmci_schedule_delayed_work_fn(work_fn, data));
291 }
292
293 /*
294  *------------------------------------------------------------------------------
295  *
296  * vmci_create_event --
297  *
298  * Results:
299  *     None.
300  *
301  * Side effects:
302  *     None.
303  *
304  *------------------------------------------------------------------------------
305  */
306
307 void
308 vmci_create_event(vmci_event *event)
309 {
310
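        /* The event starts unavailable (count 0); vmci_signal_event() posts it. */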
311         sema_init(event, 0, "vmci_event");
312 }
313
314 /*
315  *------------------------------------------------------------------------------
316  *
317  * vmci_destroy_event --
318  *
319  * Results:
320  *     None.
321  *
322  * Side effects:
323  *     None.
324  *
325  *------------------------------------------------------------------------------
326  */
327
328 void
329 vmci_destroy_event(vmci_event *event)
330 {
331
332         if (mtx_owned(&event->sema_mtx))
333                 sema_destroy(event);
334 }
335
336 /*
337  *------------------------------------------------------------------------------
338  *
339  * vmci_signal_event --
340  *
341  * Results:
342  *     None.
343  *
344  * Side effects:
345  *     None.
346  *
347  *------------------------------------------------------------------------------
348  */
349
350 void
351 vmci_signal_event(vmci_event *event)
352 {
353
354         sema_post(event);
355 }
356
357 /*
358  *------------------------------------------------------------------------------
359  *
360  * vmci_wait_on_event --
361  *
362  * Results:
363  *     None.
364  *
365  * Side effects:
366  *     None.
367  *
368  *------------------------------------------------------------------------------
369  */
370
371 void
372 vmci_wait_on_event(vmci_event *event, vmci_event_release_cb release_cb,
373     void *client_data)
374 {
375
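        /*
         * Let the caller release any state it holds (e.g. a lock) via
         * release_cb before this thread blocks; the semaphore is posted by
         * vmci_signal_event().
         */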
376         release_cb(client_data);
377         sema_wait(event);
378 }
379
380 /*
381  *------------------------------------------------------------------------------
382  *
383  * vmci_mutex_init --
384  *
385  *     Initializes the mutex. Must be called before use.
386  *
387  * Results:
388  *     Always VMCI_SUCCESS.
389  *
390  * Side effects:
391  *     None.
392  *
393  *------------------------------------------------------------------------------
394  */
395
396 int
397 vmci_mutex_init(vmci_mutex *mutex, char *name)
398 {
399
400         mtx_init(mutex, name, NULL, MTX_DEF | MTX_NOWITNESS);
401         return (VMCI_SUCCESS);
402 }
403
404 /*
405  *------------------------------------------------------------------------------
406  *
407  * vmci_mutex_destroy --
408  *
409  *     Destroys the mutex.
410  *
411  * Results:
412  *     None.
413  *
414  * Side effects:
415  *     None.
416  *
417  *------------------------------------------------------------------------------
418  */
419
420 void
421 vmci_mutex_destroy(vmci_mutex *mutex)
422 {
423
424         mtx_destroy(mutex);
425 }
426
427 /*
428  *------------------------------------------------------------------------------
429  *
430  * vmci_mutex_acquire --
431  *
432  *     Acquires the mutex.
433  *
434  * Results:
435  *     None.
436  *
437  * Side effects:
438  *     Thread may block.
439  *
440  *------------------------------------------------------------------------------
441  */
442
443 void
444 vmci_mutex_acquire(vmci_mutex *mutex)
445 {
446
447         mtx_lock(mutex);
448 }
449
450 /*
451  *------------------------------------------------------------------------------
452  *
453  * vmci_mutex_release --
454  *
455  *     Releases the mutex.
456  *
457  * Results:
458  *     None.
459  *
460  * Side effects:
461  *     May wake up the thread blocking on this mutex.
462  *
463  *------------------------------------------------------------------------------
464  */
465
466 void
467 vmci_mutex_release(vmci_mutex *mutex)
468 {
469
470         mtx_unlock(mutex);
471 }
472
473 /*
474  *------------------------------------------------------------------------------
475  *
476  * vmci_mutex_initialized
477  *
478  *     Returns whether a mutex has been initialized.
479  *
480  * Results:
481  *     Return 1 if initialized or 0 if uninitialized.
482  *
483  * Side effects:
484  *     None
485  *
486  *------------------------------------------------------------------------------
487  */
488
489 int
490 vmci_mutex_initialized(vmci_mutex *mutex)
491 {
492
493         return (mtx_initialized(mutex));
494 }
495 /*
496  *------------------------------------------------------------------------------
497  *
498  * vmci_alloc_queue --
499  *
500  *     Allocates kernel queue pages of specified size with IOMMU mappings, plus
501  *     space for the queue structure/kernel interface and the queue header.
502  *
503  * Results:
504  *     Pointer to the queue on success, NULL otherwise.
505  *
506  * Side effects:
507  *     Memory is allocated.
508  *
509  *------------------------------------------------------------------------------
510  */
511
512 void *
513 vmci_alloc_queue(uint64_t size, uint32_t flags)
514 {
515         struct vmci_queue *queue;
516         size_t i;
517         const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
518         const size_t dmas_size = num_pages * sizeof(struct vmci_dma_alloc);
519         const size_t queue_size =
520             sizeof(*queue) + sizeof(*(queue->kernel_if)) + dmas_size;
521
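        /*
         * Worked example, assuming 4 KiB pages: for size = 65536, num_pages =
         * CEILING(65536, 4096) + 1 = 17, i.e. 16 data pages plus one page for
         * the queue header, and dmas_size covers the 17 per-page DMA
         * descriptors placed in the same allocation as the queue below.
         */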
522         /* Size should be enforced by vmci_qpair_alloc(), double-check here. */
523         if (size > VMCI_MAX_GUEST_QP_MEMORY) {
524                 ASSERT(false);
525                 return (NULL);
526         }
527
528         queue = malloc(queue_size, M_DEVBUF, M_NOWAIT);
529         if (!queue)
530                 return (NULL);
531
532         queue->q_header = NULL;
533         queue->saved_header = NULL;
534         queue->kernel_if = (struct vmci_queue_kernel_if *)(queue + 1);
535         queue->kernel_if->num_pages = num_pages;
536         queue->kernel_if->dmas = (struct vmci_dma_alloc *)(queue->kernel_if +
537             1);
538         for (i = 0; i < num_pages; i++) {
539                 vmci_dma_malloc(PAGE_SIZE, 1, &queue->kernel_if->dmas[i]);
540                 if (!queue->kernel_if->dmas[i].dma_vaddr) {
541                         /* Size excl. the header. */
542                         vmci_free_queue(queue, i * PAGE_SIZE);
543                         return (NULL);
544                 }
545         }
546
547         /* Queue header is the first page. */
548         queue->q_header = (void *)queue->kernel_if->dmas[0].dma_vaddr;
549
550         return ((void *)queue);
551 }
552
553 /*
554  *------------------------------------------------------------------------------
555  *
556  * vmci_free_queue --
557  *
558  *     Frees kernel VA space for a given queue and its queue header, and frees
559  *     physical data pages.
560  *
561  * Results:
562  *     None.
563  *
564  * Side effects:
565  *     Memory is freed.
566  *
567  *------------------------------------------------------------------------------
568  */
569
570 void
571 vmci_free_queue(void *q, uint64_t size)
572 {
573         struct vmci_queue *queue = q;
574
575         if (queue) {
576                 const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
577                 uint64_t i;
578
579                 /* Given size doesn't include header, so add in a page here. */
580                 for (i = 0; i < num_pages; i++)
581                         vmci_dma_free(&queue->kernel_if->dmas[i]);
582                 free(queue, M_DEVBUF);
583         }
584 }
585
586 /*
587  *------------------------------------------------------------------------------
588  *
589  * vmci_alloc_ppn_set --
590  *
591  *     Allocates two lists of PPNs --- one for the pages in the produce queue,
592  *     and the other for the pages in the consume queue. Initializes the lists
593  *     of PPNs with the page frame numbers of the KVA for the two queues (and
594  *     the queue headers).
595  *
596  * Results:
597  *     Success or failure.
598  *
599  * Side effects:
600  *     Memory may be allocated.
601  *
602  *------------------------------------------------------------------------------
603  */
604
605 int
606 vmci_alloc_ppn_set(void *prod_q, uint64_t num_produce_pages, void *cons_q,
607     uint64_t num_consume_pages, struct ppn_set *ppn_set)
608 {
609         struct vmci_queue *consume_q = cons_q;
610         struct vmci_queue *produce_q = prod_q;
611         vmci_ppn_list consume_ppns;
612         vmci_ppn_list produce_ppns;
613         uint64_t i;
614
615         if (!produce_q || !num_produce_pages || !consume_q ||
616             !num_consume_pages || !ppn_set)
617                 return (VMCI_ERROR_INVALID_ARGS);
618
619         if (ppn_set->initialized)
620                 return (VMCI_ERROR_ALREADY_EXISTS);
621
622         produce_ppns =
623             vmci_alloc_kernel_mem(num_produce_pages * sizeof(*produce_ppns),
624             VMCI_MEMORY_NORMAL);
625         if (!produce_ppns)
626                 return (VMCI_ERROR_NO_MEM);
627
628         consume_ppns =
629             vmci_alloc_kernel_mem(num_consume_pages * sizeof(*consume_ppns),
630             VMCI_MEMORY_NORMAL);
631         if (!consume_ppns) {
632                 vmci_free_kernel_mem(produce_ppns,
633                     num_produce_pages * sizeof(*produce_ppns));
634                 return (VMCI_ERROR_NO_MEM);
635         }
636
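        /*
         * Each PPN is the physical page frame number of a queue page
         * (dma_paddr >> PAGE_SHIFT).  If a PFN is wider than a PPN list entry
         * and gets truncated when stored, the hypervisor cannot address that
         * page, so the allocation is failed below.
         */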
637         for (i = 0; i < num_produce_pages; i++) {
638                 unsigned long pfn;
639
640                 produce_ppns[i] =
641                     pfn = produce_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;
642
643                 /*
644                  * Fail allocation if PFN isn't supported by hypervisor.
645                  */
646
647                 if (sizeof(pfn) >
648                     sizeof(*produce_ppns) && pfn != produce_ppns[i])
649                         goto ppn_error;
650         }
651         for (i = 0; i < num_consume_pages; i++) {
652                 unsigned long pfn;
653
654                 consume_ppns[i] =
655                     pfn = consume_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;
656
657                 /*
658                  * Fail allocation if PFN isn't supported by hypervisor.
659                  */
660
661                 if (sizeof(pfn) >
662                     sizeof(*consume_ppns) && pfn != consume_ppns[i])
663                         goto ppn_error;
664
665         }
666
667         ppn_set->num_produce_pages = num_produce_pages;
668         ppn_set->num_consume_pages = num_consume_pages;
669         ppn_set->produce_ppns = produce_ppns;
670         ppn_set->consume_ppns = consume_ppns;
671         ppn_set->initialized = true;
672         return (VMCI_SUCCESS);
673
674 ppn_error:
675         vmci_free_kernel_mem(produce_ppns, num_produce_pages *
676             sizeof(*produce_ppns));
677         vmci_free_kernel_mem(consume_ppns, num_consume_pages *
678             sizeof(*consume_ppns));
679         return (VMCI_ERROR_INVALID_ARGS);
680 }
681
682 /*
683  *------------------------------------------------------------------------------
684  *
685  * vmci_free_ppn_set --
686  *
687  *     Frees the two lists of PPNs for a queue pair.
688  *
689  * Results:
690  *     None.
691  *
692  * Side effects:
693  *     None.
694  *
695  *------------------------------------------------------------------------------
696  */
697
698 void
699 vmci_free_ppn_set(struct ppn_set *ppn_set)
700 {
701
702         ASSERT(ppn_set);
703         if (ppn_set->initialized) {
704                 /* Do not call these functions on NULL inputs. */
705                 ASSERT(ppn_set->produce_ppns && ppn_set->consume_ppns);
706                 vmci_free_kernel_mem(ppn_set->produce_ppns,
707                     ppn_set->num_produce_pages *
708                     sizeof(*ppn_set->produce_ppns));
709                 vmci_free_kernel_mem(ppn_set->consume_ppns,
710                     ppn_set->num_consume_pages *
711                     sizeof(*ppn_set->consume_ppns));
712         }
713         memset(ppn_set, 0, sizeof(*ppn_set));
714 }
715
716 /*
717  *------------------------------------------------------------------------------
718  *
719  * vmci_populate_ppn_list --
720  *
721  *     Populates the list of PPNs in the hypercall structure with the PPNS
722  *     of the produce queue and the consume queue.
723  *
724  * Results:
725  *     VMCI_SUCCESS.
726  *
727  * Side effects:
728  *     None.
729  *
730  *------------------------------------------------------------------------------
731  */
732
733 int
734 vmci_populate_ppn_list(uint8_t *call_buf, const struct ppn_set *ppn_set)
735 {
736
737         ASSERT(call_buf && ppn_set && ppn_set->initialized);
738         memcpy(call_buf, ppn_set->produce_ppns,
739             ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
740         memcpy(call_buf + ppn_set->num_produce_pages *
741             sizeof(*ppn_set->produce_ppns), ppn_set->consume_ppns,
742             ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
743
744         return (VMCI_SUCCESS);
745 }
746
747 /*
748  *------------------------------------------------------------------------------
749  *
750  * vmci_memcpy_{to,from}iovec --
751  *
752  *     These helper routines copy the specified bytes to/from memory given as
753  *     a struct iovec, advancing iov_base/iov_len as they go.  The routines
754  *     cannot verify the correctness of the struct iovec's contents.
755  *
756  * Results:
757  *      None.
758  *
759  * Side effects:
760  *      None.
761  *
762  *------------------------------------------------------------------------------
763  */
764
765 static inline void
766 vmci_memcpy_toiovec(struct iovec *iov, uint8_t *src, size_t len)
767 {
768
769         while (len > 0) {
770                 if (iov->iov_len) {
771                         size_t to_copy = MIN(iov->iov_len, len);
772                         memcpy(iov->iov_base, src, to_copy);
773                         src += to_copy;
774                         len -= to_copy;
775                         iov->iov_base = (void *)((uintptr_t) iov->iov_base +
776                             to_copy);
777                         iov->iov_len -= to_copy;
778                 }
779                 iov++;
780         }
781 }
782
783 static inline void
784 vmci_memcpy_fromiovec(uint8_t *dst, struct iovec *iov, size_t len)
785 {
786
787         while (len > 0) {
788                 if (iov->iov_len) {
789                         size_t to_copy = MIN(iov->iov_len, len);
790                         memcpy(dst, iov->iov_base, to_copy);
791                         dst += to_copy;
792                         len -= to_copy;
793                         iov->iov_base = (void *)((uintptr_t) iov->iov_base +
794                             to_copy);
795                         iov->iov_len -= to_copy;
796                 }
797                 iov++;
798         }
799 }
800
801 /*
802  *------------------------------------------------------------------------------
803  *
804  * __vmci_memcpy_to_queue --
805  *
806  *     Copies from a given buffer or iovector to a VMCI Queue. Assumes that
807  *     offset + size does not wrap around in the queue.
808  *
809  * Results:
810  *     Zero on success, negative error code on failure.
811  *
812  * Side effects:
813  *     None.
814  *
815  *------------------------------------------------------------------------------
816  */
817
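/*
 * __vmci_memcpy_to_queue() casts away the const qualifier on 'src' when the
 * buffer is really a struct iovec that must be updated as it is consumed, so
 * silence -Wcast-qual here.
 */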
818 #pragma GCC diagnostic ignored "-Wcast-qual"
819 static int
820 __vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
821     const void *src, size_t size, bool is_iovec)
822 {
823         struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
824         size_t bytes_copied = 0;
825
826         while (bytes_copied < size) {
827                 const uint64_t page_index =
828                     (queue_offset + bytes_copied) / PAGE_SIZE;
829                 const size_t page_offset =
830                     (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
831                 void *va;
832                 size_t to_copy;
833
834                 /* Skip header. */
835                 va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;
836
837                 ASSERT(va);
838                 /*
839                  * Fill up the page if we have enough payload, or else
840                  * copy the remaining bytes.
841                  */
842                 to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);
843
844                 if (is_iovec) {
845                         struct iovec *iov = (struct iovec *)src;
846
847                         /* The iovec will track bytes_copied internally. */
848                         vmci_memcpy_fromiovec((uint8_t *)va + page_offset,
849                             iov, to_copy);
850                 } else
851                         memcpy((uint8_t *)va + page_offset,
852                             (uint8_t *)src + bytes_copied, to_copy);
853                 bytes_copied += to_copy;
854         }
855
856         return (VMCI_SUCCESS);
857 }
858
859 /*
860  *------------------------------------------------------------------------------
861  *
862  * __vmci_memcpy_from_queue --
863  *
864  *     Copies to a given buffer or iovector from a VMCI Queue. Assumes that
865  *     offset + size does not wrap around in the queue.
866  *
867  * Results:
868  *     Zero on success, negative error code on failure.
869  *
870  * Side effects:
871  *     None.
872  *
873  *------------------------------------------------------------------------------
874  */
875
876 static int
877 __vmci_memcpy_from_queue(void *dest, const struct vmci_queue *queue,
878     uint64_t queue_offset, size_t size, bool is_iovec)
879 {
880         struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
881         size_t bytes_copied = 0;
882
883         while (bytes_copied < size) {
884                 const uint64_t page_index =
885                     (queue_offset + bytes_copied) / PAGE_SIZE;
886                 const size_t page_offset =
887                     (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
888                 void *va;
889                 size_t to_copy;
890
891                 /* Skip header. */
892                 va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;
893
894                 ASSERT(va);
895                 /*
896                  * Fill up the page if we have enough payload, or else
897                  * copy the remaining bytes.
898                  */
899                 to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);
900
901                 if (is_iovec) {
902                         struct iovec *iov = (struct iovec *)dest;
903
904                         /* The iovec will track bytes_copied internally. */
905                         vmci_memcpy_toiovec(iov, (uint8_t *)va +
906                             page_offset, to_copy);
907                 } else
908                         memcpy((uint8_t *)dest + bytes_copied,
909                             (uint8_t *)va + page_offset, to_copy);
910
911                 bytes_copied += to_copy;
912         }
913
914         return (VMCI_SUCCESS);
915 }
916
917 /*
918  *------------------------------------------------------------------------------
919  *
920  * vmci_memcpy_to_queue --
921  *
922  *     Copies from a given buffer to a VMCI Queue.
923  *
924  * Results:
925  *     Zero on success, negative error code on failure.
926  *
927  * Side effects:
928  *     None.
929  *
930  *------------------------------------------------------------------------------
931  */
932
933 int
934 vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
935     const void *src, size_t src_offset, size_t size, int buf_type,
936     bool can_block)
937 {
938
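        /* Only blocking copies are supported; buf_type is not used here. */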
939         ASSERT(can_block);
940
941         return (__vmci_memcpy_to_queue(queue, queue_offset,
942             (uint8_t *)src + src_offset, size, false));
943 }
944
945 /*
946  *------------------------------------------------------------------------------
947  *
948  * vmci_memcpy_from_queue --
949  *
950  *      Copies to a given buffer from a VMCI Queue.
951  *
952  * Results:
953  *      Zero on success, negative error code on failure.
954  *
955  * Side effects:
956  *      None.
957  *
958  *------------------------------------------------------------------------------
959  */
960
961 int
962 vmci_memcpy_from_queue(void *dest, size_t dest_offset,
963     const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
964     int buf_type, bool can_block)
965 {
966
967         ASSERT(can_block);
968
969         return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
970             queue, queue_offset, size, false));
971 }
972
973 /*
974  *------------------------------------------------------------------------------
975  *
976  * vmci_memcpy_to_queue_local --
977  *
978  *     Copies from a given buffer to a local VMCI queue. This is the
979  *     same as a regular copy.
980  *
981  * Results:
982  *     Zero on success, negative error code on failure.
983  *
984  * Side effects:
985  *     None.
986  *
987  *------------------------------------------------------------------------------
988  */
989
990 int
991 vmci_memcpy_to_queue_local(struct vmci_queue *queue, uint64_t queue_offset,
992     const void *src, size_t src_offset, size_t size, int buf_type,
993     bool can_block)
994 {
995
996         ASSERT(can_block);
997
998         return (__vmci_memcpy_to_queue(queue, queue_offset,
999             (uint8_t *)src + src_offset, size, false));
1000 }
1001
1002 /*
1003  *------------------------------------------------------------------------------
1004  *
1005  * vmci_memcpy_from_queue_local --
1006  *
1007  *     Copies to a given buffer from a local VMCI queue.
1008  *
1009  * Results:
1010  *     Zero on success, negative error code on failure.
1011  *
1012  * Side effects:
1013  *     None.
1014  *
1015  *------------------------------------------------------------------------------
1016  */
1017
1018 int
1019 vmci_memcpy_from_queue_local(void *dest, size_t dest_offset,
1020     const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
1021     int buf_type, bool can_block)
1022 {
1023
1024         ASSERT(can_block);
1025
1026         return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
1027             queue, queue_offset, size, false));
1028 }
1029
1030 /*
 *------------------------------------------------------------------------------
1031  *
1032  * vmci_memcpy_to_queue_v --
1033  *
1034  *     Copies from a given iovec to a VMCI Queue.
1035  *
1036  * Results:
1037  *     Zero on success, negative error code on failure.
1038  *
1039  * Side effects:
1040  *     None.
1041  *
1042  *------------------------------------------------------------------------------
1043  */
1044
1045 int
1046 vmci_memcpy_to_queue_v(struct vmci_queue *queue, uint64_t queue_offset,
1047     const void *src, size_t src_offset, size_t size, int buf_type,
1048     bool can_block)
1049 {
1050
1051         ASSERT(can_block);
1052
1053         /*
1054          * We ignore src_offset because src is really a struct iovec * and will
1055          * maintain offset internally.
1056          */
1057         return (__vmci_memcpy_to_queue(queue, queue_offset, src, size,
1058             true));
1059 }
1060
1061 /*
1062  *------------------------------------------------------------------------------
1063  *
1064  * vmci_memcpy_from_queue_v --
1065  *
1066  *     Copies to a given iovec from a VMCI Queue.
1067  *
1068  * Results:
1069  *     Zero on success, negative error code on failure.
1070  *
1071  * Side effects:
1072  *     None.
1073  *
1074  *------------------------------------------------------------------------------
1075  */
1076
1077 int
1078 vmci_memcpy_from_queue_v(void *dest, size_t dest_offset,
1079     const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
1080     int buf_type, bool can_block)
1081 {
1082
1083         ASSERT(can_block);
1084
1085         /*
1086          * We ignore dest_offset because dest is really a struct iovec * and
1087          * will maintain offset internally.
1088          */
1089         return (__vmci_memcpy_from_queue(dest, queue, queue_offset, size,
1090             true));
1091 }
1092
1093 /*
1094  *------------------------------------------------------------------------------
1095  *
1096  * vmci_read_port_bytes --
1097  *
1098  *     Copy memory from an I/O port to kernel memory.
1099  *
1100  * Results:
1101  *     None.
1102  *
1103  * Side effects:
1104  *     None.
1105  *
1106  *------------------------------------------------------------------------------
1107  */
1108
1109 void
1110 vmci_read_port_bytes(vmci_io_handle handle, vmci_io_port port, uint8_t *buffer,
1111     size_t buffer_length)
1112 {
1113
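        /*
         * insb() performs buffer_length single-byte reads from the I/O port
         * into buffer; the io handle is not used by this implementation.
         */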
1114         insb(port, buffer, buffer_length);
1115 }