/*-
 * Copyright (c) 2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>
#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
	sysctl_show_timers, "I", "Show ntoskrnl timer stats");
#endif
typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
	struct cv		we_cv;
	struct thread		*we_td;
};

typedef struct wb_ext wb_ext;
#define NTOSKRNL_TIMEOUTS	256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif
struct callout_entry {
	struct callout		ce_callout;
	list_entry		ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;
static uint8_t RtlEqualUnicodeString(unicode_string *,
	unicode_string *, uint8_t);
static void RtlCopyUnicodeString(unicode_string *,
	unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
	*ExInterlockedPushEntrySList(slist_header *,
	slist_entry *, kspin_lock *);
static slist_entry
	*ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
	uint64_t, uint64_t, uint64_t, uint32_t);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t, uint32_t);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static uint8_t MmIsAddressValid(void *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
	uint32_t, uint32_t *);
static int atoi (const char *);
static long atol (const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
	uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void dummy(void);
static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;
int
ntoskrnl_libinit()
{
	image_patch_table	*patch;
	callout_entry		*e;
	kdpc_queue		*kq;
	struct proc		*p;
	int			error;
	int			i;
	char			name[64];

	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
	mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	KeInitializeSpinLock(&ntoskrnl_intlock);
	TAILQ_INIT(&ntoskrnl_reflist);

	InitializeListHead(&ntoskrnl_calllist);
	InitializeListHead(&ntoskrnl_intlist);
	mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

	kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
	    sizeof(kdpc_queue) * mp_ncpus, 0);
#else
	    sizeof(kdpc_queue), 0);
#endif

	if (kq_queues == NULL)
		return(ENOMEM);

	wq_queues = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

	if (wq_queues == NULL)
		return(ENOMEM);

#ifdef NTOSKRNL_MULTIPLE_DPCS
	bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
	bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
	bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

	/*
	 * Launch the DPC threads.
	 */

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
		sprintf(name, "Windows DPC %d", i);
		error = kthread_create(ntoskrnl_dpc_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, name);
		if (error)
			panic("failed to launch DPC thread");
	}

	/*
	 * Launch the workitem threads.
	 */

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		sprintf(name, "Windows Workitem %d", i);
		error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, name);
		if (error)
			panic("failed to launch workitem thread");
	}

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap,
		    patch->ipt_argcnt, patch->ipt_ftype);
		patch++;
	}

	for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
		e = ExAllocatePoolWithTag(NonPagedPool,
		    sizeof(callout_entry), 0);
		if (e == NULL)
			panic("failed to allocate timeouts");
		mtx_lock_spin(&ntoskrnl_calllock);
		InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
		mtx_unlock_spin(&ntoskrnl_calllock);
	}

	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
	 */

	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	return(0);
}
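/*
 * Worked example of the MDL zone cutoff described above (a sketch;
 * the exact value of MDL_ZONE_SIZE lives in ntoskrnl_var.h): an MDL
 * for a buffer spanning n pages occupies
 * sizeof(struct mdl) + n * sizeof(vm_offset_t *) bytes, which is
 * what MmSizeOfMdl() computes below. IoAllocateMdl() compares that
 * against MDL_ZONE_SIZE and only falls back to the heap when the
 * buffer is too big for the zone:
 *
 *	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
 *		m = ExAllocatePoolWithTag(NonPagedPool,
 *		    MmSizeOfMdl(vaddr, len), 0);
 *	else
 *		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
 */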
int
ntoskrnl_libfini()
{
	image_patch_table	*patch;
	callout_entry		*e;
	list_entry		*l;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

	/* Stop the workitem queues. */
	ntoskrnl_destroy_workitem_threads();
	/* Stop the DPC queues. */
	ntoskrnl_destroy_dpc_threads();

	ExFreePool(kq_queues);
	ExFreePool(wq_queues);

	uma_zdestroy(mdl_zone);
	uma_zdestroy(iw_zone);

	mtx_lock_spin(&ntoskrnl_calllock);
	while(!IsListEmpty(&ntoskrnl_calllist)) {
		l = RemoveHeadList(&ntoskrnl_calllist);
		e = CONTAINING_RECORD(l, callout_entry, ce_list);
		mtx_unlock_spin(&ntoskrnl_calllock);
		ExFreePool(e);
		mtx_lock_spin(&ntoskrnl_calllock);
	}
	mtx_unlock_spin(&ntoskrnl_calllock);

	mtx_destroy(&ntoskrnl_dispatchlock);
	mtx_destroy(&ntoskrnl_interlock);
	mtx_destroy(&ntoskrnl_calllock);

	return(0);
}
/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
	void			*buf;
	int			ch;
	size_t			size;
{
	return(memset(buf, ch, size));
}
static void *
ntoskrnl_memmove(dst, src, size)
	void			*dst;
	void			*src;
	size_t			size;
{
	bcopy(src, dst, size);
	return(dst);
}
static void *
ntoskrnl_memchr(buf, ch, len)
	void			*buf;
	unsigned char		ch;
	size_t			len;
{
	if (len != 0) {
		unsigned char *p = buf;

		do {
			if (*p++ == ch)
				return (p - 1);
		} while (--len != 0);
	}
	return (NULL);
}
static char *
ntoskrnl_strstr(s, find)
	char *s, *find;
{
	char c, sc;
	size_t len;

	if ((c = *find++) != 0) {
		len = strlen(find);
		do {
			do {
				if ((sc = *s++) == 0)
					return (NULL);
			} while (sc != c);
		} while (strncmp(s, find, len) != 0);
		s--;
	}
	return ((char *)s);
}
static uint8_t
RtlEqualUnicodeString(str1, str2, caseinsensitive)
	unicode_string		*str1;
	unicode_string		*str2;
	uint8_t			caseinsensitive;
{
	int			i;

	if (str1->us_len != str2->us_len)
		return(FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return(FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return(FALSE);
		}
	}

	return(TRUE);
}
static void
RtlCopyUnicodeString(dest, src)
	unicode_string		*dest;
	unicode_string		*src;
{
	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
}
static void
ntoskrnl_ascii_to_unicode(ascii, unicode, len)
	char			*ascii;
	uint16_t		*unicode;
	int			len;
{
	int			i;
	uint16_t		*ustr;

	ustr = unicode;
	for (i = 0; i < len; i++) {
		*ustr = (uint16_t)ascii[i];
		ustr++;
	}
}
static void
ntoskrnl_unicode_to_ascii(unicode, ascii, len)
	uint16_t		*unicode;
	char			*ascii;
	int			len;
{
	uint8_t			*astr;
	int			i;

	astr = (uint8_t *)ascii;
	for (i = 0; i < len / 2; i++) {
		*astr = (uint8_t)unicode[i];
		astr++;
	}
}
uint32_t
RtlUnicodeStringToAnsiString(dest, src, allocate)
	ansi_string		*dest;
	unicode_string		*src;
	uint8_t			allocate;
{
	if (dest == NULL || src == NULL)
		return(STATUS_INVALID_PARAMETER);

	dest->as_len = src->us_len / 2;
	if (dest->as_maxlen < dest->as_len)
		dest->as_len = dest->as_maxlen;

	if (allocate == TRUE) {
		dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
		    (src->us_len / 2) + 1, 0);
		if (dest->as_buf == NULL)
			return(STATUS_INSUFFICIENT_RESOURCES);
		dest->as_len = dest->as_maxlen = src->us_len / 2;
	} else {
		dest->as_len = src->us_len / 2; /* XXX */
		if (dest->as_maxlen < dest->as_len)
			dest->as_len = dest->as_maxlen;
	}

	ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
	    dest->as_len * 2);

	return (STATUS_SUCCESS);
}
uint32_t
RtlAnsiStringToUnicodeString(dest, src, allocate)
	unicode_string		*dest;
	ansi_string		*src;
	uint8_t			allocate;
{
	if (dest == NULL || src == NULL)
		return(STATUS_INVALID_PARAMETER);

	if (allocate == TRUE) {
		dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
		    src->as_len * 2, 0);
		if (dest->us_buf == NULL)
			return(STATUS_INSUFFICIENT_RESOURCES);
		dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
	} else {
		dest->us_len = src->as_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
	}

	ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
	    dest->us_len / 2);

	return (STATUS_SUCCESS);
}
void *
ExAllocatePoolWithTag(pooltype, len, tag)
	uint32_t		pooltype;
	size_t			len;
	uint32_t		tag;
{
	void			*buf;

	buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buf == NULL)
		return(NULL);

	return(buf);
}
uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
	driver_object		*drv;
	void			*clid;
	uint32_t		extlen;
	void			**ext;
{
	custom_extension	*ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return(STATUS_SUCCESS);
}
void *
IoGetDriverObjectExtension(drv, clid)
	driver_object		*drv;
	void			*clid;
{
	list_entry		*e;
	custom_extension	*ce;

	/*
	 * Sanity check. Our dummy bus drivers don't have
	 * any driver extensions.
	 */

	if (drv->dro_driverext == NULL)
		return(NULL);

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		if (ce->ce_clid == clid)
			return((void *)(ce + 1));
		e = e->nle_flink;
	}

	return(NULL);
}
uint32_t
IoCreateDevice(drv, devextlen, devname, devtype, devchars, exclusive, newdev)
	driver_object		*drv;
	uint32_t		devextlen;
	unicode_string		*devname;
	uint32_t		devtype;
	uint32_t		devchars;
	uint8_t			exclusive;
	device_object		**newdev;
{
	device_object		*dev;

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
	if (dev == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;
	dev->do_flags = 0;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return(STATUS_INSUFFICIENT_RESOURCES);
		}

		bzero(dev->do_devext, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * Vpb is used for disk/tape devices,
	 * but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return(STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAttachDeviceToDeviceStack()
	 * to do that.
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return(STATUS_SUCCESS);
}
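/*
 * Illustrative sketch (not part of this file): a Windows driver's
 * AddDevice routine typically pairs IoCreateDevice() with the
 * explicit stack attach mentioned in the comment above. The names
 * fdo, pdo and the extension size are hypothetical:
 *
 *	device_object		*fdo;
 *	uint32_t		status;
 *
 *	status = IoCreateDevice(drv, devextlen, NULL,
 *	    devtype, 0, FALSE, &fdo);
 *	if (status == STATUS_SUCCESS)
 *		IoAttachDeviceToDeviceStack(fdo, pdo);
 */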
void
IoDeleteDevice(dev)
	device_object		*dev;
{
	device_object		*prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);
}
device_object *
IoGetAttachedDevice(dev)
	device_object		*dev;
{
	device_object		*d;

	if (dev == NULL)
		return (NULL);

	d = dev;

	while (d->do_attacheddev != NULL)
		d = d->do_attacheddev;

	return (d);
}
static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
	uint32_t		func;
	device_object		*dobj;
	void			*buf;
	uint32_t		len;
	uint64_t		*off;
	nt_kevent		*event;
	io_status_block		*status;
{
	irp			*ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;

	return(ip);
}
static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
	uint32_t		func;
	device_object		*dobj;
	void			*buf;
	uint32_t		len;
	uint64_t		*off;
	io_status_block		*status;
{
	irp			*ip;
	io_stack_location	*sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return(NULL);
		}
		bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return(NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return(ip);
}
static irp *
IoBuildDeviceIoControlRequest(iocode, dobj, ibuf, ilen, obuf, olen,
    isinternal, event, status)
	uint32_t		iocode;
	device_object		*dobj;
	void			*ibuf;
	uint32_t		ilen;
	void			*obuf;
	uint32_t		olen;
	uint8_t			isinternal;
	nt_kevent		*event;
	io_status_block		*status;
{
	irp			*ip;
	io_stack_location	*sl;
	uint32_t		buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch(IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
			bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
			    buflen - ilen);
		} else
			bzero(ip->irp_assoc.irp_sysbuf, ilen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}
static irp *
IoAllocateIrp(stsize, chargequota)
	uint8_t			stsize;
	uint8_t			chargequota;
{
	irp			*i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}
static irp *
IoMakeAssociatedIrp(ip, stsize)
	irp			*ip;
	uint8_t			stsize;
{
	irp			*associrp;

	associrp = IoAllocateIrp(stsize, FALSE);
	if (associrp == NULL)
		return(NULL);

	mtx_lock(&ntoskrnl_dispatchlock);
	associrp->irp_flags |= IRP_ASSOCIATED_IRP;
	associrp->irp_tail.irp_overlay.irp_thread =
	    ip->irp_tail.irp_overlay.irp_thread;
	associrp->irp_assoc.irp_master = ip;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(associrp);
}
static void
IoInitializeIrp(io, psize, ssize)
	irp			*io;
	uint16_t		psize;
	uint8_t			ssize;
{
	bzero((char *)io, IoSizeOfIrp(ssize));
	io->irp_size = psize;
	io->irp_stackcnt = ssize;
	io->irp_currentstackloc = ssize;
	InitializeListHead(&io->irp_thlist);
	io->irp_tail.irp_overlay.irp_csl =
	    (io_stack_location *)(io + 1) + ssize;
}
static void
IoReuseIrp(ip, status)
	irp			*ip;
	uint32_t		status;
{
	uint8_t			allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;
}
void
IoAcquireCancelSpinLock(irql)
	uint8_t			*irql;
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
}

void
IoReleaseCancelSpinLock(irql)
	uint8_t			irql;
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
}
uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func		cfunc;

	IoAcquireCancelSpinLock(&ip->irp_cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	if (ip->irp_cancelfunc == NULL) {
		IoReleaseCancelSpinLock(ip->irp_cancelirql);
		return(FALSE);
	}
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);

	return(TRUE);
}
uint32_t
IofCallDriver(dobj, ip)
	device_object		*dobj;
	irp			*ip;
{
	driver_object		*drvobj;
	io_stack_location	*sl;
	uint32_t		status;
	driver_dispatch		disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return(status);
}
void
IofCompleteRequest(ip, prioboost)
	irp			*ip;
	uint8_t			prioboost;
{
	uint32_t		i;
	uint32_t		status;
	device_object		*dobj;
	io_stack_location	*sl;
	completion_func		cf;

	ip->irp_pendingreturned =
	    IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
	sl = (io_stack_location *)(ip + 1);

	for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
		if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
			IoSkipCurrentIrpStackLocation(ip);
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		} else
			dobj = NULL;

		if (sl[i].isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl->isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		}

		if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
		    SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;
	}

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t		masterirpcnt;
		irp			*masterirp;
		mdl			*m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt =
		    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
		if (ip->irp_usriostat != NULL)
			*ip->irp_usriostat = ip->irp_iostat;
		if (ip->irp_usrevent != NULL)
			KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
		if (ip->irp_flags & IRP_PAGING_IO) {
			if (ip->irp_mdl != NULL)
				IoFreeMdl(ip->irp_mdl);
			IoFreeIrp(ip);
		}
	}
}
void
ntoskrnl_intr(arg)
	void			*arg;
{
	kinterrupt		*iobj;
	uint8_t			irql;
	uint8_t			claimed;
	list_entry		*l;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	l = ntoskrnl_intlist.nle_flink;
	while (l != &ntoskrnl_intlist) {
		iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
		claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
		if (claimed == TRUE)
			break;
		l = l->nle_flink;
	}
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);
}
uint8_t
KeAcquireInterruptSpinLock(iobj)
	kinterrupt		*iobj;
{
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	return(irql);
}

void
KeReleaseInterruptSpinLock(iobj, irql)
	kinterrupt		*iobj;
	uint8_t			irql;
{
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);
}
uint8_t
KeSynchronizeExecution(iobj, syncfunc, syncctx)
	kinterrupt		*iobj;
	void			*syncfunc;
	void			*syncctx;
{
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	MSCALL1(syncfunc, syncctx);
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	return(TRUE);
}
/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke all the registered ISRs. This effectively makes all
 * interrupts shared, but it's the only way to duplicate the
 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */
uint32_t
IoConnectInterrupt(iobj, svcfunc, svcctx, lock, vector, irql,
    syncirql, imode, shared, affinity, savefloat)
	kinterrupt		**iobj;
	void			*svcfunc;
	void			*svcctx;
	kspin_lock		*lock;
	uint32_t		vector;
	uint8_t			irql;
	uint8_t			syncirql;
	uint8_t			imode;
	uint8_t			shared;
	uint32_t		affinity;
	uint8_t			savefloat;
{
	uint8_t			curirql;

	*iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
	if (*iobj == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	(*iobj)->ki_svcfunc = svcfunc;
	(*iobj)->ki_svcctx = svcctx;

	if (lock == NULL) {
		KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
		(*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
	} else
		(*iobj)->ki_lock = lock;

	KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
	InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

	return(STATUS_SUCCESS);
}
void
IoDisconnectInterrupt(iobj)
	kinterrupt		*iobj;
{
	uint8_t			irql;

	if (iobj == NULL)
		return;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	RemoveEntryList((&iobj->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	ExFreePool(iobj);
}
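/*
 * Usage sketch (hypothetical driver code, not part of the build):
 * a driver registers its ISR and later tears it down as shown
 * below; my_isr and my_softc stand in for the driver's own names,
 * and the parameter order follows the definition above:
 *
 *	kinterrupt		*kint;
 *	uint32_t		status;
 *
 *	status = IoConnectInterrupt(&kint, my_isr, my_softc,
 *	    NULL, vector, irql, irql, 0, TRUE, 0, FALSE);
 *	...
 *	IoDisconnectInterrupt(kint);
 *
 * Every such registration lands on ntoskrnl_intr()'s dispatch
 * list, so all interrupts are effectively shared, as described
 * in the comment above IoConnectInterrupt().
 */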
device_object *
IoAttachDeviceToDeviceStack(src, dst)
	device_object		*src;
	device_object		*dst;
{
	device_object		*attached;

	mtx_lock(&ntoskrnl_dispatchlock);
	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(attached);
}
void
IoDetachDevice(topdev)
	device_object		*topdev;
{
	device_object		*tail;

	mtx_lock(&ntoskrnl_dispatchlock);

	/* First, break the chain. */

	tail = topdev->do_attacheddev;
	if (tail == NULL) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

	mtx_unlock(&ntoskrnl_dispatchlock);
}
/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */

static int
ntoskrnl_is_signalled(obj, td)
	nt_dispatch_header	*obj;
	struct thread		*td;
{
	kmutant			*km;

	if (obj->dh_type == DISP_TYPE_MUTANT) {
		km = (kmutant *)obj;
		if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
		    obj->dh_sigstate == 1)
			return(TRUE);
		return(FALSE);
	}

	if (obj->dh_sigstate > 0)
		return(TRUE);
	return(FALSE);
}
static void
ntoskrnl_satisfy_wait(obj, td)
	nt_dispatch_header	*obj;
	struct thread		*td;
{
	kmutant			*km;

	switch (obj->dh_type) {
	case DISP_TYPE_MUTANT:
		km = (struct kmutant *)obj;
		obj->dh_sigstate--;
		/*
		 * If sigstate reaches 0, the mutex is now
		 * non-signalled (the new thread owns it).
		 */
		if (obj->dh_sigstate == 0) {
			km->km_ownerthread = td;
			if (km->km_abandoned == TRUE)
				km->km_abandoned = FALSE;
		}
		break;
	/* Synchronization objects get reset to unsignalled. */
	case DISP_TYPE_SYNCHRONIZATION_EVENT:
	case DISP_TYPE_SYNCHRONIZATION_TIMER:
		obj->dh_sigstate = 0;
		break;
	case DISP_TYPE_SEMAPHORE:
		obj->dh_sigstate--;
		break;
	default:
		break;
	}
}
static void
ntoskrnl_satisfy_multiple_waits(wb)
	wait_block		*wb;
{
	wait_block		*cur;
	struct thread		*td;

	cur = wb;
	td = wb->wb_kthread;

	do {
		ntoskrnl_satisfy_wait(wb->wb_object, td);
		cur->wb_awakened = TRUE;
		cur = cur->wb_next;
	} while (cur != wb);
}
/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(obj, increment)
	nt_dispatch_header	*obj;
	uint32_t		increment;
{
	wait_block		*w, *next;
	list_entry		*e;
	struct thread		*td;
	wb_ext			*we;
	int			satisfied;

	/*
	 * Once an object has been signalled, we walk its list of
	 * wait blocks. If a wait block can be awakened, then satisfy
	 * waits as necessary and wake the thread.
	 *
	 * The rules work like this:
	 *
	 * If a wait block is marked as WAITTYPE_ANY, then
	 * we can satisfy the wait conditions on the current
	 * object and wake the thread right away. Satisfying
	 * the wait also has the effect of breaking us out
	 * of the search loop.
	 *
	 * If the object is marked as WAITTYPE_ALL, then the
	 * wait block will be part of a circularly linked
	 * list of wait blocks belonging to a waiting thread
	 * that's sleeping in KeWaitForMultipleObjects(). In
	 * order to wake the thread, all the objects in the
	 * wait list must be in the signalled state. If they
	 * are, we then satisfy all of them and wake the
	 * thread.
	 */
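	/*
	 * Concrete scenario (illustrative): thread T calls
	 * KeWaitForMultipleObjects() with WAITTYPE_ALL on events
	 * A and B. When A is signalled, we land here with obj == A,
	 * find T's wait block, and walk its circular wb_next list.
	 * If B is not yet signalled, T keeps sleeping; once B is
	 * signalled too, the walk succeeds, all the waits are
	 * satisfied, and T is awakened via cv_broadcastpri().
	 */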
	e = obj->dh_waitlisthead.nle_flink;

	while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
		w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
		we = w->wb_ext;
		td = we->we_td;
		satisfied = FALSE;
		if (w->wb_waittype == WAITTYPE_ANY) {
			/*
			 * Thread can be awakened if
			 * any wait is satisfied.
			 */
			ntoskrnl_satisfy_wait(obj, td);
			satisfied = TRUE;
			w->wb_awakened = TRUE;
		} else {
			/*
			 * Thread can only be woken up
			 * if all waits are satisfied.
			 * If the thread is waiting on multiple
			 * objects, they should all be linked
			 * through the wb_next pointers in the
			 * wait blocks.
			 */
			satisfied = TRUE;
			next = w->wb_next;
			while (next != w) {
				if (ntoskrnl_is_signalled(obj, td) == FALSE) {
					satisfied = FALSE;
					break;
				}
				next = next->wb_next;
			}
			ntoskrnl_satisfy_multiple_waits(w);
		}

		if (satisfied == TRUE)
			cv_broadcastpri(&we->we_cv, w->wb_oldpri -
			    (increment * 4));

		e = e->nle_flink;
	}
}
static void
ntoskrnl_time(tval)
	uint64_t		*tval;
{
	struct timespec		ts;

	nanotime(&ts);
	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    (uint64_t)11644473600 * 10000000;
}
/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */
uint32_t
KeWaitForSingleObject(arg, reason, mode, alertable, duetime)
	void			*arg;
	uint32_t		reason;
	uint32_t		mode;
	uint8_t			alertable;
	int64_t			*duetime;
{
	wait_block		w;
	struct thread		*td = curthread;
	struct timeval		tv;
	int			error = 0;
	uint64_t		curtime;
	wb_ext			we;
	nt_dispatch_header	*obj;

	obj = arg;

	if (obj == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFS");
	we.we_td = td;

	/*
	 * Check to see if this object is already signalled,
	 * and just return without waiting if it is.
	 */
	if (ntoskrnl_is_signalled(obj, td) == TRUE) {
		/* Sanity check the signal state value. */
		if (obj->dh_sigstate != INT32_MIN) {
			ntoskrnl_satisfy_wait(obj, curthread);
			mtx_unlock(&ntoskrnl_dispatchlock);
			return (STATUS_SUCCESS);
		} else {
			/*
			 * There's a limit to how many times we can
			 * recursively acquire a mutant. If we hit
			 * the limit, something is very wrong.
			 */
			if (obj->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}
		}
	}

	bzero((char *)&w, sizeof(wait_block));
	w.wb_object = obj;
	w.wb_ext = &we;
	w.wb_waittype = WAITTYPE_ANY;
	w.wb_next = &w;
	w.wb_awakened = FALSE;
	w.wb_oldpri = td->td_priority;

	InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an absolute offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
	 */
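	/*
	 * Worked example (illustrative): a caller passing
	 * *duetime == -50000000 asks for a relative wait of
	 * 50000000 * 100ns = 5 seconds, which the conversion below
	 * turns into tv = { 5, 0 }. A positive duetime of "now plus
	 * 5 seconds", expressed in 100ns units since 1601 as
	 * returned by ntoskrnl_time(), yields the same tv after
	 * the subtraction in the else branch.
	 */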
	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	if (duetime == NULL)
		cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
	else
		error = cv_timedwait(&we.we_cv,
		    &ntoskrnl_dispatchlock, tvtohz(&tv));

	RemoveEntryList(&w.wb_waitlist);

	cv_destroy(&we.we_cv);

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_TIMEOUT);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return(STATUS_SUCCESS);
}
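/*
 * Usage sketch (hypothetical, not part of the build): waiting on an
 * event with a one second relative timeout, where "event" stands in
 * for a driver-owned nt_kevent. The duetime of -10000000 is one
 * second expressed as a negative (relative) count of 100ns units:
 *
 *	int64_t		duetime;
 *	uint32_t	rval;
 *
 *	duetime = -10000000;
 *	rval = KeWaitForSingleObject(event, 0, 0, TRUE, &duetime);
 *	if (rval == STATUS_TIMEOUT)
 *		handle the timeout case;
 */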
static uint32_t
KeWaitForMultipleObjects(cnt, obj, wtype, reason, mode,
    alertable, duetime, wb_array)
	uint32_t		cnt;
	nt_dispatch_header	*obj[];
	uint32_t		wtype;
	uint32_t		reason;
	uint32_t		mode;
	uint8_t			alertable;
	int64_t			*duetime;
	wait_block		*wb_array;
{
	struct thread		*td = curthread;
	wait_block		*whead, *w;
	wait_block		_wb_array[MAX_WAIT_OBJECTS];
	nt_dispatch_header	*cur;
	struct timeval		tv;
	int			i, wcnt = 0, error = 0;
	uint64_t		curtime;
	struct timespec		t1, t2;
	uint32_t		status = STATUS_SUCCESS;
	wb_ext			we;

	if (cnt > MAX_WAIT_OBJECTS)
		return(STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFM");
	we.we_td = td;

	if (wb_array == NULL)
		whead = _wb_array;
	else
		whead = wb_array;

	bzero((char *)whead, sizeof(wait_block) * cnt);

	/* First pass: see if we can satisfy any waits immediately. */

	wcnt = 0;
	w = whead;

	for (i = 0; i < cnt; i++) {
		InsertTailList((&obj[i]->dh_waitlisthead),
		    (&w->wb_waitlist));
		w->wb_ext = &we;
		w->wb_object = obj[i];
		w->wb_waittype = wtype;
		w->wb_waitkey = i;
		w->wb_awakened = FALSE;
		w->wb_oldpri = td->td_priority;
		w->wb_next = w + 1;
		w++;
		wcnt++;
		if (ntoskrnl_is_signalled(obj[i], td)) {
			/*
			 * There's a limit to how many times
			 * we can recursively acquire a mutant.
			 * If we hit the limit, something
			 * is very wrong.
			 */
			if (obj[i]->dh_sigstate == INT32_MIN &&
			    obj[i]->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}

			/*
			 * If this is a WAITTYPE_ANY wait, then
			 * satisfy the waited object and exit
			 * right now.
			 */
			if (wtype == WAITTYPE_ANY) {
				ntoskrnl_satisfy_wait(obj[i], td);
				status = STATUS_WAIT_0 + i;
				goto wait_done;
			} else {
				w--;
				wcnt--;
				w->wb_object = NULL;
				RemoveEntryList(&w->wb_waitlist);
			}
		}
	}

	/*
	 * If this is a WAITTYPE_ALL wait and all objects are
	 * already signalled, satisfy the waits and exit now.
	 */

	if (wtype == WAITTYPE_ALL && wcnt == 0) {
		for (i = 0; i < cnt; i++)
			ntoskrnl_satisfy_wait(obj[i], td);
		status = STATUS_SUCCESS;
		goto wait_done;
	}

	/*
	 * Create a circular waitblock list. The waitcount
	 * must always be non-zero when we get here.
	 */

	(w - 1)->wb_next = whead;

	/* Wait on any objects that aren't yet signalled. */

	/* Calculate timeout, if any. */

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
		nanotime(&t1);

		if (duetime == NULL)
			cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
		else
			error = cv_timedwait(&we.we_cv,
			    &ntoskrnl_dispatchlock, tvtohz(&tv));

		/* Wait with timeout expired. */

		if (error) {
			status = STATUS_TIMEOUT;
			goto wait_done;
		}

		nanotime(&t2);

		/* See what's been signalled. */

		w = whead;
		do {
			cur = w->wb_object;
			if (ntoskrnl_is_signalled(cur, td) == TRUE ||
			    w->wb_awakened == TRUE) {
				/* Sanity check the signal state value. */
				if (cur->dh_sigstate == INT32_MIN &&
				    cur->dh_type == DISP_TYPE_MUTANT) {
					mtx_unlock(&ntoskrnl_dispatchlock);
					panic("mutant limit exceeded");
				}
				wcnt--;
				if (wtype == WAITTYPE_ANY) {
					status = w->wb_waitkey &
					    STATUS_WAIT_0;
					goto wait_done;
				}
			}
			w = w->wb_next;
		} while (w != whead);

		/*
		 * If all objects have been signalled, or if this
		 * is a WAITTYPE_ANY wait and we were woken up by
		 * someone, we can bail.
		 */

		if (wcnt == 0) {
			status = STATUS_SUCCESS;
			goto wait_done;
		}

		/*
		 * If this is WAITTYPE_ALL wait, and there's still
		 * objects that haven't been signalled, deduct the
		 * time that's elapsed so far from the timeout and
		 * wait again (or continue waiting indefinitely if
		 * there's no timeout).
		 */

		if (duetime != NULL) {
			tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
			tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
		}
	}

wait_done:

	cv_destroy(&we.we_cv);

	for (i = 0; i < cnt; i++) {
		if (whead[i].wb_object != NULL)
			RemoveEntryList(&whead[i].wb_waitlist);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return(status);
}
static void
WRITE_REGISTER_USHORT(reg, val)
	uint16_t		*reg;
	uint16_t		val;
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}

static uint16_t
READ_REGISTER_USHORT(reg)
	uint16_t		*reg;
{
	return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static void
WRITE_REGISTER_ULONG(reg, val)
	uint32_t		*reg;
	uint32_t		val;
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}

static uint32_t
READ_REGISTER_ULONG(reg)
	uint32_t		*reg;
{
	return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static uint8_t
READ_REGISTER_UCHAR(reg)
	uint8_t			*reg;
{
	return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static void
WRITE_REGISTER_UCHAR(reg, val)
	uint8_t			*reg;
	uint8_t			val;
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}
static slist_entry *
ntoskrnl_pushsl(head, entry)
	slist_header		*head;
	slist_entry		*entry;
{
	slist_entry		*oldhead;

	oldhead = head->slh_list.slh_next;
	entry->sl_next = head->slh_list.slh_next;
	head->slh_list.slh_next = entry;
	head->slh_list.slh_depth++;
	head->slh_list.slh_seq++;

	return(oldhead);
}

static slist_entry *
ntoskrnl_popsl(head)
	slist_header		*head;
{
	slist_entry		*first;

	first = head->slh_list.slh_next;
	if (first != NULL) {
		head->slh_list.slh_next = first->sl_next;
		head->slh_list.slh_depth--;
		head->slh_list.slh_seq++;
	}

	return(first);
}
/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a nasty crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */
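/*
 * Illustrative sketch (not part of the build): any kernel routine
 * whose address escapes to a Windows driver must be translated the
 * same way. Handing out a free routine, for example, looks like:
 *
 *	lookaside->nll_l.gl_freefunc =
 *	    ntoskrnl_findwrap((funcptr)ExFreePool);
 *
 * and the driver then invokes it through the Windows calling
 * convention, just as we do ourselves with MSCALL1() in the
 * lookaside delete routines below.
 */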
static funcptr
ntoskrnl_findwrap(func)
	funcptr			func;
{
	image_patch_table	*patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		if ((funcptr)patch->ipt_func == func)
			return((funcptr)patch->ipt_wrap);
		patch++;
	}

	return(NULL);
}
static void
ExInitializePagedLookasideList(lookaside, allocfunc, freefunc,
    flags, size, tag, depth)
	paged_lookaside_list	*lookaside;
	lookaside_alloc_func	*allocfunc;
	lookaside_free_func	*freefunc;
	uint32_t		flags;
	size_t			size;
	uint32_t		tag;
	uint16_t		depth;
{
	bzero((char *)lookaside, sizeof(paged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
}
static void
ExDeletePagedLookasideList(lookaside)
	paged_lookaside_list	*lookaside;
{
	void			*buf;
	void		(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);
}
static void
ExInitializeNPagedLookasideList(lookaside, allocfunc, freefunc,
    flags, size, tag, depth)
	npaged_lookaside_list	*lookaside;
	lookaside_alloc_func	*allocfunc;
	lookaside_free_func	*freefunc;
	uint32_t		flags;
	size_t			size;
	uint32_t		tag;
	uint16_t		depth;
{
	bzero((char *)lookaside, sizeof(npaged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
}
static void
ExDeleteNPagedLookasideList(lookaside)
	npaged_lookaside_list	*lookaside;
{
	void			*buf;
	void		(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);
}
slist_entry *
InterlockedPushEntrySList(head, entry)
	slist_header		*head;
	slist_entry		*entry;
{
	slist_entry		*oldhead;

	mtx_lock_spin(&ntoskrnl_interlock);
	oldhead = ntoskrnl_pushsl(head, entry);
	mtx_unlock_spin(&ntoskrnl_interlock);

	return(oldhead);
}

slist_entry *
InterlockedPopEntrySList(head)
	slist_header		*head;
{
	slist_entry		*first;

	mtx_lock_spin(&ntoskrnl_interlock);
	first = ntoskrnl_popsl(head);
	mtx_unlock_spin(&ntoskrnl_interlock);

	return(first);
}
static slist_entry *
ExInterlockedPushEntrySList(head, entry, lock)
	slist_header		*head;
	slist_entry		*entry;
	kspin_lock		*lock;
{
	return(InterlockedPushEntrySList(head, entry));
}

static slist_entry *
ExInterlockedPopEntrySList(head, lock)
	slist_header		*head;
	kspin_lock		*lock;
{
	return(InterlockedPopEntrySList(head));
}
uint16_t
ExQueryDepthSList(head)
	slist_header		*head;
{
	uint16_t		depth;

	mtx_lock_spin(&ntoskrnl_interlock);
	depth = head->slh_list.slh_depth;
	mtx_unlock_spin(&ntoskrnl_interlock);

	return(depth);
}
void
KeInitializeSpinLock(lock)
	kspin_lock		*lock;
{
	*lock = 0;
}

#ifdef __i386__
void
KefAcquireSpinLockAtDpcLevel(lock)
	kspin_lock		*lock;
{
#ifdef NTOSKRNL_DEBUG_SPINLOCKS
	int			i = 0;
#endif

	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
		/* sit and spin */;
#ifdef NTOSKRNL_DEBUG_SPINLOCKS
		i++;
		if (i > 200000000)
			panic("DEADLOCK!");
#endif
	}
}

void
KefReleaseSpinLockFromDpcLevel(lock)
	kspin_lock		*lock;
{
	atomic_store_rel_int((volatile u_int *)lock, 0);
}

uint8_t
KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
{
	uint8_t			oldirql;

	if (KeGetCurrentIrql() > DISPATCH_LEVEL)
		panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

	KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
	KeAcquireSpinLockAtDpcLevel(lock);

	return(oldirql);
}
#else
void
KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
		/* sit and spin */;
}

void
KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	atomic_store_rel_int((volatile u_int *)lock, 0);
}
#endif /* __i386__ */
uintptr_t
InterlockedExchange(dst, val)
	volatile uint32_t	*dst;
	uintptr_t		val;
{
	uintptr_t		r;

	mtx_lock_spin(&ntoskrnl_interlock);
	r = *dst;
	*dst = val;
	mtx_unlock_spin(&ntoskrnl_interlock);

	return(r);
}
static uint32_t
InterlockedIncrement(addend)
	volatile uint32_t	*addend;
{
	atomic_add_long((volatile u_long *)addend, 1);
	return(*addend);
}

static uint32_t
InterlockedDecrement(addend)
	volatile uint32_t	*addend;
{
	atomic_subtract_long((volatile u_long *)addend, 1);
	return(*addend);
}
static void
ExInterlockedAddLargeStatistic(addend, inc)
	uint64_t		*addend;
	uint32_t		inc;
{
	mtx_lock_spin(&ntoskrnl_interlock);
	*addend += inc;
	mtx_unlock_spin(&ntoskrnl_interlock);
}
mdl *
IoAllocateMdl(vaddr, len, secondarybuf, chargequota, iopkt)
	void			*vaddr;
	uint32_t		len;
	uint8_t			secondarybuf;
	uint8_t			chargequota;
	irp			*iopkt;
{
	mdl			*m;
	int			zone = 0;

	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	else {
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
		zone++;
	}

	if (m == NULL)
		return (NULL);

	MmInitializeMdl(m, vaddr, len);

	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL UMA zone, tag it so we can release it to
	 * the right place later.
	 */

	if (zone)
		m->mdl_flags = MDL_ZONE_ALLOCED;

	if (iopkt != NULL) {
		if (secondarybuf == TRUE) {
			mdl			*last;
			last = iopkt->irp_mdl;
			while (last->mdl_next != NULL)
				last = last->mdl_next;
			last->mdl_next = m;
		} else {
			if (iopkt->irp_mdl != NULL)
				panic("leaking an MDL in IoAllocateMdl()");
			iopkt->irp_mdl = m;
		}
	}

	return (m);
}

void
IoFreeMdl(m)
	mdl			*m;
{
	if (m == NULL)
		return;

	if (m->mdl_flags & MDL_ZONE_ALLOCED)
		uma_zfree(mdl_zone, m);
	else
		ExFreePool(m);
}
static void *
MmAllocateContiguousMemory(size, highest)
	uint32_t		size;
	uint64_t		highest;
{
	void			*addr;
	size_t			pagelength = roundup(size, PAGE_SIZE);

	addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);

	return(addr);
}

static void *
MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
    boundary, cachetype)
	uint32_t		size;
	uint64_t		lowest;
	uint64_t		highest;
	uint64_t		boundary;
	uint32_t		cachetype;
{
	void			*addr;
	size_t			pagelength = roundup(size, PAGE_SIZE);

	addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);

	return(addr);
}

static void
MmFreeContiguousMemory(base)
	void			*base;
{
	ExFreePool(base);
}

static void
MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
	void			*base;
	uint32_t		size;
	uint32_t		cachetype;
{
	ExFreePool(base);
}
static uint32_t
MmSizeOfMdl(vaddr, len)
	void			*vaddr;
	size_t			len;
{
	uint32_t		l;

	l = sizeof(struct mdl) +
	    (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));

	return(l);
}
/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
void
MmBuildMdlForNonPagedPool(m)
	mdl			*m;
{
	vm_offset_t		*mdl_pages;
	int			pagecnt, i;

	pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);

	if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
		panic("not enough pages in MDL to describe buffer");

	mdl_pages = MmGetMdlPfnArray(m);

	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);

	m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
	m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
}
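/*
 * Illustrative sketch (not part of the build): describing a
 * nonpaged buffer with an MDL using the routines above, where
 * buf and len are hypothetical driver variables:
 *
 *	mdl		*m;
 *
 *	m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
 *	if (m != NULL) {
 *		MmBuildMdlForNonPagedPool(m);
 *		...
 *		IoFreeMdl(m);
 *	}
 */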
static void *
MmMapLockedPages(buf, accessmode)
	mdl			*buf;
	uint8_t			accessmode;
{
	buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
	return(MmGetMdlVirtualAddress(buf));
}

static void *
MmMapLockedPagesSpecifyCache(buf, accessmode, cachetype, vaddr,
    bugcheck, prio)
	mdl			*buf;
	uint8_t			accessmode;
	uint32_t		cachetype;
	void			*vaddr;
	uint32_t		bugcheck;
	uint32_t		prio;
{
	return(MmMapLockedPages(buf, accessmode));
}

static void
MmUnmapLockedPages(vaddr, buf)
	void			*vaddr;
	mdl			*buf;
{
	buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
}
/*
 * This function has a problem in that it will break if you
 * compile this module without PAE and try to use it on a PAE
 * kernel. Unfortunately, there's no way around this at the
 * moment. It's slightly less broken than using pmap_kextract().
 * You'd think the virtual memory subsystem would help us out
 * here, but it doesn't.
 */
static uint8_t
MmIsAddressValid(vaddr)
	void			*vaddr;
{
	if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
		return(TRUE);

	return(FALSE);
}
void *
MmMapIoSpace(paddr, len, cachetype)
	uint64_t		paddr;
	uint32_t		len;
	uint32_t		cachetype;
{
	devclass_t		nexus_class;
	device_t		*nexus_devs, devp;
	int			nexus_count = 0;
	device_t		matching_dev = NULL;
	struct resource		*res;
	int			i;
	vm_offset_t		v;

	/* There will always be at least one nexus. */

	nexus_class = devclass_find("nexus");
	devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);

	for (i = 0; i < nexus_count; i++) {
		devp = nexus_devs[i];
		matching_dev = ntoskrnl_finddev(devp, paddr, &res);
		if (matching_dev)
			break;
	}

	free(nexus_devs, M_TEMP);

	if (matching_dev == NULL)
		return(NULL);

	v = (vm_offset_t)rman_get_virtual(res);
	if (paddr > rman_get_start(res))
		v += paddr - rman_get_start(res);

	return((void *)v);
}

void
MmUnmapIoSpace(vaddr, len)
	void			*vaddr;
	size_t			len;
{
}
static device_t
ntoskrnl_finddev(dev, paddr, res)
	device_t		dev;
	uint64_t		paddr;
	struct resource		**res;
{
	device_t		*children = NULL;
	device_t		matching_dev;
	int			childcnt;
	struct resource		*r;
	struct resource_list	*rl;
	struct resource_list_entry	*rle;
	uint32_t		flags;
	int			i;

	/* We only want devices that have been successfully probed. */

	if (device_is_alive(dev) == FALSE)
		return(NULL);

	rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
	if (rl != NULL) {
#if __FreeBSD_version < 600022
		SLIST_FOREACH(rle, rl, link) {
#else
		STAILQ_FOREACH(rle, rl, link) {
#endif
			r = rle->res;

			if (r == NULL)
				continue;

			flags = rman_get_flags(r);

			if (rle->type == SYS_RES_MEMORY &&
			    paddr >= rman_get_start(r) &&
			    paddr <= rman_get_end(r)) {
				if (!(flags & RF_ACTIVE))
					bus_activate_resource(dev,
					    SYS_RES_MEMORY, 0, r);
				*res = r;
				return(dev);
			}
		}
	}

	/*
	 * If this device has children, do another
	 * level of recursion to inspect them.
	 */

	device_get_children(dev, &children, &childcnt);

	for (i = 0; i < childcnt; i++) {
		matching_dev = ntoskrnl_finddev(children[i], paddr, res);
		if (matching_dev != NULL) {
			free(children, M_TEMP);
			return(matching_dev);
		}
	}

	/* Won't somebody please think of the children! */

	if (children != NULL)
		free(children, M_TEMP);

	return(NULL);
}
/*
 * Workitems are unlike DPCs, in that they run in a user-mode thread
 * context rather than at DISPATCH_LEVEL in kernel context. In our
 * case we run them in kernel context anyway.
 */
static void
ntoskrnl_workitem_thread(arg)
	void			*arg;
{
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*iw;
	uint8_t			irql;

	kq = arg;

	InitializeListHead(&kq->kq_disp);
	kq->kq_td = curthread;
	kq->kq_exit = 0;
	KeInitializeSpinLock(&kq->kq_lock);
	KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);

	while (1) {
		KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

		KeAcquireSpinLock(&kq->kq_lock, &irql);

		if (kq->kq_exit) {
			kq->kq_exit = 0;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			break;
		}

		while (!IsListEmpty(&kq->kq_disp)) {
			l = RemoveHeadList(&kq->kq_disp);
			iw = CONTAINING_RECORD(l,
			    io_workitem, iw_listentry);
			InitializeListHead((&iw->iw_listentry));
			if (iw->iw_func == NULL)
				continue;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
			KeAcquireSpinLock(&kq->kq_lock, &irql);
		}

		KeReleaseSpinLock(&kq->kq_lock, irql);
	}

#if __FreeBSD_version < 502113
	mtx_lock(&Giant);
#endif
	kthread_exit(0);
	return; /* notreached */
}
static void
ntoskrnl_destroy_workitem_threads(void)
{
	kdpc_queue		*kq;
	int			i;

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		kq->kq_exit = 1;
		KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
		while (kq->kq_exit)
			tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
	}
}
io_workitem *
IoAllocateWorkItem(dobj)
	device_object		*dobj;
{
	io_workitem		*iw;

	iw = uma_zalloc(iw_zone, M_NOWAIT);
	if (iw == NULL)
		return(NULL);

	InitializeListHead(&iw->iw_listentry);
	iw->iw_dobj = dobj;

	mtx_lock(&ntoskrnl_dispatchlock);
	iw->iw_idx = wq_idx;
	WORKIDX_INC(wq_idx);
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(iw);
}

void
IoFreeWorkItem(iw)
	io_workitem		*iw;
{
	uma_zfree(iw_zone, iw);
}
void
IoQueueWorkItem(iw, iw_func, qtype, ctx)
	io_workitem		*iw;
	io_workitem_func	iw_func;
	uint32_t		qtype;
	void			*ctx;
{
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*cur;
	uint8_t			irql;

	kq = wq_queues + iw->iw_idx;

	KeAcquireSpinLock(&kq->kq_lock, &irql);

	/*
	 * Traverse the list and make sure this workitem hasn't
	 * already been inserted. Queuing the same workitem
	 * twice will hose the list but good.
	 */

	l = kq->kq_disp.nle_flink;
	while (l != &kq->kq_disp) {
		cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
		if (cur == iw) {
			/* Already queued -- do nothing. */
			KeReleaseSpinLock(&kq->kq_lock, irql);
			return;
		}
		l = l->nle_flink;
	}

	iw->iw_func = iw_func;
	iw->iw_ctx = ctx;

	InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
	KeReleaseSpinLock(&kq->kq_lock, irql);

	KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
}
static void
ntoskrnl_workitem(dobj, arg)
	device_object		*dobj;
	void			*arg;
{
	io_workitem		*iw;
	work_queue_item		*w;
	work_item_func		f;

	iw = arg;
	w = (work_queue_item *)dobj;
	f = (work_item_func)w->wqi_func;
	uma_zfree(iw_zone, iw);
	MSCALL2(f, w, w->wqi_ctx);
}
/*
 * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
 * warns that it's unsafe and to use IoQueueWorkItem() instead. The
 * problem with ExQueueWorkItem() is that it can't guard against
 * the condition where a driver submits a job to the work queue and
 * is then unloaded before the job is able to run. IoQueueWorkItem()
 * acquires a reference to the device's device_object via the
 * object manager and retains it until after the job has completed,
 * which prevents the driver from being unloaded before the job
 * runs. (We don't currently support this behavior, though hopefully
 * that will change once the object manager API is fleshed out a bit.)
 *
 * Having said all that, the ExQueueWorkItem() API remains, because
 * there are still other parts of Windows that use it, including
 * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
 * We fake up the ExQueueWorkItem() API on top of our implementation
 * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
 * for ExQueueWorkItem() jobs, and we pass a pointer to the work
 * queue item (provided by the caller) in to IoAllocateWorkItem()
 * instead of the device_object. We need to save this pointer so
 * we can apply a sanity check: as with the DPC queue and other
 * workitem queues, we can't allow the same work queue item to
 * be queued twice. If it's already pending, we silently return
 * without doing anything.
 */
2942 ExQueueWorkItem(w, qtype)
2947 io_workitem_func iwf;
2955 * We need to do a special sanity test to make sure
2956 * the ExQueueWorkItem() API isn't used to queue
2957 * the same workitem twice. Rather than checking the
2958 * io_workitem pointer itself, we test the attached
2959 * device object, which is really a pointer to the
2960 * legacy work queue item structure.
2963 kq = wq_queues + WORKITEM_LEGACY_THREAD;
2964 KeAcquireSpinLock(&kq->kq_lock, &irql);
2965 l = kq->kq_disp.nle_flink;
2966 while (l != &kq->kq_disp) {
2967 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2968 if (cur->iw_dobj == (device_object *)w) {
2969 /* Already queued -- do nothing. */
2970 KeReleaseSpinLock(&kq->kq_lock, irql);
2975 KeReleaseSpinLock(&kq->kq_lock, irql);
2977 iw = IoAllocateWorkItem((device_object *)w);
2981 iw->iw_idx = WORKITEM_LEGACY_THREAD;
2982 iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2983 IoQueueWorkItem(iw, iwf, qtype, iw);
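/*
 * For comparison, a sketch of the legacy interface this emulates
 * (hypothetical names; assumes the usual Windows-style
 * ExInitializeWorkItem() initializer, since the caller-supplied
 * work_queue_item holds the routine and context itself and nothing
 * needs to be freed afterwards):
 *
 *	work_queue_item wqi;
 *
 *	ExInitializeWorkItem(&wqi, example_wifunc, sc);
 *	ExQueueWorkItem(&wqi, DelayedWorkQueue);
 */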
2989 RtlZeroMemory(dst, len)
2998 RtlCopyMemory(dst, src, len)
3003 bcopy(src, dst, len);
3008 RtlCompareMemory(s1, s2, len)
3013 size_t i, total = 0;
3016 m1 = __DECONST(char *, s1);
3017 m2 = __DECONST(char *, s2);
3019 for (i = 0; i < len; i++) {
3027 RtlInitAnsiString(dst, src)
3037 a->as_len = a->as_maxlen = 0;
3041 a->as_len = a->as_maxlen = strlen(src);
3048 RtlInitUnicodeString(dst, src)
3049 unicode_string *dst;
3059 u->us_len = u->us_maxlen = 0;
3066 u->us_len = u->us_maxlen = i * 2;
3073 RtlUnicodeStringToInteger(ustr, base, val)
3074 unicode_string *ustr;
3083 uchr = ustr->us_buf;
3085 bzero(abuf, sizeof(abuf));
3087 if ((char)((*uchr) & 0xFF) == '-') {
3091 } else if ((char)((*uchr) & 0xFF) == '+') {
3098 if ((char)((*uchr) & 0xFF) == 'b') {
3102 } else if ((char)((*uchr) & 0xFF) == 'o') {
3106 } else if ((char)((*uchr) & 0xFF) == 'x') {
3120 ntoskrnl_unicode_to_ascii(uchr, astr, len);
3121 *val = strtoul(abuf, NULL, base);
3123 return(STATUS_SUCCESS);
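/*
 * Usage sketch (illustrative only; str16 is a hypothetical
 * unicode_string whose text is "42"). On success val holds 42;
 * the 'b', 'o' and 'x' prefix checks above select base 2, 8 or
 * 16 instead when such a prefix is present.
 *
 *	uint32_t val;
 *
 *	if (RtlUnicodeStringToInteger(&str16, 10, &val) !=
 *	    STATUS_SUCCESS)
 *		return;
 */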
3127 RtlFreeUnicodeString(ustr)
3128 unicode_string *ustr;
3130 if (ustr->us_buf == NULL)
3131 return;
3132 ExFreePool(ustr->us_buf);
3133 ustr->us_buf = NULL;
3138 RtlFreeAnsiString(astr)
3141 if (astr->as_buf == NULL)
3142 return;
3143 ExFreePool(astr->as_buf);
3144 astr->as_buf = NULL;
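/*
 * Pairing note (illustrative): RtlInitAnsiString() and
 * RtlInitUnicodeString() do not allocate; they just point the
 * string at the caller's buffer. The free routines above hand
 * us_buf/as_buf to ExFreePool(), so they should only be used on
 * strings whose buffers were allocated on the caller's behalf
 * (e.g. by RtlAnsiStringToUnicodeString() with its allocate flag
 * set). In particular (buf is a hypothetical caller buffer):
 *
 *	unicode_string us;
 *
 *	RtlInitUnicodeString(&us, buf);
 *	RtlFreeUnicodeString(&us);	(wrong: frees caller's buf)
 */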
3152 return (int)strtol(str, (char **)NULL, 10);
3159 return strtol(str, (char **)NULL, 10);
3168 srandom(tv.tv_usec);
3169 return((int)random());
3181 IoIsWdmVersionAvailable(major, minor)
3185 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3191 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3192 device_object *devobj;
3201 drv = devobj->do_drvobj;
3204 case DEVPROP_DRIVER_KEYNAME:
3206 *name = drv->dro_drivername.us_buf;
3207 *reslen = drv->dro_drivername.us_len;
3208 break;
3209 default:
3210 return(STATUS_INVALID_PARAMETER_2);
3214 return(STATUS_SUCCESS);
3218 KeInitializeMutex(kmutex, level)
3222 InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3223 kmutex->km_abandoned = FALSE;
3224 kmutex->km_apcdisable = 1;
3225 kmutex->km_header.dh_sigstate = 1;
3226 kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3227 kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3228 kmutex->km_ownerthread = NULL;
3233 KeReleaseMutex(kmutex, kwait)
3239 mtx_lock(&ntoskrnl_dispatchlock);
3240 prevstate = kmutex->km_header.dh_sigstate;
3241 if (kmutex->km_ownerthread != curthread) {
3242 mtx_unlock(&ntoskrnl_dispatchlock);
3243 return(STATUS_MUTANT_NOT_OWNED);
3246 kmutex->km_header.dh_sigstate++;
3247 kmutex->km_abandoned = FALSE;
3249 if (kmutex->km_header.dh_sigstate == 1) {
3250 kmutex->km_ownerthread = NULL;
3251 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3254 mtx_unlock(&ntoskrnl_dispatchlock);
3260 KeReadStateMutex(kmutex)
3263 return(kmutex->km_header.dh_sigstate);
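/*
 * Usage sketch (illustrative only, hypothetical names): a kmutant
 * is acquired by waiting on it like any other dispatcher object
 * and handed back with KeReleaseMutex().
 *
 *	kmutant example_mtx;
 *
 *	KeInitializeMutex(&example_mtx, 0);
 *	KeWaitForSingleObject(&example_mtx, 0, 0, TRUE, NULL);
 *	... critical section ...
 *	KeReleaseMutex(&example_mtx, FALSE);
 */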
3267 KeInitializeEvent(kevent, type, state)
3272 InitializeListHead((&kevent->k_header.dh_waitlisthead));
3273 kevent->k_header.dh_sigstate = state;
3274 if (type == EVENT_TYPE_NOTIFY)
3275 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3276 else
3277 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3278 kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3283 KeResetEvent(kevent)
3288 mtx_lock(&ntoskrnl_dispatchlock);
3289 prevstate = kevent->k_header.dh_sigstate;
3290 kevent->k_header.dh_sigstate = FALSE;
3291 mtx_unlock(&ntoskrnl_dispatchlock);
3297 KeSetEvent(kevent, increment, kwait)
3304 nt_dispatch_header *dh;
3308 mtx_lock(&ntoskrnl_dispatchlock);
3309 prevstate = kevent->k_header.dh_sigstate;
3310 dh = &kevent->k_header;
3312 if (IsListEmpty(&dh->dh_waitlisthead))
3314 * If there's nobody in the waitlist, just set
3315 * the state to signalled.
3317 dh->dh_sigstate = 1;
3318 else {
3320 * Get the first waiter. If this is a synchronization
3321 * event, just wake up that one thread (don't bother
3322 * setting the state to signalled since we're supposed
3323 * to automatically clear synchronization events anyway).
3325 * If it's a notification event, or the first
3326 * waiter is doing a WAITTYPE_ALL wait, go through
3327 * the full wait satisfaction process.
3329 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3330 wait_block, wb_waitlist);
3333 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3334 w->wb_waittype == WAITTYPE_ALL) {
3335 if (prevstate == 0) {
3336 dh->dh_sigstate = 1;
3337 ntoskrnl_waittest(dh, increment);
3340 w->wb_awakened |= TRUE;
3341 cv_broadcastpri(&we->we_cv, w->wb_oldpri -
3342 (increment * 4));
3346 mtx_unlock(&ntoskrnl_dispatchlock);
3352 KeClearEvent(kevent)
3355 kevent->k_header.dh_sigstate = FALSE;
3360 KeReadStateEvent(kevent)
3363 return(kevent->k_header.dh_sigstate);
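/*
 * Usage sketch (illustrative only, hypothetical names): a
 * notification event wakes all waiters and stays signalled until
 * cleared; a synchronization event wakes one waiter and clears
 * itself, per the logic in KeSetEvent() above.
 *
 *	nt_kevent example_ev;
 *
 *	KeInitializeEvent(&example_ev, EVENT_TYPE_NOTIFY, FALSE);
 *	...
 *	KeWaitForSingleObject(&example_ev, 0, 0, TRUE, NULL);
 *	...	(from another thread:)
 *	KeSetEvent(&example_ev, IO_NO_INCREMENT, FALSE);
 */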
3367 * The object manager in Windows is responsible for managing
3368 * references and access to various types of objects, including
3369 * device_objects, events, threads, timers and so on. However,
3370 * there's a difference in the way objects are handled in user
3371 * mode versus kernel mode.
3373 * In user mode (i.e. Win32 applications), all objects are
3374 * managed by the object manager. For example, when you create
3375 * a timer or event object, you actually end up with an
3376 * object_header (for the object manager's bookkeeping
3377 * purposes) and an object body (which contains the actual object
3378 * structure, e.g. ktimer, kevent, etc...). This allows Windows
3379 * to manage resource quotas and to enforce access restrictions
3380 * on basically every kind of system object handled by the kernel.
3382 * However, in kernel mode, you only end up using the object
3383 * manager some of the time. For example, in a driver, you create
3384 * a timer object by simply allocating the memory for a ktimer
3385 * structure and initializing it with KeInitializeTimer(). Hence,
3386 * the timer has no object_header and no reference counting or
3387 * security/resource checks are done on it. The assumption in
3388 * this case is that if you're running in kernel mode, you know
3389 * what you're doing, and you're already at an elevated privilege
3390 * level anyway.
3392 * There are some exceptions to this. The two most important ones
3393 * for our purposes are device_objects and threads. We need to use
3394 * the object manager to do reference counting on device_objects,
3395 * and for threads, you can only get a pointer to a thread's
3396 * dispatch header by using ObReferenceObjectByHandle() on the
3397 * handle returned by PsCreateSystemThread().
3401 ObReferenceObjectByHandle(handle, reqaccess, otype,
3402 accessmode, object, handleinfo)
3412 nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3414 return(STATUS_INSUFFICIENT_RESOURCES);
3416 InitializeListHead((&nr->no_dh.dh_waitlisthead));
3417 nr->no_obj = handle;
3418 nr->no_dh.dh_type = DISP_TYPE_THREAD;
3419 nr->no_dh.dh_sigstate = 0;
3420 nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3421 sizeof(uint32_t));
3422 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3425 return(STATUS_SUCCESS);
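/*
 * Usage sketch (illustrative only, hypothetical names): this is
 * the dance that lets a driver wait for one of its threads to
 * exit; the access and mode arguments are ignored by this
 * implementation.
 *
 *	ndis_handle th;
 *	void *tobj;
 *
 *	PsCreateSystemThread(&th, 0, NULL, NULL, NULL,
 *	    example_threadfunc, sc);
 *	ObReferenceObjectByHandle(th, 0, NULL, 0, &tobj, NULL);
 *	KeWaitForSingleObject(tobj, 0, 0, TRUE, NULL);
 *	ObfDereferenceObject(tobj);
 */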
3429 ObfDereferenceObject(object)
3435 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3445 return(STATUS_SUCCESS);
3449 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3450 uint32_t traceclass;
3456 return(STATUS_NOT_FOUND);
3460 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3461 void *guid, uint16_t messagenum, ...)
3463 return(STATUS_SUCCESS);
3467 IoWMIRegistrationControl(dobj, action)
3468 device_object *dobj;
3471 return(STATUS_SUCCESS);
3475 * This is here just in case the thread returns without calling
3476 * PsTerminateSystemThread().
3479 ntoskrnl_thrfunc(arg)
3482 thread_context *thrctx;
3483 uint32_t (*tfunc)(void *);
3488 tfunc = thrctx->tc_thrfunc;
3489 tctx = thrctx->tc_thrctx;
3490 free(thrctx, M_TEMP);
3492 rval = MSCALL1(tfunc, tctx);
3494 PsTerminateSystemThread(rval);
3495 return; /* notreached */
3499 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3500 clientid, thrfunc, thrctx)
3501 ndis_handle *handle;
3504 ndis_handle phandle;
3514 tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3516 return(STATUS_INSUFFICIENT_RESOURCES);
3518 tc->tc_thrctx = thrctx;
3519 tc->tc_thrfunc = thrfunc;
3521 sprintf(tname, "windows kthread %d", ntoskrnl_kth);
3522 error = kthread_create(ntoskrnl_thrfunc, tc, &p,
3523 RFHIGHPID, NDIS_KSTACK_PAGES, tname);
3527 return(STATUS_INSUFFICIENT_RESOURCES);
3533 return(STATUS_SUCCESS);
3537 * In Windows, the exit of a thread is an event that you're allowed
3538 * to wait on, assuming you've obtained a reference to the thread using
3539 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3540 * simulate this behavior is to register each thread we create in a
3541 * reference list, and if someone holds a reference to us, we poke
3542 * them.
3545 PsTerminateSystemThread(status)
3548 struct nt_objref *nr;
3550 mtx_lock(&ntoskrnl_dispatchlock);
3551 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3552 if (nr->no_obj != curthread->td_proc)
3554 nr->no_dh.dh_sigstate = 1;
3555 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3558 mtx_unlock(&ntoskrnl_dispatchlock);
3562 #if __FreeBSD_version < 502113
3563 mtx_lock(&Giant);
3564 #endif
3565 kthread_exit(0);
3566 return(0); /* notreached */
3570 DbgPrint(char *fmt, ...)
3579 return(STATUS_SUCCESS);
3586 #if __FreeBSD_version < 502113
3587 Debugger("DbgBreakPoint(): breakpoint");
3588 #else
3589 kdb_enter("DbgBreakPoint(): breakpoint");
3590 #endif
3594 ntoskrnl_timercall(arg)
3601 mtx_lock(&ntoskrnl_dispatchlock);
3605 #ifdef NTOSKRNL_DEBUG_TIMERS
3606 ntoskrnl_timer_fires++;
3608 ntoskrnl_remove_timer(timer);
3611 * This should never happen, but complain
3612 * if it does.
3615 if (timer->k_header.dh_inserted == FALSE) {
3616 mtx_unlock(&ntoskrnl_dispatchlock);
3617 printf("NTOS: timer %p fired even though "
3618 "it was canceled\n", timer);
3622 /* Mark the timer as no longer being on the timer queue. */
3624 timer->k_header.dh_inserted = FALSE;
3626 /* Now signal the object and satisfy any waits on it. */
3628 timer->k_header.dh_sigstate = 1;
3629 ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3632 * If this is a periodic timer, re-arm it
3633 * so it will fire again. We do this before
3634 * calling any deferred procedure calls because
3635 * it's possible the DPC might cancel the timer,
3636 * in which case it would be wrong for us to
3637 * re-arm it again afterwards.
3640 if (timer->k_period) {
3641 tv.tv_sec = 0;
3642 tv.tv_usec = timer->k_period * 1000;
3643 timer->k_header.dh_inserted = TRUE;
3644 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3645 #ifdef NTOSKRNL_DEBUG_TIMERS
3646 ntoskrnl_timer_reloads++;
3652 mtx_unlock(&ntoskrnl_dispatchlock);
3654 /* If there's a DPC associated with the timer, queue it up. */
3657 KeInsertQueueDpc(dpc, NULL, NULL);
3662 #ifdef NTOSKRNL_DEBUG_TIMERS
3664 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3669 ntoskrnl_show_timers();
3670 return (sysctl_handle_int(oidp, &ret, 0, req));
3674 ntoskrnl_show_timers()
3679 mtx_lock_spin(&ntoskrnl_calllock);
3680 l = ntoskrnl_calllist.nle_flink;
3681 while(l != &ntoskrnl_calllist) {
3685 mtx_unlock_spin(&ntoskrnl_calllock);
3688 printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3689 printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3690 printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3691 printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3692 printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3700 * Must be called with dispatcher lock held.
3704 ntoskrnl_insert_timer(timer, ticks)
3713 * Try and allocate a timer.
3715 mtx_lock_spin(&ntoskrnl_calllock);
3716 if (IsListEmpty(&ntoskrnl_calllist)) {
3717 mtx_unlock_spin(&ntoskrnl_calllock);
3718 #ifdef NTOSKRNL_DEBUG_TIMERS
3719 ntoskrnl_show_timers();
3721 panic("out of timers!");
3723 l = RemoveHeadList(&ntoskrnl_calllist);
3724 mtx_unlock_spin(&ntoskrnl_calllock);
3726 e = CONTAINING_RECORD(l, callout_entry, ce_list);
3727 c = &e->ce_callout;
3729 timer->k_callout = c;
3731 callout_init(c, CALLOUT_MPSAFE);
3732 callout_reset(c, ticks, ntoskrnl_timercall, timer);
3738 ntoskrnl_remove_timer(timer)
3743 e = (callout_entry *)timer->k_callout;
3744 callout_stop(timer->k_callout);
3746 mtx_lock_spin(&ntoskrnl_calllock);
3747 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3748 mtx_unlock_spin(&ntoskrnl_calllock);
3754 KeInitializeTimer(timer)
3760 KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3766 KeInitializeTimerEx(timer, type)
3773 bzero((char *)timer, sizeof(ktimer));
3774 InitializeListHead((&timer->k_header.dh_waitlisthead));
3775 timer->k_header.dh_sigstate = FALSE;
3776 timer->k_header.dh_inserted = FALSE;
3777 if (type == EVENT_TYPE_NOTIFY)
3778 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3779 else
3780 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3781 timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3787 * DPC subsystem. A Windows Deferred Procedure Call has the following
3788 * properties:
3789 * - It runs at DISPATCH_LEVEL.
3790 * - It can have one of 3 importance values that control when it
3791 * runs relative to other DPCs in the queue.
3792 * - On SMP systems, it can be set to run on a specific processor.
3793 * In order to satisfy the last property, we create a DPC thread for
3794 * each CPU in the system and bind it to that CPU. Each thread
3795 * maintains three queues with different importance levels, which
3796 * will be processed in order from lowest to highest.
3798 * In Windows, interrupt handlers run as DPCs. (Not to be confused
3799 * with ISRs, which run in interrupt context and can preempt DPCs.)
3800 * Interrupt-handler DPCs are given the highest importance so
3801 * that they'll take precedence over timers and other things.
3802 */
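/*
 * Usage sketch (illustrative only, hypothetical names): a driver
 * initializes a DPC once and queues it from its ISR; one of the
 * DPC threads below then runs it.
 *
 *	kdpc example_dpc;
 *
 *	KeInitializeDpc(&example_dpc, example_dpcfunc, sc);
 *	KeSetImportanceDpc(&example_dpc, KDPC_IMPORTANCE_HIGH);
 *	...	(from the ISR:)
 *	KeInsertQueueDpc(&example_dpc, NULL, NULL);
 */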
3805 ntoskrnl_dpc_thread(arg)
3815 InitializeListHead(&kq->kq_disp);
3816 kq->kq_td = curthread;
3818 kq->kq_running = FALSE;
3819 KeInitializeSpinLock(&kq->kq_lock);
3820 KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3821 KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3824 * Elevate our priority. DPCs are used to run interrupt
3825 * handlers, and they should trigger as soon as possible
3826 * once scheduled by an ISR.
3829 thread_lock(curthread);
3830 #ifdef NTOSKRNL_MULTIPLE_DPCS
3831 #if __FreeBSD_version >= 502102
3832 sched_bind(curthread, kq->kq_cpu);
3833 #endif
3834 #endif
3835 sched_prio(curthread, PRI_MIN_KERN);
3836 #if __FreeBSD_version < 600000
3837 curthread->td_base_pri = PRI_MIN_KERN;
3838 #endif
3839 thread_unlock(curthread);
3842 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3844 KeAcquireSpinLock(&kq->kq_lock, &irql);
3848 KeReleaseSpinLock(&kq->kq_lock, irql);
3852 kq->kq_running = TRUE;
3854 while (!IsListEmpty(&kq->kq_disp)) {
3855 l = RemoveHeadList((&kq->kq_disp));
3856 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3857 InitializeListHead((&d->k_dpclistentry));
3858 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3859 MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3860 d->k_sysarg1, d->k_sysarg2);
3861 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3864 kq->kq_running = FALSE;
3866 KeReleaseSpinLock(&kq->kq_lock, irql);
3868 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3871 #if __FreeBSD_version < 502113
3872 mtx_lock(&Giant);
3873 #endif
3874 kthread_exit(0);
3875 return; /* notreached */
3879 ntoskrnl_destroy_dpc_threads(void)
3886 #ifdef NTOSKRNL_MULTIPLE_DPCS
3887 for (i = 0; i < mp_ncpus; i++) {
3888 #else
3889 for (i = 0; i < 1; i++) {
3894 KeInitializeDpc(&dpc, NULL, NULL);
3895 KeSetTargetProcessorDpc(&dpc, i);
3896 KeInsertQueueDpc(&dpc, NULL, NULL);
3898 tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
3905 ntoskrnl_insert_dpc(head, dpc)
3912 l = head->nle_flink;
3914 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3920 if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3921 InsertTailList((head), (&dpc->k_dpclistentry));
3922 else
3923 InsertHeadList((head), (&dpc->k_dpclistentry));
3929 KeInitializeDpc(dpc, dpcfunc, dpcctx)
3938 dpc->k_deferedfunc = dpcfunc;
3939 dpc->k_deferredctx = dpcctx;
3940 dpc->k_num = KDPC_CPU_DEFAULT;
3941 dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3942 InitializeListHead((&dpc->k_dpclistentry));
3948 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
3962 #ifdef NTOSKRNL_MULTIPLE_DPCS
3963 KeRaiseIrql(DISPATCH_LEVEL, &irql);
3966 * By default, the DPC is queued to run on the same CPU
3967 * that scheduled it.
3970 if (dpc->k_num == KDPC_CPU_DEFAULT)
3971 kq += curthread->td_oncpu;
3972 else
3973 kq += dpc->k_num;
3974 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3975 #else
3976 KeAcquireSpinLock(&kq->kq_lock, &irql);
3979 r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3981 dpc->k_sysarg1 = sysarg1;
3982 dpc->k_sysarg2 = sysarg2;
3984 KeReleaseSpinLock(&kq->kq_lock, irql);
3989 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3995 KeRemoveQueueDpc(dpc)
4004 #ifdef NTOSKRNL_MULTIPLE_DPCS
4005 KeRaiseIrql(DISPATCH_LEVEL, &irql);
4007 kq = kq_queues + dpc->k_num;
4009 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
4010 #else
4011 kq = kq_queues;
4012 KeAcquireSpinLock(&kq->kq_lock, &irql);
4015 if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
4016 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
4021 RemoveEntryList((&dpc->k_dpclistentry));
4022 InitializeListHead((&dpc->k_dpclistentry));
4024 KeReleaseSpinLock(&kq->kq_lock, irql);
4030 KeSetImportanceDpc(dpc, imp)
4034 if (imp != KDPC_IMPORTANCE_LOW &&
4035 imp != KDPC_IMPORTANCE_MEDIUM &&
4036 imp != KDPC_IMPORTANCE_HIGH)
4037 return;
4039 dpc->k_importance = (uint8_t)imp;
4044 KeSetTargetProcessorDpc(dpc, cpu)
4056 KeFlushQueuedDpcs(void)
4062 * Poke each DPC queue and wait
4063 * for them to drain.
4066 #ifdef NTOSKRNL_MULTIPLE_DPCS
4067 for (i = 0; i < mp_ncpus; i++) {
4068 #else
4069 for (i = 0; i < 1; i++) {
4072 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
4073 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
4080 KeGetCurrentProcessorNumber(void)
4082 return((uint32_t)curthread->td_oncpu);
4086 KeSetTimerEx(timer, duetime, period, dpc)
4099 mtx_lock(&ntoskrnl_dispatchlock);
4101 if (timer->k_header.dh_inserted == TRUE) {
4102 ntoskrnl_remove_timer(timer);
4103 #ifdef NTOSKRNL_DEBUG_TIMERS
4104 ntoskrnl_timer_cancels++;
4106 timer->k_header.dh_inserted = FALSE;
4111 timer->k_duetime = duetime;
4112 timer->k_period = period;
4113 timer->k_header.dh_sigstate = FALSE;
4116 if (duetime < 0) {
4117 tv.tv_sec = - (duetime) / 10000000;
4118 tv.tv_usec = (- (duetime) / 10) -
4119 (tv.tv_sec * 1000000);
4120 } else {
4121 ntoskrnl_time(&curtime);
4122 if (duetime < curtime)
4123 tv.tv_sec = tv.tv_usec = 0;
4124 else {
4125 tv.tv_sec = ((duetime) - curtime) / 10000000;
4126 tv.tv_usec = ((duetime) - curtime) / 10 -
4127 (tv.tv_sec * 1000000);
4131 timer->k_header.dh_inserted = TRUE;
4132 ntoskrnl_insert_timer(timer, tvtohz(&tv));
4133 #ifdef NTOSKRNL_DEBUG_TIMERS
4134 ntoskrnl_timer_sets++;
4137 mtx_unlock(&ntoskrnl_dispatchlock);
4143 KeSetTimer(timer, duetime, dpc)
4148 return (KeSetTimerEx(timer, duetime, 0, dpc));
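/*
 * Usage sketch (illustrative only, hypothetical names): duetime
 * is in 100ns units, negative for a delay relative to now and
 * positive for an absolute system time; period is in milliseconds.
 * This fires example_dpcfunc first after 500ms and then every
 * 100ms until the timer is cancelled:
 *
 *	ktimer example_timer;
 *	kdpc example_dpc;
 *
 *	KeInitializeTimer(&example_timer);
 *	KeInitializeDpc(&example_dpc, example_dpcfunc, sc);
 *	KeSetTimerEx(&example_timer, -5000000, 100, &example_dpc);
 *	...
 *	KeCancelTimer(&example_timer);
 */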
4152 * The Windows DDK documentation seems to say that cancelling
4153 * a timer that has a DPC will result in the DPC also being
4154 * cancelled, but this isn't really the case.
4158 KeCancelTimer(timer)
4166 mtx_lock(&ntoskrnl_dispatchlock);
4168 pending = timer->k_header.dh_inserted;
4170 if (timer->k_header.dh_inserted == TRUE) {
4171 timer->k_header.dh_inserted = FALSE;
4172 ntoskrnl_remove_timer(timer);
4173 #ifdef NTOSKRNL_DEBUG_TIMERS
4174 ntoskrnl_timer_cancels++;
4178 mtx_unlock(&ntoskrnl_dispatchlock);
4184 KeReadStateTimer(timer)
4187 return(timer->k_header.dh_sigstate);
4193 printf("ntoskrnl dummy called...\n");
4198 image_patch_table ntoskrnl_functbl[] = {
4199 IMPORT_SFUNC(RtlZeroMemory, 2),
4200 IMPORT_SFUNC(RtlCopyMemory, 3),
4201 IMPORT_SFUNC(RtlCompareMemory, 3),
4202 IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4203 IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4204 IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4205 IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4206 IMPORT_SFUNC(RtlInitAnsiString, 2),
4207 IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4208 IMPORT_SFUNC(RtlInitUnicodeString, 2),
4209 IMPORT_SFUNC(RtlFreeAnsiString, 1),
4210 IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4211 IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4212 IMPORT_CFUNC(sprintf, 0),
4213 IMPORT_CFUNC(vsprintf, 0),
4214 IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4215 IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4216 IMPORT_CFUNC(DbgPrint, 0),
4217 IMPORT_SFUNC(DbgBreakPoint, 0),
4218 IMPORT_CFUNC(strncmp, 0),
4219 IMPORT_CFUNC(strcmp, 0),
4220 IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4221 IMPORT_CFUNC(strncpy, 0),
4222 IMPORT_CFUNC(strcpy, 0),
4223 IMPORT_CFUNC(strlen, 0),
4224 IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4225 IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4226 IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4227 IMPORT_CFUNC_MAP(strchr, index, 0),
4228 IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4229 IMPORT_CFUNC(memcpy, 0),
4230 IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4231 IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4232 IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4233 IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4234 IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4235 IMPORT_FFUNC(IofCallDriver, 2),
4236 IMPORT_FFUNC(IofCompleteRequest, 2),
4237 IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4238 IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4239 IMPORT_SFUNC(IoCancelIrp, 1),
4240 IMPORT_SFUNC(IoConnectInterrupt, 11),
4241 IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4242 IMPORT_SFUNC(IoCreateDevice, 7),
4243 IMPORT_SFUNC(IoDeleteDevice, 1),
4244 IMPORT_SFUNC(IoGetAttachedDevice, 1),
4245 IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4246 IMPORT_SFUNC(IoDetachDevice, 1),
4247 IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4248 IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4249 IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4250 IMPORT_SFUNC(IoAllocateIrp, 2),
4251 IMPORT_SFUNC(IoReuseIrp, 2),
4252 IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4253 IMPORT_SFUNC(IoFreeIrp, 1),
4254 IMPORT_SFUNC(IoInitializeIrp, 3),
4255 IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4256 IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4257 IMPORT_SFUNC(KeSynchronizeExecution, 3),
4258 IMPORT_SFUNC(KeWaitForSingleObject, 5),
4259 IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4260 IMPORT_SFUNC(_allmul, 4),
4261 IMPORT_SFUNC(_alldiv, 4),
4262 IMPORT_SFUNC(_allrem, 4),
4263 IMPORT_RFUNC(_allshr, 0),
4264 IMPORT_RFUNC(_allshl, 0),
4265 IMPORT_SFUNC(_aullmul, 4),
4266 IMPORT_SFUNC(_aulldiv, 4),
4267 IMPORT_SFUNC(_aullrem, 4),
4268 IMPORT_RFUNC(_aullshr, 0),
4269 IMPORT_RFUNC(_aullshl, 0),
4270 IMPORT_CFUNC(atoi, 0),
4271 IMPORT_CFUNC(atol, 0),
4272 IMPORT_CFUNC(rand, 0),
4273 IMPORT_CFUNC(srand, 0),
4274 IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4275 IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4276 IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4277 IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4278 IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4279 IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4280 IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4281 IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4282 IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4283 IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4284 IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4285 IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4286 IMPORT_SFUNC(ExQueryDepthSList, 1),
4287 IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4288 InterlockedPopEntrySList, 1),
4289 IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4290 InterlockedPushEntrySList, 2),
4291 IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4292 IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4293 IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4294 IMPORT_SFUNC(ExFreePool, 1),
4296 IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4297 IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel, 1),
4298 IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4301 * For AMD64, we can get away with just mapping
4302 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4303 * because the calling conventions end up being the same.
4304 * On i386, we have to be careful because KfAcquireSpinLock()
4305 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4307 IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4308 IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4309 IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4311 IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4312 IMPORT_FFUNC(InterlockedIncrement, 1),
4313 IMPORT_FFUNC(InterlockedDecrement, 1),
4314 IMPORT_FFUNC(InterlockedExchange, 2),
4315 IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4316 IMPORT_SFUNC(IoAllocateMdl, 5),
4317 IMPORT_SFUNC(IoFreeMdl, 1),
4318 IMPORT_SFUNC(MmAllocateContiguousMemory, 2),
4319 IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5),
4320 IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4321 IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4322 IMPORT_SFUNC_MAP(MmGetPhysicalAddress, pmap_kextract, 1),
4323 IMPORT_SFUNC(MmSizeOfMdl, 1),
4324 IMPORT_SFUNC(MmMapLockedPages, 2),
4325 IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4326 IMPORT_SFUNC(MmUnmapLockedPages, 2),
4327 IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4328 IMPORT_SFUNC(MmIsAddressValid, 1),
4329 IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4330 IMPORT_SFUNC(MmUnmapIoSpace, 2),
4331 IMPORT_SFUNC(KeInitializeSpinLock, 1),
4332 IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4333 IMPORT_SFUNC(IoGetDeviceProperty, 5),
4334 IMPORT_SFUNC(IoAllocateWorkItem, 1),
4335 IMPORT_SFUNC(IoFreeWorkItem, 1),
4336 IMPORT_SFUNC(IoQueueWorkItem, 4),
4337 IMPORT_SFUNC(ExQueueWorkItem, 2),
4338 IMPORT_SFUNC(ntoskrnl_workitem, 2),
4339 IMPORT_SFUNC(KeInitializeMutex, 2),
4340 IMPORT_SFUNC(KeReleaseMutex, 2),
4341 IMPORT_SFUNC(KeReadStateMutex, 1),
4342 IMPORT_SFUNC(KeInitializeEvent, 3),
4343 IMPORT_SFUNC(KeSetEvent, 3),
4344 IMPORT_SFUNC(KeResetEvent, 1),
4345 IMPORT_SFUNC(KeClearEvent, 1),
4346 IMPORT_SFUNC(KeReadStateEvent, 1),
4347 IMPORT_SFUNC(KeInitializeTimer, 1),
4348 IMPORT_SFUNC(KeInitializeTimerEx, 2),
4349 IMPORT_SFUNC(KeSetTimer, 3),
4350 IMPORT_SFUNC(KeSetTimerEx, 4),
4351 IMPORT_SFUNC(KeCancelTimer, 1),
4352 IMPORT_SFUNC(KeReadStateTimer, 1),
4353 IMPORT_SFUNC(KeInitializeDpc, 3),
4354 IMPORT_SFUNC(KeInsertQueueDpc, 3),
4355 IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4356 IMPORT_SFUNC(KeSetImportanceDpc, 2),
4357 IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4358 IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4359 IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4360 IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4361 IMPORT_FFUNC(ObfDereferenceObject, 1),
4362 IMPORT_SFUNC(ZwClose, 1),
4363 IMPORT_SFUNC(PsCreateSystemThread, 7),
4364 IMPORT_SFUNC(PsTerminateSystemThread, 1),
4365 IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4366 IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4367 IMPORT_CFUNC(WmiTraceMessage, 0),
4370 * This last entry is a catch-all for any function we haven't
4371 * implemented yet. The PE import list patching routine will
4372 * use it for any function that doesn't have an explicit match
4373 * in this table.
4376 { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4380 { NULL, NULL, NULL }