 * Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>

#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
    sysctl_show_timers, "I", "Show ntoskrnl timer stats");
#endif
typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
    struct cv we_cv;
    struct thread *we_td;
};

typedef struct wb_ext wb_ext;

#define NTOSKRNL_TIMEOUTS 256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
    struct callout ce_callout;
    list_entry ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;
static uint8_t RtlEqualUnicodeString(unicode_string *,
    unicode_string *, uint8_t);
static void RtlCopyUnicodeString(unicode_string *,
    unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
    void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
    device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
    device_object *, void *, uint32_t, void *, uint32_t,
    uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
    nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
    int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
    lookaside_alloc_func *, lookaside_free_func *,
    uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
    lookaside_alloc_func *, lookaside_free_func *,
    uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry *ExInterlockedPushEntrySList(slist_header *,
    slist_entry *, kspin_lock *);
static slist_entry *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
    uint64_t, uint64_t, uint64_t, enum nt_caching_type);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
    enum nt_caching_type);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
    uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
    uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void KeQuerySystemTime(uint64_t *);
static uint32_t KeTickCount(void);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
    uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceObjectPointer(unicode_string *,
    uint32_t, void *, device_object *);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
    uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
    uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
    uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static char *ntoskrnl_strncat(char *, char *, size_t);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
static int32_t KeSetPriorityThread(struct thread *, int32_t);
static void dummy(void);
static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;
    image_patch_table *patch;

    mtx_init(&ntoskrnl_dispatchlock,
        "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
    mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
    KeInitializeSpinLock(&ntoskrnl_cancellock);
    KeInitializeSpinLock(&ntoskrnl_intlock);
    TAILQ_INIT(&ntoskrnl_reflist);

    InitializeListHead(&ntoskrnl_calllist);
    InitializeListHead(&ntoskrnl_intlist);
    mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

    kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
        sizeof(kdpc_queue) * mp_ncpus, 0);
#else
        sizeof(kdpc_queue), 0);
#endif

    if (kq_queues == NULL)
        return (ENOMEM);

    wq_queues = ExAllocatePoolWithTag(NonPagedPool,
        sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

    if (wq_queues == NULL)
        return (ENOMEM);

#ifdef NTOSKRNL_MULTIPLE_DPCS
    bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
    bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
    bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

    /*
     * Launch the DPC threads.
     */

#ifdef NTOSKRNL_MULTIPLE_DPCS
    for (i = 0; i < mp_ncpus; i++) {
#else
    for (i = 0; i < 1; i++) {
#endif
        kq = kq_queues + i;
        error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
            RFHIGHPID, NDIS_KSTACK_PAGES, "Windows DPC %d", i);
        if (error)
            panic("failed to launch DPC thread");
    }

    /*
     * Launch the workitem threads.
     */

    for (i = 0; i < WORKITEM_THREADS; i++) {
        kq = wq_queues + i;
        error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
            RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Workitem %d", i);
        if (error)
            panic("failed to launch workitem thread");
    }

    patch = ntoskrnl_functbl;
    while (patch->ipt_func != NULL) {
        windrv_wrap((funcptr)patch->ipt_func,
            (funcptr *)&patch->ipt_wrap,
            patch->ipt_argcnt, patch->ipt_ftype);
        patch++;
    }

    for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
        e = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(callout_entry), 0);
        if (e == NULL)
            panic("failed to allocate timeouts");
        mtx_lock_spin(&ntoskrnl_calllock);
        InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
        mtx_unlock_spin(&ntoskrnl_calllock);
    }
    /*
     * MDLs are supposed to be variable size (they describe
     * buffers containing some number of pages, but we don't
     * know ahead of time how many pages that will be). But
     * always allocating them off the heap is very slow. As
     * a compromise, we create an MDL UMA zone big enough to
     * handle any buffer requiring up to 16 pages, and we
     * use those for any MDLs for buffers of 16 pages or less
     * in size. For buffers larger than that (which we assume
     * will be few and far between), we allocate the MDLs off
     * the heap.
     */
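    /*
     * (Added note, not in the original: given the 16-page cutoff
     * described above, MDL_ZONE_SIZE presumably works out to about
     * sizeof(struct mdl) + 16 * sizeof(vm_offset_t) -- the fixed MDL
     * header plus a page array big enough for a 16-page buffer.
     * MmSizeOfMdl() computes that same quantity for an arbitrary
     * buffer, which is how IoAllocateMdl() later decides between
     * uma_zalloc() on mdl_zone and ExAllocatePoolWithTag().)
     */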
    mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

    iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

    image_patch_table *patch;

    patch = ntoskrnl_functbl;
    while (patch->ipt_func != NULL) {
        windrv_unwrap(patch->ipt_wrap);
        patch++;
    }

    /* Stop the workitem queues. */
    ntoskrnl_destroy_workitem_threads();
    /* Stop the DPC queues. */
    ntoskrnl_destroy_dpc_threads();

    ExFreePool(kq_queues);
    ExFreePool(wq_queues);

    uma_zdestroy(mdl_zone);
    uma_zdestroy(iw_zone);

    mtx_lock_spin(&ntoskrnl_calllock);
    while (!IsListEmpty(&ntoskrnl_calllist)) {
        l = RemoveHeadList(&ntoskrnl_calllist);
        e = CONTAINING_RECORD(l, callout_entry, ce_list);
        mtx_unlock_spin(&ntoskrnl_calllock);
        ExFreePool(e);
        mtx_lock_spin(&ntoskrnl_calllock);
    }
    mtx_unlock_spin(&ntoskrnl_calllock);

    mtx_destroy(&ntoskrnl_dispatchlock);
    mtx_destroy(&ntoskrnl_interlock);
    mtx_destroy(&ntoskrnl_calllock);

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
ntoskrnl_memset(buf, ch, size)

    return (memset(buf, ch, size));

ntoskrnl_memmove(dst, src, size)

    bcopy(src, dst, size);

ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)

    unsigned char *p = buf;

    } while (--len != 0);
ntoskrnl_strstr(s, find)

    if ((c = *find++) != 0) {
            if ((sc = *s++) == 0)
        } while (strncmp(s, find, len) != 0);

/* Taken from libc */
ntoskrnl_strncat(dst, src, n)

            if ((*d = *s++) == 0)
RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
    uint8_t caseinsensitive)

    if (str1->us_len != str2->us_len)
        return (FALSE);

    for (i = 0; i < str1->us_len; i++) {
        if (caseinsensitive == TRUE) {
            if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
                toupper((char)(str2->us_buf[i] & 0xFF)))
                return (FALSE);
        } else {
            if (str1->us_buf[i] != str2->us_buf[i])
                return (FALSE);
        }
    }

    return (TRUE);
RtlCopyUnicodeString(dest, src)
    unicode_string *dest;

    if (dest->us_maxlen >= src->us_len)
        dest->us_len = src->us_len;
    else
        dest->us_len = dest->us_maxlen;
    memcpy(dest->us_buf, src->us_buf, dest->us_len);

ntoskrnl_ascii_to_unicode(ascii, unicode, len)

    for (i = 0; i < len; i++) {
        *ustr = (uint16_t)ascii[i];

ntoskrnl_unicode_to_ascii(unicode, ascii, len)

    for (i = 0; i < len / 2; i++) {
        *astr = (uint8_t)unicode[i];

RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)

    if (dest == NULL || src == NULL)
        return (STATUS_INVALID_PARAMETER);

    dest->as_len = src->us_len / 2;
    if (dest->as_maxlen < dest->as_len)
        dest->as_len = dest->as_maxlen;

    if (allocate == TRUE) {
        dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
            (src->us_len / 2) + 1, 0);
        if (dest->as_buf == NULL)
            return (STATUS_INSUFFICIENT_RESOURCES);
        dest->as_len = dest->as_maxlen = src->us_len / 2;
    } else {
        dest->as_len = src->us_len / 2; /* XXX */
        if (dest->as_maxlen < dest->as_len)
            dest->as_len = dest->as_maxlen;
    }

    ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
        dest->as_len * 2);

    return (STATUS_SUCCESS);

RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
    uint8_t allocate)

    if (dest == NULL || src == NULL)
        return (STATUS_INVALID_PARAMETER);

    if (allocate == TRUE) {
        dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
            strlen(src->as_buf) * 2, 0);
        if (dest->us_buf == NULL)
            return (STATUS_INSUFFICIENT_RESOURCES);
        dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
    } else {
        dest->us_len = src->as_len * 2; /* XXX */
        if (dest->us_maxlen < dest->us_len)
            dest->us_len = dest->us_maxlen;
    }

    ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
        dest->us_len / 2);

    return (STATUS_SUCCESS);
ExAllocatePoolWithTag(pooltype, len, tag)

    buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);

IoAllocateDriverObjectExtension(drv, clid, extlen, ext)

    custom_extension *ce;

    ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
        + extlen, 0);
    if (ce == NULL)
        return (STATUS_INSUFFICIENT_RESOURCES);

    InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

    *ext = (void *)(ce + 1);

    return (STATUS_SUCCESS);
IoGetDriverObjectExtension(drv, clid)

    custom_extension *ce;

    /*
     * Sanity check. Our dummy bus drivers don't have
     * any driver extensions.
     */

    if (drv->dro_driverext == NULL)
        return (NULL);

    e = drv->dro_driverext->dre_usrext.nle_flink;
    while (e != &drv->dro_driverext->dre_usrext) {
        ce = (custom_extension *)e;
        if (ce->ce_clid == clid)
            return ((void *)(ce + 1));
        e = e->nle_flink;
    }

    return (NULL);
IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
    uint32_t devtype, uint32_t devchars, uint8_t exclusive,
    device_object **newdev)

    dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
    if (dev == NULL)
        return (STATUS_INSUFFICIENT_RESOURCES);

    dev->do_type = devtype;
    dev->do_drvobj = drv;
    dev->do_currirp = NULL;

    if (devextlen) {
        dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
            devextlen, 0);
        if (dev->do_devext == NULL) {
            ExFreePool(dev);
            return (STATUS_INSUFFICIENT_RESOURCES);
        }

        bzero(dev->do_devext, devextlen);
    } else
        dev->do_devext = NULL;

    dev->do_size = sizeof(device_object) + devextlen;

    dev->do_attacheddev = NULL;
    dev->do_nextdev = NULL;
    dev->do_devtype = devtype;
    dev->do_stacksize = 1;
    dev->do_alignreq = 1;
    dev->do_characteristics = devchars;
    dev->do_iotimer = NULL;
    KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

    /*
     * Vpd is used for disk/tape devices,
     * but we don't support those. (Yet.)
     */

    dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
        sizeof(devobj_extension), 0);

    if (dev->do_devobj_ext == NULL) {
        if (dev->do_devext != NULL)
            ExFreePool(dev->do_devext);
        ExFreePool(dev);
        return (STATUS_INSUFFICIENT_RESOURCES);
    }

    dev->do_devobj_ext->dve_type = 0;
    dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
    dev->do_devobj_ext->dve_devobj = dev;

    /*
     * Attach this device to the driver object's list
     * of devices. Note: this is not the same as attaching
     * the device to the device stack. The driver's AddDevice
     * routine must explicitly call IoAddDeviceToDeviceStack()
     * to do that.
     */

    if (drv->dro_devobj == NULL) {
        drv->dro_devobj = dev;
        dev->do_nextdev = NULL;
    } else {
        dev->do_nextdev = drv->dro_devobj;
        drv->dro_devobj = dev;
    }

    return (STATUS_SUCCESS);
    if (dev->do_devobj_ext != NULL)
        ExFreePool(dev->do_devobj_ext);

    if (dev->do_devext != NULL)
        ExFreePool(dev->do_devext);

    /* Unlink the device from the driver's device list. */

    prev = dev->do_drvobj->dro_devobj;
    if (prev == dev)
        dev->do_drvobj->dro_devobj = dev->do_nextdev;
    else {
        while (prev->do_nextdev != dev)
            prev = prev->do_nextdev;
        prev->do_nextdev = dev->do_nextdev;
    }
IoGetAttachedDevice(dev)

    while (d->do_attacheddev != NULL)
        d = d->do_attacheddev;
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
    io_status_block *status;

    ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
    if (ip == NULL)
        return (NULL);
    ip->irp_usrevent = event;

IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
    io_status_block *status;

    io_stack_location *sl;

    ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
    if (ip == NULL)
        return (NULL);

    ip->irp_usriostat = status;
    ip->irp_tail.irp_overlay.irp_thread = NULL;

    sl = IoGetNextIrpStackLocation(ip);
    sl->isl_major = func;
    sl->isl_devobj = dobj;
    sl->isl_fileobj = NULL;
    sl->isl_completionfunc = NULL;

    ip->irp_userbuf = buf;

    if (dobj->do_flags & DO_BUFFERED_IO) {
        ip->irp_assoc.irp_sysbuf =
            ExAllocatePoolWithTag(NonPagedPool, len, 0);
        if (ip->irp_assoc.irp_sysbuf == NULL) {
            IoFreeIrp(ip);
            return (NULL);
        }
        bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
    }

    if (dobj->do_flags & DO_DIRECT_IO) {
        ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
        if (ip->irp_mdl == NULL) {
            if (ip->irp_assoc.irp_sysbuf != NULL)
                ExFreePool(ip->irp_assoc.irp_sysbuf);
            IoFreeIrp(ip);
            return (NULL);
        }
        ip->irp_userbuf = NULL;
        ip->irp_assoc.irp_sysbuf = NULL;
    }

    if (func == IRP_MJ_READ) {
        sl->isl_parameters.isl_read.isl_len = len;
        if (off != NULL)
            sl->isl_parameters.isl_read.isl_byteoff = *off;
        else
            sl->isl_parameters.isl_read.isl_byteoff = 0;
    }

    if (func == IRP_MJ_WRITE) {
        sl->isl_parameters.isl_write.isl_len = len;
        if (off != NULL)
            sl->isl_parameters.isl_write.isl_byteoff = *off;
        else
            sl->isl_parameters.isl_write.isl_byteoff = 0;
    }
IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
    uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
    nt_kevent *event, io_status_block *status)

    io_stack_location *sl;

    ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
    if (ip == NULL)
        return (NULL);
    ip->irp_usrevent = event;
    ip->irp_usriostat = status;
    ip->irp_tail.irp_overlay.irp_thread = NULL;

    sl = IoGetNextIrpStackLocation(ip);
    sl->isl_major = isinternal == TRUE ?
        IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
    sl->isl_devobj = dobj;
    sl->isl_fileobj = NULL;
    sl->isl_completionfunc = NULL;
    sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
    sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
    sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

    switch(IO_METHOD(iocode)) {
    case METHOD_BUFFERED:
        if (ilen > olen)
            buflen = ilen;
        else
            buflen = olen;
        if (buflen) {
            ip->irp_assoc.irp_sysbuf =
                ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
            if (ip->irp_assoc.irp_sysbuf == NULL) {
                IoFreeIrp(ip);
                return (NULL);
            }
        }
        if (ilen && ibuf != NULL) {
            bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
            bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
                buflen - ilen);
        } else
            bzero(ip->irp_assoc.irp_sysbuf, ilen);
        ip->irp_userbuf = obuf;
        break;
    case METHOD_IN_DIRECT:
    case METHOD_OUT_DIRECT:
        if (ilen && ibuf != NULL) {
            ip->irp_assoc.irp_sysbuf =
                ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
            if (ip->irp_assoc.irp_sysbuf == NULL) {
                IoFreeIrp(ip);
                return (NULL);
            }
            bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
        }
        if (olen && obuf != NULL) {
            ip->irp_mdl = IoAllocateMdl(obuf, olen,
                FALSE, FALSE, ip);
            /*
             * Normally we would MmProbeAndLockPages()
             * here, but we don't have to in our case.
             */
        }
        break;
    case METHOD_NEITHER:
        ip->irp_userbuf = obuf;
        sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
    /*
     * Ideally, we should associate this IRP with the calling
     * thread.
     */
IoAllocateIrp(uint8_t stsize, uint8_t chargequota)

    i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);

    IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

IoMakeAssociatedIrp(irp *ip, uint8_t stsize)

    associrp = IoAllocateIrp(stsize, FALSE);
    if (associrp == NULL)
        return (NULL);

    mtx_lock(&ntoskrnl_dispatchlock);
    associrp->irp_flags |= IRP_ASSOCIATED_IRP;
    associrp->irp_tail.irp_overlay.irp_thread =
        ip->irp_tail.irp_overlay.irp_thread;
    associrp->irp_assoc.irp_master = ip;
    mtx_unlock(&ntoskrnl_dispatchlock);
IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)

    bzero((char *)io, IoSizeOfIrp(ssize));
    io->irp_size = psize;
    io->irp_stackcnt = ssize;
    io->irp_currentstackloc = ssize;
    InitializeListHead(&io->irp_thlist);
    io->irp_tail.irp_overlay.irp_csl =
        (io_stack_location *)(io + 1) + ssize;

IoReuseIrp(ip, status)

    allocflags = ip->irp_allocflags;
    IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
    ip->irp_iostat.isb_status = status;
    ip->irp_allocflags = allocflags;

IoAcquireCancelSpinLock(uint8_t *irql)

    KeAcquireSpinLock(&ntoskrnl_cancellock, irql);

IoReleaseCancelSpinLock(uint8_t irql)

    KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
IoCancelIrp(irp *ip)

    IoAcquireCancelSpinLock(&cancelirql);
    cfunc = IoSetCancelRoutine(ip, NULL);
    ip->irp_cancel = TRUE;
    if (cfunc == NULL) {
        IoReleaseCancelSpinLock(cancelirql);
        return (FALSE);
    }
    ip->irp_cancelirql = cancelirql;
    MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
    return (uint8_t)IoSetCancelValue(ip, TRUE);
IofCallDriver(dobj, ip)
    device_object *dobj;

    driver_object *drvobj;
    io_stack_location *sl;
    driver_dispatch disp;

    drvobj = dobj->do_drvobj;

    if (ip->irp_currentstackloc <= 0)
        panic("IoCallDriver(): out of stack locations");

    IoSetNextIrpStackLocation(ip);
    sl = IoGetCurrentIrpStackLocation(ip);

    sl->isl_devobj = dobj;

    disp = drvobj->dro_dispatch[sl->isl_major];
    status = MSCALL2(disp, dobj, ip);
IofCompleteRequest(irp *ip, uint8_t prioboost)

    device_object *dobj;
    io_stack_location *sl;

    KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
        ("incorrect IRP(%p) status (STATUS_PENDING)", ip));

    sl = IoGetCurrentIrpStackLocation(ip);
    IoSkipCurrentIrpStackLocation(ip);

    do {
        if (sl->isl_ctl & SL_PENDING_RETURNED)
            ip->irp_pendingreturned = TRUE;

        if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
            dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
        else
            dobj = NULL;

        if (sl->isl_completionfunc != NULL &&
            ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
            sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
            (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
            sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
            (ip->irp_cancel == TRUE &&
            sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
            cf = sl->isl_completionfunc;
            status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
            if (status == STATUS_MORE_PROCESSING_REQUIRED)
                return;
        } else {
            if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
                (ip->irp_pendingreturned == TRUE))
                IoMarkIrpPending(ip);
        }

        /* move to the next. */
        IoSkipCurrentIrpStackLocation(ip);
        sl++;
    } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));

    if (ip->irp_usriostat != NULL)
        *ip->irp_usriostat = ip->irp_iostat;
    if (ip->irp_usrevent != NULL)
        KeSetEvent(ip->irp_usrevent, prioboost, FALSE);

    /* Handle any associated IRPs. */

    if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
        uint32_t masterirpcnt;

        masterirp = ip->irp_assoc.irp_master;
        masterirpcnt =
            InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

        while ((m = ip->irp_mdl) != NULL) {
            ip->irp_mdl = m->mdl_next;
            IoFreeMdl(m);
        }
        IoFreeIrp(ip);
        if (masterirpcnt == 0)
            IoCompleteRequest(masterirp, IO_NO_INCREMENT);
    /* With any luck, these conditions will never arise. */

    if (ip->irp_flags & IRP_PAGING_IO) {
        if (ip->irp_mdl != NULL)
            IoFreeMdl(ip->irp_mdl);

    KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
    l = ntoskrnl_intlist.nle_flink;
    while (l != &ntoskrnl_intlist) {
        iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
        claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
        if (claimed == TRUE)
            break;
        l = l->nle_flink;
    }
    KeReleaseSpinLock(&ntoskrnl_intlock, irql);
KeAcquireInterruptSpinLock(iobj)

    KeAcquireSpinLock(&ntoskrnl_intlock, &irql);

KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)

    KeReleaseSpinLock(&ntoskrnl_intlock, irql);

KeSynchronizeExecution(iobj, syncfunc, syncctx)

    KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
    MSCALL1(syncfunc, syncctx);
    KeReleaseSpinLock(&ntoskrnl_intlock, irql);
/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke all the registered ISRs. This effectively makes all
 * interrupts shared, but it's the only way to duplicate the
 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */
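/*
 * (Illustrative sketch, not from the original source: a driver
 * would typically register and tear down its ISR like this,
 * where my_isr and my_softc are hypothetical driver-side names:
 *
 *	kinterrupt *ih;
 *
 *	IoConnectInterrupt(&ih, my_isr, &my_softc, NULL, vector,
 *	    irql, irql, imode, TRUE, 0, FALSE);
 *	...
 *	IoDisconnectInterrupt(ih);
 *
 * Each registration is linked onto ntoskrnl_intlist, and
 * ntoskrnl_intr() above walks that list on every interrupt,
 * calling each registered service routine until one claims it.)
 */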
IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
    kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
    uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)

    *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
    if (*iobj == NULL)
        return (STATUS_INSUFFICIENT_RESOURCES);

    (*iobj)->ki_svcfunc = svcfunc;
    (*iobj)->ki_svcctx = svcctx;

    if (lock == NULL) {
        KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
        (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
    } else
        (*iobj)->ki_lock = lock;

    KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
    InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
    KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

    return (STATUS_SUCCESS);
IoDisconnectInterrupt(iobj)

    KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
    RemoveEntryList((&iobj->ki_list));
    KeReleaseSpinLock(&ntoskrnl_intlock, irql);
IoAttachDeviceToDeviceStack(src, dst)

    device_object *attached;

    mtx_lock(&ntoskrnl_dispatchlock);
    attached = IoGetAttachedDevice(dst);
    attached->do_attacheddev = src;
    src->do_attacheddev = NULL;
    src->do_stacksize = attached->do_stacksize + 1;
    mtx_unlock(&ntoskrnl_dispatchlock);
IoDetachDevice(topdev)
    device_object *topdev;

    device_object *tail;

    mtx_lock(&ntoskrnl_dispatchlock);

    /* First, break the chain. */
    tail = topdev->do_attacheddev;
    if (tail == NULL) {
        mtx_unlock(&ntoskrnl_dispatchlock);
        return;
    }
    topdev->do_attacheddev = tail->do_attacheddev;
    topdev->do_refcnt--;

    /* Now reduce the stacksize count for the tail objects. */

    tail = topdev->do_attacheddev;
    while (tail != NULL) {
        tail->do_stacksize--;
        tail = tail->do_attacheddev;
    }

    mtx_unlock(&ntoskrnl_dispatchlock);
/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */
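/*
 * (Worked example, added for clarity: a free mutant has
 * dh_sigstate == 1. The first successful wait drops it to 0 and
 * records km_ownerthread; each recursive acquisition by the owner
 * drives it further negative, and each KeReleaseMutex() increments
 * it back toward 1. That is why, for the owning thread,
 * sigstate <= 0 still counts as signalled, per the first rule
 * above; INT32_MIN is treated as the recursion limit elsewhere
 * in this file.)
 */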
ntoskrnl_is_signalled(obj, td)
    nt_dispatch_header *obj;

    if (obj->dh_type == DISP_TYPE_MUTANT) {
        km = (kmutant *)obj;
        if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
            obj->dh_sigstate == 1)
            return (TRUE);
        return (FALSE);
    }

    if (obj->dh_sigstate > 0)
        return (TRUE);
    return (FALSE);

ntoskrnl_satisfy_wait(obj, td)
    nt_dispatch_header *obj;

    switch (obj->dh_type) {
    case DISP_TYPE_MUTANT:
        km = (struct kmutant *)obj;
        obj->dh_sigstate--;
        /*
         * If sigstate reaches 0, the mutex is now
         * non-signalled (the new thread owns it).
         */
        if (obj->dh_sigstate == 0) {
            km->km_ownerthread = td;
            if (km->km_abandoned == TRUE)
                km->km_abandoned = FALSE;
        }
        break;
    /* Synchronization objects get reset to unsignalled. */
    case DISP_TYPE_SYNCHRONIZATION_EVENT:
    case DISP_TYPE_SYNCHRONIZATION_TIMER:
        obj->dh_sigstate = 0;
        break;
    case DISP_TYPE_SEMAPHORE:
        obj->dh_sigstate--;
        break;

ntoskrnl_satisfy_multiple_waits(wb)

    cur = wb;
    td = wb->wb_kthread;

    do {
        ntoskrnl_satisfy_wait(wb->wb_object, td);
        cur->wb_awakened = TRUE;
        cur = cur->wb_next;
    } while (cur != wb);
/* Always called with dispatcher lock held. */

ntoskrnl_waittest(obj, increment)
    nt_dispatch_header *obj;

    wait_block *w, *next;

    /*
     * Once an object has been signalled, we walk its list of
     * wait blocks. If a wait block can be awakened, then satisfy
     * waits as necessary and wake the thread.
     *
     * The rules work like this:
     *
     * If a wait block is marked as WAITTYPE_ANY, then
     * we can satisfy the wait conditions on the current
     * object and wake the thread right away. Satisfying
     * the wait also has the effect of breaking us out
     * of the search loop.
     *
     * If the object is marked as WAITTYPE_ALL, then the
     * wait block will be part of a circularly linked
     * list of wait blocks belonging to a waiting thread
     * that's sleeping in KeWaitForMultipleObjects(). In
     * order to wake the thread, all the objects in the
     * wait list must be in the signalled state. If they
     * are, we then satisfy all of them and wake the
     * thread.
     */

    e = obj->dh_waitlisthead.nle_flink;

    while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
        w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
        we = w->wb_ext;
        td = we->we_td;
        satisfied = FALSE;
        if (w->wb_waittype == WAITTYPE_ANY) {
            /*
             * Thread can be awakened if
             * any wait is satisfied.
             */
            ntoskrnl_satisfy_wait(obj, td);
            satisfied = TRUE;
            w->wb_awakened = TRUE;
        } else {
            /*
             * Thread can only be woken up
             * if all waits are satisfied.
             * If the thread is waiting on multiple
             * objects, they should all be linked
             * through the wb_next pointers in the
             * wait blocks.
             */
            satisfied = TRUE;
            next = w->wb_next;
            while (next != w) {
                if (ntoskrnl_is_signalled(obj, td) == FALSE) {
                    satisfied = FALSE;
                    break;
                }
                next = next->wb_next;
            }
            ntoskrnl_satisfy_multiple_waits(w);
        }

        if (satisfied == TRUE)
            cv_broadcastpri(&we->we_cv,
                (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
                w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
/*
 * Return the number of 100 nanosecond intervals since
 * January 1, 1601. (?!?!)
 */

    *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
        11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
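/*
 * (Added arithmetic note: 11644473600 is the number of seconds
 * from 1601-01-01 to the Unix epoch, 1970-01-01 -- 369 years
 * including 89 leap days, i.e. 365 * 369 + 89 = 134774 days of
 * 86400 seconds each. Multiplying by 10000000 converts seconds
 * to 100 ns ticks, matching the tv_sec and tv_nsec conversions
 * on the line above.)
 */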
KeQuerySystemTime(current_time)
    uint64_t *current_time;

    ntoskrnl_time(current_time);

    getmicrouptime(&tv);
/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread's termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */
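/*
 * (Illustrative usage sketch, not from the original file: the
 * canonical driver pattern this routine emulates is
 *
 *	nt_kevent event;
 *
 *	KeInitializeEvent(&event, EVENT_TYPE_NOTIFY, FALSE);
 *	...
 *	KeWaitForSingleObject(&event, 0, 0, FALSE, NULL);
 *
 * with some other thread eventually calling
 * KeSetEvent(&event, IO_NO_INCREMENT, FALSE) to release the
 * waiter. EVENT_TYPE_NOTIFY and EVENT_TYPE_SYNC correspond to the
 * notification/synchronization distinction described above.)
 */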
KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
    uint8_t alertable, int64_t *duetime)

    struct thread *td = curthread;
    nt_dispatch_header *obj;

    obj = arg;
    if (obj == NULL)
        return (STATUS_INVALID_PARAMETER);

    mtx_lock(&ntoskrnl_dispatchlock);

    cv_init(&we.we_cv, "KeWFS");

    /*
     * Check to see if this object is already signalled,
     * and just return without waiting if it is.
     */
    if (ntoskrnl_is_signalled(obj, td) == TRUE) {
        /* Sanity check the signal state value. */
        if (obj->dh_sigstate != INT32_MIN) {
            ntoskrnl_satisfy_wait(obj, curthread);
            mtx_unlock(&ntoskrnl_dispatchlock);
            return (STATUS_SUCCESS);
        } else {
            /*
             * There's a limit to how many times we can
             * recursively acquire a mutant. If we hit
             * the limit, something is very wrong.
             */
            if (obj->dh_type == DISP_TYPE_MUTANT) {
                mtx_unlock(&ntoskrnl_dispatchlock);
                panic("mutant limit exceeded");
            }
        }
    }

    bzero((char *)&w, sizeof(wait_block));
    w.wb_object = obj;
    w.wb_waittype = WAITTYPE_ANY;
    w.wb_awakened = FALSE;
    w.wb_oldpri = td->td_priority;

    InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
    /*
     * The timeout value is specified in 100 nanosecond units
     * and can be a positive or negative number. If it's positive,
     * then the duetime is absolute, and we need to convert it
     * to an absolute offset relative to now in order to use it.
     * If it's negative, then the duetime is relative and we
     * just have to convert the units.
     */
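    /*
     * (Added example: a relative one-second wait arrives as
     * duetime = -10000000 (1 s in 100 ns units), which the code
     * below converts to tv.tv_sec = 1, tv.tv_usec = 0. An absolute
     * duetime is instead compared against ntoskrnl_time() and the
     * difference is converted the same way.)
     */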
    if (duetime != NULL) {
        if (*duetime < 0) {
            tv.tv_sec = - (*duetime) / 10000000;
            tv.tv_usec = (- (*duetime) / 10) -
                (tv.tv_sec * 1000000);
        } else {
            ntoskrnl_time(&curtime);
            if (*duetime < curtime)
                tv.tv_sec = tv.tv_usec = 0;
            else {
                tv.tv_sec = ((*duetime) - curtime) / 10000000;
                tv.tv_usec = ((*duetime) - curtime) / 10 -
                    (tv.tv_sec * 1000000);
            }
        }
    }

    if (duetime == NULL)
        cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
    else
        error = cv_timedwait(&we.we_cv,
            &ntoskrnl_dispatchlock, tvtohz(&tv));

    RemoveEntryList(&w.wb_waitlist);

    cv_destroy(&we.we_cv);

    /* We timed out. Leave the object alone and return status. */

    if (error == EWOULDBLOCK) {
        mtx_unlock(&ntoskrnl_dispatchlock);
        return (STATUS_TIMEOUT);
    }

    mtx_unlock(&ntoskrnl_dispatchlock);

    return (STATUS_SUCCESS);

    return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
        mode, alertable, duetime, &w));
KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
    uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
    wait_block *wb_array)

    struct thread *td = curthread;
    wait_block *whead, *w;
    wait_block _wb_array[MAX_WAIT_OBJECTS];
    nt_dispatch_header *cur;
    int i, wcnt = 0, error = 0;
    struct timespec t1, t2;
    uint32_t status = STATUS_SUCCESS;

    if (cnt > MAX_WAIT_OBJECTS)
        return (STATUS_INVALID_PARAMETER);
    if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
        return (STATUS_INVALID_PARAMETER);

    mtx_lock(&ntoskrnl_dispatchlock);

    cv_init(&we.we_cv, "KeWFM");

    if (wb_array == NULL)
        whead = _wb_array;
    else
        whead = wb_array;

    bzero((char *)whead, sizeof(wait_block) * cnt);

    /* First pass: see if we can satisfy any waits immediately. */

    w = whead;
    for (i = 0; i < cnt; i++) {
        InsertTailList((&obj[i]->dh_waitlisthead),
            (&w->wb_waitlist));
        w->wb_object = obj[i];
        w->wb_waittype = wtype;
        w->wb_awakened = FALSE;
        w->wb_oldpri = td->td_priority;

        if (ntoskrnl_is_signalled(obj[i], td)) {
            /*
             * There's a limit to how many times
             * we can recursively acquire a mutant.
             * If we hit the limit, something
             * is very wrong.
             */
            if (obj[i]->dh_sigstate == INT32_MIN &&
                obj[i]->dh_type == DISP_TYPE_MUTANT) {
                mtx_unlock(&ntoskrnl_dispatchlock);
                panic("mutant limit exceeded");
            }

            /*
             * If this is a WAITTYPE_ANY wait, then
             * satisfy the waited object and exit
             * right now.
             */
            if (wtype == WAITTYPE_ANY) {
                ntoskrnl_satisfy_wait(obj[i], td);
                status = STATUS_WAIT_0 + i;
                goto wait_done;
            }

            w->wb_object = NULL;
            RemoveEntryList(&w->wb_waitlist);
        } else
            wcnt++;
        w++;
    }

    /*
     * If this is a WAITTYPE_ALL wait and all objects are
     * already signalled, satisfy the waits and exit now.
     */

    if (wtype == WAITTYPE_ALL && wcnt == 0) {
        for (i = 0; i < cnt; i++)
            ntoskrnl_satisfy_wait(obj[i], td);
        status = STATUS_SUCCESS;
        goto wait_done;
    }

    /*
     * Create a circular waitblock list. The waitcount
     * must always be non-zero when we get here.
     */

    (w - 1)->wb_next = whead;

    /* Wait on any objects that aren't yet signalled. */

    /* Calculate timeout, if any. */

    if (duetime != NULL) {
        if (*duetime < 0) {
            tv.tv_sec = - (*duetime) / 10000000;
            tv.tv_usec = (- (*duetime) / 10) -
                (tv.tv_sec * 1000000);
        } else {
            ntoskrnl_time(&curtime);
            if (*duetime < curtime)
                tv.tv_sec = tv.tv_usec = 0;
            else {
                tv.tv_sec = ((*duetime) - curtime) / 10000000;
                tv.tv_usec = ((*duetime) - curtime) / 10 -
                    (tv.tv_sec * 1000000);
            }
        }
    }

    while (wcnt) {
        nanotime(&t1);

        if (duetime == NULL)
            cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
        else
            error = cv_timedwait(&we.we_cv,
                &ntoskrnl_dispatchlock, tvtohz(&tv));

        /* Wait with timeout expired. */

        if (error == EWOULDBLOCK) {
            status = STATUS_TIMEOUT;
            goto wait_done;
        }

        nanotime(&t2);

        /* See what's been signalled. */

        w = whead;
        do {
            cur = w->wb_object;
            if (ntoskrnl_is_signalled(cur, td) == TRUE ||
                w->wb_awakened == TRUE) {
                /* Sanity check the signal state value. */
                if (cur->dh_sigstate == INT32_MIN &&
                    cur->dh_type == DISP_TYPE_MUTANT) {
                    mtx_unlock(&ntoskrnl_dispatchlock);
                    panic("mutant limit exceeded");
                }
                wcnt--;
                if (wtype == WAITTYPE_ANY) {
                    status = w->wb_waitkey &
                        STATUS_WAIT_0;
                    goto wait_done;
                }
            }
            w = w->wb_next;
        } while (w != whead);
        /*
         * If all objects have been signalled, or if this
         * is a WAITTYPE_ANY wait and we were woken up by
         * someone, we can bail.
         */
        if (wcnt == 0) {
            status = STATUS_SUCCESS;
            break;
        }

        /*
         * If this is a WAITTYPE_ALL wait, and there are still
         * objects that haven't been signalled, deduct the
         * time that's elapsed so far from the timeout and
         * wait again (or continue waiting indefinitely if
         * there's no timeout).
         */

        if (duetime != NULL) {
            tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
            tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
        }
    }

wait_done:

    cv_destroy(&we.we_cv);

    for (i = 0; i < cnt; i++) {
        if (whead[i].wb_object != NULL)
            RemoveEntryList(&whead[i].wb_waitlist);
    }
    mtx_unlock(&ntoskrnl_dispatchlock);
WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)

    bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);

READ_REGISTER_USHORT(reg)

    return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));

WRITE_REGISTER_ULONG(reg, val)

    bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);

READ_REGISTER_ULONG(reg)

    return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));

READ_REGISTER_UCHAR(uint8_t *reg)

    return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));

WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)

    bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
_allshl(int64_t a, uint8_t b)

_aullshl(uint64_t a, uint8_t b)

_allshr(int64_t a, uint8_t b)

_aullshr(uint64_t a, uint8_t b)
static slist_entry *
ntoskrnl_pushsl(head, entry)

    slist_entry *oldhead;

    oldhead = head->slh_list.slh_next;
    entry->sl_next = head->slh_list.slh_next;
    head->slh_list.slh_next = entry;
    head->slh_list.slh_depth++;
    head->slh_list.slh_seq++;

static slist_entry *
ntoskrnl_popsl(head)

    first = head->slh_list.slh_next;
    if (first != NULL) {
        head->slh_list.slh_next = first->sl_next;
        head->slh_list.slh_depth--;
        head->slh_list.slh_seq++;
/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() in the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */
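/*
 * (Added note: ntoskrnl_findwrap() below is what makes this
 * work. Given a native function pointer, it returns the
 * Microsoft-convention wrapper that windrv_wrap() installed in
 * ntoskrnl_functbl at module load time, e.g.:
 *
 *	lookaside->nll_l.gl_allocfunc =
 *	    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
 *
 * as done in ExInitializePagedLookasideList() further down.)
 */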
ntoskrnl_findwrap(func)

    image_patch_table *patch;

    patch = ntoskrnl_functbl;
    while (patch->ipt_func != NULL) {
        if ((funcptr)patch->ipt_func == func)
            return ((funcptr)patch->ipt_wrap);
        patch++;
    }

    return (NULL);
ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
    lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
    uint32_t flags, size_t size, uint32_t tag, uint16_t depth)

    bzero((char *)lookaside, sizeof(paged_lookaside_list));

    if (size < sizeof(slist_entry))
        lookaside->nll_l.gl_size = sizeof(slist_entry);
    else
        lookaside->nll_l.gl_size = size;
    lookaside->nll_l.gl_tag = tag;
    if (allocfunc == NULL)
        lookaside->nll_l.gl_allocfunc =
            ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
    else
        lookaside->nll_l.gl_allocfunc = allocfunc;

    if (freefunc == NULL)
        lookaside->nll_l.gl_freefunc =
            ntoskrnl_findwrap((funcptr)ExFreePool);
    else
        lookaside->nll_l.gl_freefunc = freefunc;

    KeInitializeSpinLock(&lookaside->nll_obsoletelock);

    lookaside->nll_l.gl_type = NonPagedPool;
    lookaside->nll_l.gl_depth = depth;
    lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

ExDeletePagedLookasideList(lookaside)
    paged_lookaside_list *lookaside;

    void (*freefunc)(void *);

    freefunc = lookaside->nll_l.gl_freefunc;
    while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
        MSCALL1(freefunc, buf);

ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
    lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
    uint32_t flags, size_t size, uint32_t tag, uint16_t depth)

    bzero((char *)lookaside, sizeof(npaged_lookaside_list));

    if (size < sizeof(slist_entry))
        lookaside->nll_l.gl_size = sizeof(slist_entry);
    else
        lookaside->nll_l.gl_size = size;
    lookaside->nll_l.gl_tag = tag;
    if (allocfunc == NULL)
        lookaside->nll_l.gl_allocfunc =
            ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
    else
        lookaside->nll_l.gl_allocfunc = allocfunc;

    if (freefunc == NULL)
        lookaside->nll_l.gl_freefunc =
            ntoskrnl_findwrap((funcptr)ExFreePool);
    else
        lookaside->nll_l.gl_freefunc = freefunc;

    KeInitializeSpinLock(&lookaside->nll_obsoletelock);

    lookaside->nll_l.gl_type = NonPagedPool;
    lookaside->nll_l.gl_depth = depth;
    lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

ExDeleteNPagedLookasideList(lookaside)
    npaged_lookaside_list *lookaside;

    void (*freefunc)(void *);

    freefunc = lookaside->nll_l.gl_freefunc;
    while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
        MSCALL1(freefunc, buf);
InterlockedPushEntrySList(head, entry)

    slist_entry *oldhead;

    mtx_lock_spin(&ntoskrnl_interlock);
    oldhead = ntoskrnl_pushsl(head, entry);
    mtx_unlock_spin(&ntoskrnl_interlock);

InterlockedPopEntrySList(head)

    mtx_lock_spin(&ntoskrnl_interlock);
    first = ntoskrnl_popsl(head);
    mtx_unlock_spin(&ntoskrnl_interlock);

static slist_entry *
ExInterlockedPushEntrySList(head, entry, lock)

    return (InterlockedPushEntrySList(head, entry));

static slist_entry *
ExInterlockedPopEntrySList(head, lock)

    return (InterlockedPopEntrySList(head));

ExQueryDepthSList(head)

    mtx_lock_spin(&ntoskrnl_interlock);
    depth = head->slh_list.slh_depth;
    mtx_unlock_spin(&ntoskrnl_interlock);
KeInitializeSpinLock(lock)

KefAcquireSpinLockAtDpcLevel(lock)

#ifdef NTOSKRNL_DEBUG_SPINLOCKS
    while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
#ifdef NTOSKRNL_DEBUG_SPINLOCKS

KefReleaseSpinLockFromDpcLevel(lock)

    atomic_store_rel_int((volatile u_int *)lock, 0);

KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)

    if (KeGetCurrentIrql() > DISPATCH_LEVEL)
        panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

    KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
    KeAcquireSpinLockAtDpcLevel(lock);

KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)

    while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)

KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)

    atomic_store_rel_int((volatile u_int *)lock, 0);

#endif /* __i386__ */
InterlockedExchange(dst, val)
    volatile uint32_t *dst;

    mtx_lock_spin(&ntoskrnl_interlock);
    mtx_unlock_spin(&ntoskrnl_interlock);

InterlockedIncrement(addend)
    volatile uint32_t *addend;

    atomic_add_long((volatile u_long *)addend, 1);

InterlockedDecrement(addend)
    volatile uint32_t *addend;

    atomic_subtract_long((volatile u_long *)addend, 1);

ExInterlockedAddLargeStatistic(addend, inc)

    mtx_lock_spin(&ntoskrnl_interlock);
    mtx_unlock_spin(&ntoskrnl_interlock);
IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
    uint8_t chargequota, irp *iopkt)

    if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
        m = ExAllocatePoolWithTag(NonPagedPool,
            MmSizeOfMdl(vaddr, len), 0);
    else
        m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);

    MmInitializeMdl(m, vaddr, len);

    /*
     * MmInitializeMdl() clears the flags field, so we
     * have to set this here. If the MDL came from the
     * MDL UMA zone, tag it so we can release it to
     * the right place later.
     */
    m->mdl_flags = MDL_ZONE_ALLOCED;

    if (iopkt != NULL) {
        if (secondarybuf == TRUE) {
            last = iopkt->irp_mdl;
            while (last->mdl_next != NULL)
                last = last->mdl_next;
            last->mdl_next = m;
        } else {
            if (iopkt->irp_mdl != NULL)
                panic("leaking an MDL in IoAllocateMdl()");
            iopkt->irp_mdl = m;
        }
    }

    if (m->mdl_flags & MDL_ZONE_ALLOCED)
        uma_zfree(mdl_zone, m);
MmAllocateContiguousMemory(size, highest)

    size_t pagelength = roundup(size, PAGE_SIZE);

    addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);

MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
    boundary, cachetype)
    enum nt_caching_type cachetype;

    vm_memattr_t memattr;

    switch (cachetype) {
    case MmNonCached:
        memattr = VM_MEMATTR_UNCACHEABLE;
        break;
    case MmWriteCombined:
        memattr = VM_MEMATTR_WRITE_COMBINING;
        break;
    case MmNonCachedUnordered:
        memattr = VM_MEMATTR_UNCACHEABLE;
        break;
    case MmCached:
    case MmHardwareCoherentCached:
    default:
        memattr = VM_MEMATTR_DEFAULT;
        break;
    }

    ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
        lowest, highest, PAGE_SIZE, boundary, memattr);
    if (ret != NULL)
        malloc_type_allocated(M_DEVBUF, round_page(size));

MmFreeContiguousMemory(base)

MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
    enum nt_caching_type cachetype;

    contigfree(base, size, M_DEVBUF);
MmSizeOfMdl(vaddr, len)

    l = sizeof(struct mdl) +
        (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
MmBuildMdlForNonPagedPool(m)

    vm_offset_t *mdl_pages;

    pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);

    if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
        panic("not enough pages in MDL to describe buffer");

    mdl_pages = MmGetMdlPfnArray(m);

    for (i = 0; i < pagecnt; i++)
        mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);

    m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
    m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
MmMapLockedPages(mdl *buf, uint8_t accessmode)

    buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
    return (MmGetMdlVirtualAddress(buf));

MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
    void *vaddr, uint32_t bugcheck, uint32_t prio)

    return (MmMapLockedPages(buf, accessmode));

MmUnmapLockedPages(vaddr, buf)

    buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
/*
 * This function has a problem in that it will break if you
 * compile this module without PAE and try to use it on a PAE
 * kernel. Unfortunately, there's no way around this at the
 * moment. It's slightly less broken than using pmap_kextract().
 * You'd think the virtual memory subsystem would help us out
 * here, but it doesn't.
 */
MmGetPhysicalAddress(void *base)

    return (pmap_extract(kernel_map->pmap, (vm_offset_t)base));

MmIsAddressValid(vaddr)

    if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
MmMapIoSpace(paddr, len, cachetype)

    devclass_t nexus_class;
    device_t *nexus_devs, devp;
    int nexus_count = 0;
    device_t matching_dev = NULL;
    struct resource *res;

    /* There will always be at least one nexus. */

    nexus_class = devclass_find("nexus");
    devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);

    for (i = 0; i < nexus_count; i++) {
        devp = nexus_devs[i];
        matching_dev = ntoskrnl_finddev(devp, paddr, &res);
        if (matching_dev != NULL)
            break;
    }

    free(nexus_devs, M_TEMP);

    if (matching_dev == NULL)
        return (NULL);

    v = (vm_offset_t)rman_get_virtual(res);
    if (paddr > rman_get_start(res))
        v += paddr - rman_get_start(res);
MmUnmapIoSpace(vaddr, len)

ntoskrnl_finddev(dev, paddr, res)
    struct resource **res;

    device_t *children = NULL;
    device_t matching_dev;
    struct resource_list *rl;
    struct resource_list_entry *rle;

    /* We only want devices that have been successfully probed. */

    if (device_is_alive(dev) == FALSE)
        return (NULL);

    rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
    if (rl != NULL) {
        STAILQ_FOREACH(rle, rl, link) {
            r = rle->res;

            if (r == NULL)
                continue;

            flags = rman_get_flags(r);

            if (rle->type == SYS_RES_MEMORY &&
                paddr >= rman_get_start(r) &&
                paddr <= rman_get_end(r)) {
                if (!(flags & RF_ACTIVE))
                    bus_activate_resource(dev,
                        SYS_RES_MEMORY, 0, r);
                *res = r;
                return (dev);
            }
        }
    }

    /*
     * If this device has children, do another
     * level of recursion to inspect them.
     */

    device_get_children(dev, &children, &childcnt);

    for (i = 0; i < childcnt; i++) {
        matching_dev = ntoskrnl_finddev(children[i], paddr, res);
        if (matching_dev != NULL) {
            free(children, M_TEMP);
            return (matching_dev);
        }
    }

    /* Won't somebody please think of the children! */

    if (children != NULL)
        free(children, M_TEMP);

    return (NULL);
/*
 * Workitems are unlike DPCs, in that they run in a user-mode thread
 * context rather than at DISPATCH_LEVEL in kernel context. In our
 * case we run them in kernel context anyway.
 */
ntoskrnl_workitem_thread(arg)

    InitializeListHead(&kq->kq_disp);
    kq->kq_td = curthread;
    KeInitializeSpinLock(&kq->kq_lock);
    KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);

    KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

    KeAcquireSpinLock(&kq->kq_lock, &irql);

    KeReleaseSpinLock(&kq->kq_lock, irql);

    while (!IsListEmpty(&kq->kq_disp)) {
        l = RemoveHeadList(&kq->kq_disp);
        iw = CONTAINING_RECORD(l,
            io_workitem, iw_listentry);
        InitializeListHead((&iw->iw_listentry));
        if (iw->iw_func == NULL)
            continue;
        KeReleaseSpinLock(&kq->kq_lock, irql);
        MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
        KeAcquireSpinLock(&kq->kq_lock, &irql);
    }

    KeReleaseSpinLock(&kq->kq_lock, irql);

    kproc_exit(0);
    return; /* notreached */
ntoskrnl_destroy_workitem_threads(void)

    for (i = 0; i < WORKITEM_THREADS; i++) {
        kq = wq_queues + i;
        kq->kq_exit = 1;
        KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
        while (kq->kq_exit)
            tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
    }
IoAllocateWorkItem(dobj)
    device_object *dobj;

    iw = uma_zalloc(iw_zone, M_NOWAIT);
    if (iw == NULL)
        return (NULL);

    InitializeListHead(&iw->iw_listentry);
    iw->iw_dobj = dobj;

    mtx_lock(&ntoskrnl_dispatchlock);
    iw->iw_idx = wq_idx;
    WORKIDX_INC(wq_idx);
    mtx_unlock(&ntoskrnl_dispatchlock);

    uma_zfree(iw_zone, iw);
IoQueueWorkItem(iw, iw_func, qtype, ctx)
    io_workitem_func iw_func;

    kq = wq_queues + iw->iw_idx;

    KeAcquireSpinLock(&kq->kq_lock, &irql);

    /*
     * Traverse the list and make sure this workitem hasn't
     * already been inserted. Queuing the same workitem
     * twice will hose the list but good.
     */

    l = kq->kq_disp.nle_flink;
    while (l != &kq->kq_disp) {
        cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
        if (cur == iw) {
            /* Already queued -- do nothing. */
            KeReleaseSpinLock(&kq->kq_lock, irql);
            return;
        }
        l = l->nle_flink;
    }

    iw->iw_func = iw_func;

    InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
    KeReleaseSpinLock(&kq->kq_lock, irql);

    KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2813 ntoskrnl_workitem(dobj, arg)
2814 device_object *dobj;
2822 w = (work_queue_item *)dobj;
2823 f = (work_item_func)w->wqi_func;
2824 uma_zfree(iw_zone, iw);
2825 MSCALL2(f, w, w->wqi_ctx);
2829 * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2830 * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2831 * problem with ExQueueWorkItem() is that it can't guard against
2832 * the condition where a driver submits a job to the work queue and
2833 * is then unloaded before the job is able to run. IoQueueWorkItem()
2834 * acquires a reference to the device's device_object via the
2835 * object manager and retains it until after the job has completed,
2836 * which prevents the driver from being unloaded before the job
2837 * runs. (We don't currently support this behavior, though hopefully
2838 * that will change once the object manager API is fleshed out a bit.)
2840 * Having said all that, the ExQueueWorkItem() API remains, because
2841 * there are still other parts of Windows that use it, including
2842 * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2843 * We fake up the ExQueueWorkItem() API on top of our implementation
2844 * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2845 * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2846 * queue item (provided by the caller) in to IoAllocateWorkItem()
2847 * instead of the device_object. We need to save this pointer so
2848 * we can apply a sanity check: as with the DPC queue and other
2849 * workitem queues, we can't allow the same work queue item to
2850 * be queued twice. If it's already pending, we silently return
2854 ExQueueWorkItem(w, qtype)
2859 io_workitem_func iwf;
2867 * We need to do a special sanity test to make sure
2868 * the ExQueueWorkItem() API isn't used to queue
2869 * the same workitem twice. Rather than checking the
2870 * io_workitem pointer itself, we test the attached
2871 * device object, which is really a pointer to the
2872 * legacy work queue item structure.
2875 kq = wq_queues + WORKITEM_LEGACY_THREAD;
2876 KeAcquireSpinLock(&kq->kq_lock, &irql);
2877 l = kq->kq_disp.nle_flink;
2878 while (l != &kq->kq_disp) {
2879 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2880 if (cur->iw_dobj == (device_object *)w) {
2881 /* Already queued -- do nothing. */
2882 KeReleaseSpinLock(&kq->kq_lock, irql);
2887 KeReleaseSpinLock(&kq->kq_lock, irql);
2889 iw = IoAllocateWorkItem((device_object *)w);
2893 iw->iw_idx = WORKITEM_LEGACY_THREAD;
2894 iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2895 IoQueueWorkItem(iw, iwf, qtype, iw);
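/*
 * Illustrative sketch (assumption, not in the original source): how a
 * legacy caller such as NdisScheduleWorkItem() drives the shim above.
 * The caller owns the work_queue_item storage; "legacy_cb" and "sc"
 * are hypothetical names, and the callback must again be a
 * Windows-ABI routine since ntoskrnl_workitem() calls it via MSCALL2().
 */
#if 0
	static work_queue_item	wqi;

	wqi.wqi_func = (work_item_func)legacy_cb;
	wqi.wqi_ctx = sc;
	/* qtype is one of the Windows work queue type constants. */
	ExQueueWorkItem(&wqi, qtype);	/* lands on WORKITEM_LEGACY_THREAD */
#endif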
RtlZeroMemory(dst, len)

	bzero(dst, len);

RtlCopyMemory(dst, src, len)

	bcopy(src, dst, len);

RtlCompareMemory(s1, s2, len)

	size_t i, total = 0;

	m1 = __DECONST(char *, s1);
	m2 = __DECONST(char *, s2);

	for (i = 0; i < len; i++) {
		if (m1[i] == m2[i])
			total++;
	}

	return (total);

RtlInitAnsiString(dst, src)

	if (src == NULL)
		a->as_len = a->as_maxlen = 0;
	else
		a->as_len = a->as_maxlen = strlen(src);

RtlInitUnicodeString(dst, src)
	unicode_string *dst;

	if (src == NULL)
		u->us_len = u->us_maxlen = 0;
	else
		u->us_len = u->us_maxlen = i * 2;

RtlUnicodeStringToInteger(ustr, base, val)
	unicode_string *ustr;

	uchr = ustr->us_buf;
	bzero(abuf, sizeof(abuf));

	if ((char)((*uchr) & 0xFF) == '-') {
	} else if ((char)((*uchr) & 0xFF) == '+') {

	if ((char)((*uchr) & 0xFF) == 'b') {
	} else if ((char)((*uchr) & 0xFF) == 'o') {
	} else if ((char)((*uchr) & 0xFF) == 'x') {

	ntoskrnl_unicode_to_ascii(uchr, astr, len);
	*val = strtoul(abuf, NULL, base);

	return (STATUS_SUCCESS);
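/*
 * Worked example (sketch, not original text): the unicode string is
 * narrowed into a local ASCII buffer and handed to strtoul(), so a
 * 16-bit string "1F" parsed with base 16 should store 31 in *val.
 * The explicit uint16_t array stands in for a Windows-style
 * 16-bit-character buffer.
 */
#if 0
	static uint16_t	str[] = { '1', 'F', 0 };
	unicode_string	us;
	uint32_t	v;

	RtlInitUnicodeString(&us, str);
	RtlUnicodeStringToInteger(&us, 16, &v);	/* v == 31 expected */
#endif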
RtlFreeUnicodeString(ustr)
	unicode_string *ustr;

	if (ustr->us_buf == NULL)
		return;
	ExFreePool(ustr->us_buf);
	ustr->us_buf = NULL;

RtlFreeAnsiString(astr)

	if (astr->as_buf == NULL)
		return;
	ExFreePool(astr->as_buf);
	astr->as_buf = NULL;

atoi(str)

	return (int)strtol(str, (char **)NULL, 10);

atol(str)

	return strtol(str, (char **)NULL, 10);

rand(void)

	microtime(&tv);
	srandom(tv.tv_usec);
	return ((int)random());

IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)

	if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
		return (TRUE);
	return (FALSE);

IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
	unicode_string *name;
	device_object *devobj;

	return (STATUS_SUCCESS);

IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
	device_object *devobj;

	drv = devobj->do_drvobj;

	switch (regprop) {
	case DEVPROP_DRIVER_KEYNAME:
		*name = drv->dro_drivername.us_buf;
		*reslen = drv->dro_drivername.us_len;
		break;
	default:
		return (STATUS_INVALID_PARAMETER_2);
	}

	return (STATUS_SUCCESS);

KeInitializeMutex(kmutex, level)

	InitializeListHead((&kmutex->km_header.dh_waitlisthead));
	kmutex->km_abandoned = FALSE;
	kmutex->km_apcdisable = 1;
	kmutex->km_header.dh_sigstate = 1;
	kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
	kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
	kmutex->km_ownerthread = NULL;

KeReleaseMutex(kmutant *kmutex, uint8_t kwait)

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kmutex->km_header.dh_sigstate;
	if (kmutex->km_ownerthread != curthread) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return (STATUS_MUTANT_NOT_OWNED);
	}

	kmutex->km_header.dh_sigstate++;
	kmutex->km_abandoned = FALSE;

	if (kmutex->km_header.dh_sigstate == 1) {
		kmutex->km_ownerthread = NULL;
		ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);

KeReadStateMutex(kmutex)

	return (kmutex->km_header.dh_sigstate);
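/*
 * Sketch of expected usage (assumption, not original text): mutexes
 * are acquired by waiting on them and released with KeReleaseMutex();
 * per the ownership check above, releasing from the wrong thread
 * fails with STATUS_MUTANT_NOT_OWNED.
 */
#if 0
	static kmutant	km;

	KeInitializeMutex(&km, 0);
	KeWaitForSingleObject(&km, 0, 0, FALSE, NULL);	/* acquire */
	/* ... critical section ... */
	KeReleaseMutex(&km, FALSE);			/* release */
#endif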
KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)

	InitializeListHead((&kevent->k_header.dh_waitlisthead));
	kevent->k_header.dh_sigstate = state;
	if (type == EVENT_TYPE_NOTIFY)
		kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
	else
		kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
	kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);

KeResetEvent(kevent)

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	kevent->k_header.dh_sigstate = FALSE;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);

KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)

	nt_dispatch_header *dh;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	dh = &kevent->k_header;

	if (IsListEmpty(&dh->dh_waitlisthead))
		/*
		 * If there's nobody in the waitlist, just set
		 * the state to signalled.
		 */
		dh->dh_sigstate = 1;
	else {
		/*
		 * Get the first waiter. If this is a synchronization
		 * event, just wake up that one thread (don't bother
		 * setting the state to signalled since we're supposed
		 * to automatically clear synchronization events anyway).
		 *
		 * If it's a notification event, or the first
		 * waiter is doing a WAITTYPE_ALL wait, go through
		 * the full wait satisfaction process.
		 */
		w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
		    wait_block, wb_waitlist);

		if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
		    w->wb_waittype == WAITTYPE_ALL) {
			if (prevstate == 0) {
				dh->dh_sigstate = 1;
				ntoskrnl_waittest(dh, increment);
			}
		} else {
			we = w->wb_ext;
			w->wb_awakened |= TRUE;
			cv_broadcastpri(&we->we_cv,
			    (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
			    w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
		}
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);

KeClearEvent(kevent)

	kevent->k_header.dh_sigstate = FALSE;

KeReadStateEvent(kevent)

	return (kevent->k_header.dh_sigstate);
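/*
 * Sketch (assumption, not original text): the two event flavors
 * implemented above. A notification event stays signalled until
 * explicitly reset, waking every waiter; a synchronization event
 * wakes a single waiter and clears itself automatically.
 */
#if 0
	static nt_kevent	ev;

	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
	KeSetEvent(&ev, IO_NO_INCREMENT, FALSE);	/* wake all waiters */
	KeWaitForSingleObject(&ev, 0, 0, FALSE, NULL);	/* returns at once */
	KeResetEvent(&ev);				/* back to unsignalled */
#endif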
/*
 * The object manager in Windows is responsible for managing
 * references and access to various types of objects, including
 * device_objects, events, threads, timers and so on. However,
 * there's a difference in the way objects are handled in user
 * mode versus kernel mode.
 *
 * In user mode (i.e. Win32 applications), all objects are
 * managed by the object manager. For example, when you create
 * a timer or event object, you actually end up with an
 * object_header (for the object manager's bookkeeping
 * purposes) and an object body (which contains the actual object
 * structure, e.g. ktimer, kevent, etc...). This allows Windows
 * to manage resource quotas and to enforce access restrictions
 * on basically every kind of system object handled by the kernel.
 *
 * However, in kernel mode, you only end up using the object
 * manager some of the time. For example, in a driver, you create
 * a timer object by simply allocating the memory for a ktimer
 * structure and initializing it with KeInitializeTimer(). Hence,
 * the timer has no object_header and no reference counting or
 * security/resource checks are done on it. The assumption in
 * this case is that if you're running in kernel mode, you know
 * what you're doing, and you're already at an elevated privilege
 * level anyway.
 *
 * There are some exceptions to this. The two most important ones
 * for our purposes are device_objects and threads. We need to use
 * the object manager to do reference counting on device_objects,
 * and for threads, you can only get a pointer to a thread's
 * dispatch header by using ObReferenceObjectByHandle() on the
 * handle returned by PsCreateSystemThread().
 */

ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
    uint8_t accessmode, void **object, void **handleinfo)

	nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (nr == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	InitializeListHead((&nr->no_dh.dh_waitlisthead));
	nr->no_obj = handle;
	nr->no_dh.dh_type = DISP_TYPE_THREAD;
	nr->no_dh.dh_sigstate = 0;
	nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
	    sizeof(uint32_t));
	TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
	*object = nr;

	return (STATUS_SUCCESS);

ObfDereferenceObject(object)

	nr = object;
	TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
	free(nr, M_DEVBUF);

	return (STATUS_SUCCESS);
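/*
 * Sketch of the expected usage (not in the original source): turn a
 * handle into a referenced, waitable object, then drop the reference.
 * In this emulation the "object" handed back is really the nt_objref
 * tracking entry built above, carrying a thread-style dispatch header.
 */
#if 0
	void	*obj;

	if (ObReferenceObjectByHandle(handle, 0, NULL, 0, &obj,
	    NULL) == STATUS_SUCCESS) {
		/* obj may now be passed to KeWaitForSingleObject(). */
		ObfDereferenceObject(obj);
	}
#endif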
WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
	uint32_t traceclass;

	return (STATUS_NOT_FOUND);

WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
    void *guid, uint16_t messagenum, ...)

	return (STATUS_SUCCESS);

IoWMIRegistrationControl(dobj, action)
	device_object *dobj;

	return (STATUS_SUCCESS);

/*
 * This is here just in case the thread returns without calling
 * PsTerminateSystemThread().
 */
ntoskrnl_thrfunc(arg)

	thread_context *thrctx;
	uint32_t (*tfunc)(void *);

	thrctx = arg;
	tfunc = thrctx->tc_thrfunc;
	tctx = thrctx->tc_thrctx;
	free(thrctx, M_TEMP);

	rval = MSCALL1(tfunc, tctx);

	PsTerminateSystemThread(rval);
	return; /* notreached */

PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
    clientid, thrfunc, thrctx)
	ndis_handle *handle;
	ndis_handle phandle;

	tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
	if (tc == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	tc->tc_thrctx = thrctx;
	tc->tc_thrfunc = thrfunc;

	error = kproc_create(ntoskrnl_thrfunc, tc, &p,
	    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Kthread %d", ntoskrnl_kth);
	if (error)
		return (STATUS_INSUFFICIENT_RESOURCES);

	return (STATUS_SUCCESS);

/*
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * them.
 */
PsTerminateSystemThread(status)

	struct nt_objref *nr;

	mtx_lock(&ntoskrnl_dispatchlock);
	TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
		if (nr->no_obj != curthread->td_proc)
			continue;
		nr->no_dh.dh_sigstate = 1;
		ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (0); /* notreached */
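/*
 * Sketch (assumption, not original text): the create/wait/terminate
 * sequence the reference list above exists to support. "examp_thrfunc"
 * and "examp_ctx" are hypothetical; the wait completes when the new
 * thread calls PsTerminateSystemThread(), which signals the objref.
 */
#if 0
	ndis_handle	th;
	void		*tobj;

	if (PsCreateSystemThread(&th, 0, NULL, NULL, NULL,
	    examp_thrfunc, examp_ctx) != STATUS_SUCCESS)
		return;
	ObReferenceObjectByHandle(th, 0, NULL, 0, &tobj, NULL);
	KeWaitForSingleObject(tobj, 0, 0, FALSE, NULL);
	ObfDereferenceObject(tobj);
#endif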
DbgPrint(char *fmt, ...)

	return (STATUS_SUCCESS);

DbgBreakPoint(void)

	kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");

KeBugCheckEx(code, param1, param2, param3, param4)

	panic("KeBugCheckEx: STOP 0x%X", code);

ntoskrnl_timercall(arg)

	mtx_lock(&ntoskrnl_dispatchlock);

#ifdef NTOSKRNL_DEBUG_TIMERS
	ntoskrnl_timer_fires++;
#endif
	ntoskrnl_remove_timer(timer);

	/*
	 * This should never happen, but complain
	 * if it does.
	 */
	if (timer->k_header.dh_inserted == FALSE) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		printf("NTOS: timer %p fired even though "
		    "it was canceled\n", timer);
		return;
	}

	/* Mark the timer as no longer being on the timer queue. */

	timer->k_header.dh_inserted = FALSE;

	/* Now signal the object and satisfy any waits on it. */

	timer->k_header.dh_sigstate = 1;
	ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);

	/*
	 * If this is a periodic timer, re-arm it
	 * so it will fire again. We do this before
	 * calling any deferred procedure calls because
	 * it's possible the DPC might cancel the timer,
	 * in which case it would be wrong for us to
	 * re-arm it again afterwards.
	 */

	if (timer->k_period) {
		tv.tv_sec = 0;
		tv.tv_usec = timer->k_period * 1000;
		timer->k_header.dh_inserted = TRUE;
		ntoskrnl_insert_timer(timer, tvtohz(&tv));
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_reloads++;
#endif
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	/* If there's a DPC associated with the timer, queue it up. */

	dpc = timer->k_dpc;
	if (dpc != NULL)
		KeInsertQueueDpc(dpc, NULL, NULL);

#ifdef NTOSKRNL_DEBUG_TIMERS
sysctl_show_timers(SYSCTL_HANDLER_ARGS)

	ntoskrnl_show_timers();
	return (sysctl_handle_int(oidp, &ret, 0, req));

ntoskrnl_show_timers()

	mtx_lock_spin(&ntoskrnl_calllock);
	l = ntoskrnl_calllist.nle_flink;
	while (l != &ntoskrnl_calllist) {
		i++;
		l = l->nle_flink;
	}
	mtx_unlock_spin(&ntoskrnl_calllock);

	printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
	printf("timer sets: %qu\n", ntoskrnl_timer_sets);
	printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
	printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
	printf("timer fires: %qu\n", ntoskrnl_timer_fires);
#endif /* NTOSKRNL_DEBUG_TIMERS */

/*
 * Must be called with dispatcher lock held.
 */

ntoskrnl_insert_timer(timer, ticks)

	/*
	 * Try and allocate a timer.
	 */
	mtx_lock_spin(&ntoskrnl_calllock);
	if (IsListEmpty(&ntoskrnl_calllist)) {
		mtx_unlock_spin(&ntoskrnl_calllock);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_show_timers();
#endif
		panic("out of timers!");
	}
	l = RemoveHeadList(&ntoskrnl_calllist);
	mtx_unlock_spin(&ntoskrnl_calllock);

	e = CONTAINING_RECORD(l, callout_entry, ce_list);
	c = &e->ce_callout;
	timer->k_callout = c;

	callout_init(c, CALLOUT_MPSAFE);
	callout_reset(c, ticks, ntoskrnl_timercall, timer);

ntoskrnl_remove_timer(timer)

	e = (callout_entry *)timer->k_callout;
	callout_stop(timer->k_callout);

	mtx_lock_spin(&ntoskrnl_calllock);
	InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
	mtx_unlock_spin(&ntoskrnl_calllock);

KeInitializeTimer(timer)

	KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);

KeInitializeTimerEx(timer, type)

	bzero((char *)timer, sizeof(ktimer));
	InitializeListHead((&timer->k_header.dh_waitlisthead));
	timer->k_header.dh_sigstate = FALSE;
	timer->k_header.dh_inserted = FALSE;
	if (type == EVENT_TYPE_NOTIFY)
		timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
	else
		timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
	timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);

/*
 * DPC subsystem. A Windows Deferred Procedure Call has the following
 * properties:
 * - It runs at DISPATCH_LEVEL.
 * - It can have one of 3 importance values that control when it
 *   runs relative to other DPCs in the queue.
 * - On SMP systems, it can be set to run on a specific processor.
 * In order to satisfy the last property, we create a DPC thread for
 * each CPU in the system and bind it to that CPU. Each thread
 * maintains three queues with different importance levels, which
 * will be processed in order from lowest to highest.
 *
 * In Windows, interrupt handlers run as DPCs. (Not to be confused
 * with ISRs, which run in interrupt context and can preempt DPCs.)
 * ISRs are given the highest importance so that they'll take
 * precedence over timers and other things.
 */

ntoskrnl_dpc_thread(arg)

	InitializeListHead(&kq->kq_disp);
	kq->kq_td = curthread;

	kq->kq_running = FALSE;
	KeInitializeSpinLock(&kq->kq_lock);
	KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
	KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);

	/*
	 * Elevate our priority. DPCs are used to run interrupt
	 * handlers, and they should trigger as soon as possible
	 * once scheduled by an ISR.
	 */

	thread_lock(curthread);
#ifdef NTOSKRNL_MULTIPLE_DPCS
	sched_bind(curthread, kq->kq_cpu);
#endif
	sched_prio(curthread, PRI_MIN_KERN);
	thread_unlock(curthread);

		KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

		KeAcquireSpinLock(&kq->kq_lock, &irql);
			KeReleaseSpinLock(&kq->kq_lock, irql);

		kq->kq_running = TRUE;

		while (!IsListEmpty(&kq->kq_disp)) {
			l = RemoveHeadList((&kq->kq_disp));
			d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
			InitializeListHead((&d->k_dpclistentry));
			KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
			MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
			    d->k_sysarg1, d->k_sysarg2);
			KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
		}

		kq->kq_running = FALSE;

		KeReleaseSpinLock(&kq->kq_lock, irql);

		KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);

	return; /* notreached */
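/*
 * Sketch (not in the original source): arming a DPC against the queue
 * thread above. "examp_dpcfunc" is a hypothetical Windows-ABI routine;
 * the thread dequeues the kdpc and calls it through MSCALL4().
 */
#if 0
	static kdpc	dpc;

	KeInitializeDpc(&dpc, examp_dpcfunc, sc);
	KeSetImportanceDpc(&dpc, KDPC_IMPORTANCE_HIGH);
	/* With k_num left at KDPC_CPU_DEFAULT, this queues to the
	   current CPU. */
	KeInsertQueueDpc(&dpc, NULL, NULL);
#endif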
ntoskrnl_destroy_dpc_threads(void)

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif

		KeInitializeDpc(&dpc, NULL, NULL);
		KeSetTargetProcessorDpc(&dpc, i);
		KeInsertQueueDpc(&dpc, NULL, NULL);

		tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
	}

ntoskrnl_insert_dpc(head, dpc)

	l = head->nle_flink;
	while (l != head) {
		d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
		if (d == dpc)
			return (FALSE);
		l = l->nle_flink;
	}

	if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
		InsertTailList((head), (&dpc->k_dpclistentry));
	else
		InsertHeadList((head), (&dpc->k_dpclistentry));

	return (TRUE);

KeInitializeDpc(dpc, dpcfunc, dpcctx)

	dpc->k_deferedfunc = dpcfunc;
	dpc->k_deferredctx = dpcctx;
	dpc->k_num = KDPC_CPU_DEFAULT;
	dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
	InitializeListHead((&dpc->k_dpclistentry));

KeInsertQueueDpc(dpc, sysarg1, sysarg2)

#ifdef NTOSKRNL_MULTIPLE_DPCS
	KeRaiseIrql(DISPATCH_LEVEL, &irql);

	/*
	 * By default, the DPC is queued to run on the same CPU
	 * that scheduled it.
	 */

	if (dpc->k_num == KDPC_CPU_DEFAULT)
		kq += curthread->td_oncpu;
	else
		kq = kq_queues + dpc->k_num;
	KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
#else
	KeAcquireSpinLock(&kq->kq_lock, &irql);
#endif

	r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
	if (r == TRUE) {
		dpc->k_sysarg1 = sysarg1;
		dpc->k_sysarg2 = sysarg2;
	}
	KeReleaseSpinLock(&kq->kq_lock, irql);

	KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);

	return (r);

KeRemoveQueueDpc(dpc)

#ifdef NTOSKRNL_MULTIPLE_DPCS
	KeRaiseIrql(DISPATCH_LEVEL, &irql);

	kq = kq_queues + dpc->k_num;

	KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
#else
	KeAcquireSpinLock(&kq->kq_lock, &irql);
#endif

	if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
		KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
		return (FALSE);
	}

	RemoveEntryList((&dpc->k_dpclistentry));
	InitializeListHead((&dpc->k_dpclistentry));

	KeReleaseSpinLock(&kq->kq_lock, irql);

	return (TRUE);

KeSetImportanceDpc(dpc, imp)

	if (imp != KDPC_IMPORTANCE_LOW &&
	    imp != KDPC_IMPORTANCE_MEDIUM &&
	    imp != KDPC_IMPORTANCE_HIGH)
		return;

	dpc->k_importance = (uint8_t)imp;

KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)

	dpc->k_num = cpu;

KeFlushQueuedDpcs(void)

	/*
	 * Poke each DPC queue and wait
	 * for them to drain.
	 */

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
		KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
		KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
	}

KeGetCurrentProcessorNumber(void)

	return ((uint32_t)curthread->td_oncpu);

KeSetTimerEx(timer, duetime, period, dpc)

	mtx_lock(&ntoskrnl_dispatchlock);

	if (timer->k_header.dh_inserted == TRUE) {
		ntoskrnl_remove_timer(timer);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_cancels++;
#endif
		timer->k_header.dh_inserted = FALSE;
	}

	timer->k_dpc = dpc;
	timer->k_duetime = duetime;
	timer->k_period = period;
	timer->k_header.dh_sigstate = FALSE;

	if (duetime < 0) {
		tv.tv_sec = - (duetime) / 10000000;
		tv.tv_usec = (- (duetime) / 10) -
		    (tv.tv_sec * 1000000);
	} else {
		ntoskrnl_time(&curtime);
		if (duetime < curtime)
			tv.tv_sec = tv.tv_usec = 0;
		else {
			tv.tv_sec = ((duetime) - curtime) / 10000000;
			tv.tv_usec = ((duetime) - curtime) / 10 -
			    (tv.tv_sec * 1000000);
		}
	}

	timer->k_header.dh_inserted = TRUE;
	ntoskrnl_insert_timer(timer, tvtohz(&tv));
#ifdef NTOSKRNL_DEBUG_TIMERS
	ntoskrnl_timer_sets++;
#endif

	mtx_unlock(&ntoskrnl_dispatchlock);
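/*
 * Worked example (sketch, not original text): duetime is expressed in
 * 100ns units, negative meaning relative to now and positive meaning
 * absolute system time. Per the conversion above, -5000000 becomes
 * tv_sec = 0, tv_usec = 500000, i.e. 500ms from now. The period is
 * in milliseconds.
 */
#if 0
	static ktimer	timer;

	KeInitializeTimer(&timer);
	KeSetTimerEx(&timer, -5000000LL, 0, NULL);	/* once, in 500ms */
	KeSetTimerEx(&timer, -10000000LL, 100, NULL);	/* 1s, then every 100ms */
#endif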
KeSetTimer(timer, duetime, dpc)

	return (KeSetTimerEx(timer, duetime, 0, dpc));

/*
 * The Windows DDK documentation seems to say that cancelling
 * a timer that has a DPC will result in the DPC also being
 * cancelled, but this isn't really the case.
 */

KeCancelTimer(timer)

	mtx_lock(&ntoskrnl_dispatchlock);

	pending = timer->k_header.dh_inserted;

	if (timer->k_header.dh_inserted == TRUE) {
		timer->k_header.dh_inserted = FALSE;
		ntoskrnl_remove_timer(timer);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_cancels++;
#endif
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (pending);
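/*
 * Sketch (assumption, not original text): since cancelling the timer
 * does not cancel its DPC, a driver that wants both stopped must do
 * so explicitly. "timer_dpc" is hypothetical.
 */
#if 0
	if (KeCancelTimer(&timer) == TRUE)
		KeRemoveQueueDpc(&timer_dpc);	/* DPC may still be queued */
#endif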
KeReadStateTimer(timer)

	return (timer->k_header.dh_sigstate);

KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)

	if (wait_mode != 0)
		panic("invalid wait_mode %d", wait_mode);

	KeInitializeTimer(&timer);
	KeSetTimer(&timer, *interval, NULL);
	KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);

	return (STATUS_SUCCESS);

KeQueryInterruptTime(void)

	getmicrouptime(&tv);
	ticks = tvtohz(&tv);

	return (ticks * ((10000000 + hz - 1) / hz));
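/*
 * Added sanity check on the arithmetic above (annotation, not original
 * text): with hz = 1000 a tick is 1ms, and the multiplier works out to
 * (10000000 + 999) / 1000 == 10000 units of 100ns -- exactly 1ms per
 * tick. The (hz - 1) term just makes the division round up.
 */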
static struct thread *
KeGetCurrentThread(void)

	return (curthread);

KeSetPriorityThread(td, pri)

	if (td == NULL)
		return (LOW_REALTIME_PRIORITY);

	if (td->td_priority <= PRI_MIN_KERN)
		old = HIGH_PRIORITY;
	else if (td->td_priority >= PRI_MAX_KERN)
		old = LOW_PRIORITY;
	else
		old = LOW_REALTIME_PRIORITY;

	thread_lock(td);
	if (pri == HIGH_PRIORITY)
		sched_prio(td, PRI_MIN_KERN);
	if (pri == LOW_REALTIME_PRIORITY)
		sched_prio(td, PRI_MIN_KERN + (PRI_MAX_KERN - PRI_MIN_KERN) / 2);
	if (pri == LOW_PRIORITY)
		sched_prio(td, PRI_MAX_KERN);
	thread_unlock(td);

	return (old);

dummy()

	printf("ntoskrnl dummy called...\n");

image_patch_table ntoskrnl_functbl[] = {
	IMPORT_SFUNC(RtlZeroMemory, 2),
	IMPORT_SFUNC(RtlCopyMemory, 3),
	IMPORT_SFUNC(RtlCompareMemory, 3),
	IMPORT_SFUNC(RtlEqualUnicodeString, 3),
	IMPORT_SFUNC(RtlCopyUnicodeString, 2),
	IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
	IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
	IMPORT_SFUNC(RtlInitAnsiString, 2),
	IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
	IMPORT_SFUNC(RtlInitUnicodeString, 2),
	IMPORT_SFUNC(RtlFreeAnsiString, 1),
	IMPORT_SFUNC(RtlFreeUnicodeString, 1),
	IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
	IMPORT_CFUNC(sprintf, 0),
	IMPORT_CFUNC(vsprintf, 0),
	IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
	IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
	IMPORT_CFUNC(DbgPrint, 0),
	IMPORT_SFUNC(DbgBreakPoint, 0),
	IMPORT_SFUNC(KeBugCheckEx, 5),
	IMPORT_CFUNC(strncmp, 0),
	IMPORT_CFUNC(strcmp, 0),
	IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
	IMPORT_CFUNC(strncpy, 0),
	IMPORT_CFUNC(strcpy, 0),
	IMPORT_CFUNC(strlen, 0),
	IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
	IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
	IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
	IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
	IMPORT_CFUNC_MAP(strchr, index, 0),
	IMPORT_CFUNC_MAP(strrchr, rindex, 0),
	IMPORT_CFUNC(memcpy, 0),
	IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
	IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
	IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
	IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
	IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
	IMPORT_FFUNC(IofCallDriver, 2),
	IMPORT_FFUNC(IofCompleteRequest, 2),
	IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
	IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
	IMPORT_SFUNC(IoCancelIrp, 1),
	IMPORT_SFUNC(IoConnectInterrupt, 11),
	IMPORT_SFUNC(IoDisconnectInterrupt, 1),
	IMPORT_SFUNC(IoCreateDevice, 7),
	IMPORT_SFUNC(IoDeleteDevice, 1),
	IMPORT_SFUNC(IoGetAttachedDevice, 1),
	IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
	IMPORT_SFUNC(IoDetachDevice, 1),
	IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
	IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
	IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
	IMPORT_SFUNC(IoAllocateIrp, 2),
	IMPORT_SFUNC(IoReuseIrp, 2),
	IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
	IMPORT_SFUNC(IoFreeIrp, 1),
	IMPORT_SFUNC(IoInitializeIrp, 3),
	IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
	IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
	IMPORT_SFUNC(KeSynchronizeExecution, 3),
	IMPORT_SFUNC(KeWaitForSingleObject, 5),
	IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
	IMPORT_SFUNC(_allmul, 4),
	IMPORT_SFUNC(_alldiv, 4),
	IMPORT_SFUNC(_allrem, 4),
	IMPORT_RFUNC(_allshr, 0),
	IMPORT_RFUNC(_allshl, 0),
	IMPORT_SFUNC(_aullmul, 4),
	IMPORT_SFUNC(_aulldiv, 4),
	IMPORT_SFUNC(_aullrem, 4),
	IMPORT_RFUNC(_aullshr, 0),
	IMPORT_RFUNC(_aullshl, 0),
	IMPORT_CFUNC(atoi, 0),
	IMPORT_CFUNC(atol, 0),
	IMPORT_CFUNC(rand, 0),
	IMPORT_CFUNC(srand, 0),
	IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
	IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
	IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
	IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
	IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
	IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
	IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
	IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
	IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
	IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
	IMPORT_FFUNC(InterlockedPopEntrySList, 1),
	IMPORT_FFUNC(InterlockedPushEntrySList, 2),
	IMPORT_SFUNC(ExQueryDepthSList, 1),
	IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
	    InterlockedPopEntrySList, 1),
	IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
	    InterlockedPushEntrySList, 2),
	IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
	IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
	IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
	IMPORT_SFUNC(ExFreePool, 1),
#ifdef __i386__
	IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
	IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel, 1),
	IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
#else
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
	IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
	IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
	IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
#endif
	IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
	IMPORT_FFUNC(InterlockedIncrement, 1),
	IMPORT_FFUNC(InterlockedDecrement, 1),
	IMPORT_FFUNC(InterlockedExchange, 2),
	IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
	IMPORT_SFUNC(IoAllocateMdl, 5),
	IMPORT_SFUNC(IoFreeMdl, 1),
	IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
	IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
	IMPORT_SFUNC(MmFreeContiguousMemory, 1),
	IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
	IMPORT_SFUNC(MmSizeOfMdl, 1),
	IMPORT_SFUNC(MmMapLockedPages, 2),
	IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
	IMPORT_SFUNC(MmUnmapLockedPages, 2),
	IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
	IMPORT_SFUNC(MmGetPhysicalAddress, 1),
	IMPORT_SFUNC(MmIsAddressValid, 1),
	IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
	IMPORT_SFUNC(MmUnmapIoSpace, 2),
	IMPORT_SFUNC(KeInitializeSpinLock, 1),
	IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
	IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
	IMPORT_SFUNC(IoGetDeviceProperty, 5),
	IMPORT_SFUNC(IoAllocateWorkItem, 1),
	IMPORT_SFUNC(IoFreeWorkItem, 1),
	IMPORT_SFUNC(IoQueueWorkItem, 4),
	IMPORT_SFUNC(ExQueueWorkItem, 2),
	IMPORT_SFUNC(ntoskrnl_workitem, 2),
	IMPORT_SFUNC(KeInitializeMutex, 2),
	IMPORT_SFUNC(KeReleaseMutex, 2),
	IMPORT_SFUNC(KeReadStateMutex, 1),
	IMPORT_SFUNC(KeInitializeEvent, 3),
	IMPORT_SFUNC(KeSetEvent, 3),
	IMPORT_SFUNC(KeResetEvent, 1),
	IMPORT_SFUNC(KeClearEvent, 1),
	IMPORT_SFUNC(KeReadStateEvent, 1),
	IMPORT_SFUNC(KeInitializeTimer, 1),
	IMPORT_SFUNC(KeInitializeTimerEx, 2),
	IMPORT_SFUNC(KeSetTimer, 3),
	IMPORT_SFUNC(KeSetTimerEx, 4),
	IMPORT_SFUNC(KeCancelTimer, 1),
	IMPORT_SFUNC(KeReadStateTimer, 1),
	IMPORT_SFUNC(KeInitializeDpc, 3),
	IMPORT_SFUNC(KeInsertQueueDpc, 3),
	IMPORT_SFUNC(KeRemoveQueueDpc, 1),
	IMPORT_SFUNC(KeSetImportanceDpc, 2),
	IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
	IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
	IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
	IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
	IMPORT_FFUNC(ObfDereferenceObject, 1),
	IMPORT_SFUNC(ZwClose, 1),
	IMPORT_SFUNC(PsCreateSystemThread, 7),
	IMPORT_SFUNC(PsTerminateSystemThread, 1),
	IMPORT_SFUNC(IoWMIRegistrationControl, 2),
	IMPORT_SFUNC(WmiQueryTraceInformation, 5),
	IMPORT_CFUNC(WmiTraceMessage, 0),
	IMPORT_SFUNC(KeQuerySystemTime, 1),
	IMPORT_CFUNC(KeTickCount, 0),
	IMPORT_SFUNC(KeDelayExecutionThread, 3),
	IMPORT_SFUNC(KeQueryInterruptTime, 0),
	IMPORT_SFUNC(KeGetCurrentThread, 0),
	IMPORT_SFUNC(KeSetPriorityThread, 2),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */

	{ NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },

	/* End of list. */

	{ NULL, NULL, NULL }
};