1 /*-
2  * Copyright (c) 2003
3  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/ctype.h>
37 #include <sys/unistd.h>
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45
46 #include <sys/callout.h>
47 #if __FreeBSD_version > 502113
48 #include <sys/kdb.h>
49 #endif
50 #include <sys/kernel.h>
51 #include <sys/proc.h>
52 #include <sys/condvar.h>
53 #include <sys/kthread.h>
54 #include <sys/module.h>
55 #include <sys/smp.h>
56 #include <sys/sched.h>
57 #include <sys/sysctl.h>
58
59 #include <machine/atomic.h>
60 #include <machine/bus.h>
61 #include <machine/stdarg.h>
62 #include <machine/resource.h>
63
64 #include <sys/bus.h>
65 #include <sys/rman.h>
66
67 #include <vm/vm.h>
68 #include <vm/vm_param.h>
69 #include <vm/pmap.h>
70 #include <vm/uma.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_map.h>
73
74 #include <compat/ndis/pe_var.h>
75 #include <compat/ndis/cfg_var.h>
76 #include <compat/ndis/resource_var.h>
77 #include <compat/ndis/ntoskrnl_var.h>
78 #include <compat/ndis/hal_var.h>
79 #include <compat/ndis/ndis_var.h>
80
81 #ifdef NTOSKRNL_DEBUG_TIMERS
82 static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
83
84 SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
85         sysctl_show_timers, "I", "Show ntoskrnl timer stats");
86 #endif
87
88 struct kdpc_queue {
89         list_entry              kq_disp;
90         struct thread           *kq_td;
91         int                     kq_cpu;
92         int                     kq_exit;
93         int                     kq_running;
94         kspin_lock              kq_lock;
95         nt_kevent               kq_proc;
96         nt_kevent               kq_done;
97 };
98
99 typedef struct kdpc_queue kdpc_queue;
100
101 struct wb_ext {
102         struct cv               we_cv;
103         struct thread           *we_td;
104 };
105
106 typedef struct wb_ext wb_ext;
107
108 #define NTOSKRNL_TIMEOUTS       256
109 #ifdef NTOSKRNL_DEBUG_TIMERS
110 static uint64_t ntoskrnl_timer_fires;
111 static uint64_t ntoskrnl_timer_sets;
112 static uint64_t ntoskrnl_timer_reloads;
113 static uint64_t ntoskrnl_timer_cancels;
114 #endif
115
116 struct callout_entry {
117         struct callout          ce_callout;
118         list_entry              ce_list;
119 };
120
121 typedef struct callout_entry callout_entry;
122
123 static struct list_entry ntoskrnl_calllist;
124 static struct mtx ntoskrnl_calllock;
125
126 static struct list_entry ntoskrnl_intlist;
127 static kspin_lock ntoskrnl_intlock;
128
129 static uint8_t RtlEqualUnicodeString(unicode_string *,
130         unicode_string *, uint8_t);
131 static void RtlCopyUnicodeString(unicode_string *,
132         unicode_string *);
133 static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
134          void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
135 static irp *IoBuildAsynchronousFsdRequest(uint32_t,
136         device_object *, void *, uint32_t, uint64_t *, io_status_block *);
137 static irp *IoBuildDeviceIoControlRequest(uint32_t,
138         device_object *, void *, uint32_t, void *, uint32_t,
139         uint8_t, nt_kevent *, io_status_block *);
140 static irp *IoAllocateIrp(uint8_t, uint8_t);
141 static void IoReuseIrp(irp *, uint32_t);
142 static void IoFreeIrp(irp *);
143 static void IoInitializeIrp(irp *, uint16_t, uint8_t);
144 static irp *IoMakeAssociatedIrp(irp *, uint8_t);
145 static uint32_t KeWaitForMultipleObjects(uint32_t,
146         nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
147         int64_t *, wait_block *);
148 static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
149 static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
150 static void ntoskrnl_satisfy_multiple_waits(wait_block *);
151 static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
152 static void ntoskrnl_insert_timer(ktimer *, int);
153 static void ntoskrnl_remove_timer(ktimer *);
154 #ifdef NTOSKRNL_DEBUG_TIMERS
155 static void ntoskrnl_show_timers(void);
156 #endif
157 static void ntoskrnl_timercall(void *);
158 static void ntoskrnl_dpc_thread(void *);
159 static void ntoskrnl_destroy_dpc_threads(void);
160 static void ntoskrnl_destroy_workitem_threads(void);
161 static void ntoskrnl_workitem_thread(void *);
162 static void ntoskrnl_workitem(device_object *, void *);
163 static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
164 static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
165 static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
166 static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
167 static uint16_t READ_REGISTER_USHORT(uint16_t *);
168 static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
169 static uint32_t READ_REGISTER_ULONG(uint32_t *);
170 static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
171 static uint8_t READ_REGISTER_UCHAR(uint8_t *);
172 static int64_t _allmul(int64_t, int64_t);
173 static int64_t _alldiv(int64_t, int64_t);
174 static int64_t _allrem(int64_t, int64_t);
175 static int64_t _allshr(int64_t, uint8_t);
176 static int64_t _allshl(int64_t, uint8_t);
177 static uint64_t _aullmul(uint64_t, uint64_t);
178 static uint64_t _aulldiv(uint64_t, uint64_t);
179 static uint64_t _aullrem(uint64_t, uint64_t);
180 static uint64_t _aullshr(uint64_t, uint8_t);
181 static uint64_t _aullshl(uint64_t, uint8_t);
182 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
183 static slist_entry *ntoskrnl_popsl(slist_header *);
184 static void ExInitializePagedLookasideList(paged_lookaside_list *,
185         lookaside_alloc_func *, lookaside_free_func *,
186         uint32_t, size_t, uint32_t, uint16_t);
187 static void ExDeletePagedLookasideList(paged_lookaside_list *);
188 static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
189         lookaside_alloc_func *, lookaside_free_func *,
190         uint32_t, size_t, uint32_t, uint16_t);
191 static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
192 static slist_entry
193         *ExInterlockedPushEntrySList(slist_header *,
194         slist_entry *, kspin_lock *);
195 static slist_entry
196         *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
197 static uint32_t InterlockedIncrement(volatile uint32_t *);
198 static uint32_t InterlockedDecrement(volatile uint32_t *);
199 static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
200 static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
201 static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
202         uint64_t, uint64_t, uint64_t, uint32_t);
203 static void MmFreeContiguousMemory(void *);
204 static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t, uint32_t);
205 static uint32_t MmSizeOfMdl(void *, size_t);
206 static void *MmMapLockedPages(mdl *, uint8_t);
207 static void *MmMapLockedPagesSpecifyCache(mdl *,
208         uint8_t, uint32_t, void *, uint32_t, uint32_t);
209 static void MmUnmapLockedPages(void *, mdl *);
210 static uint8_t MmIsAddressValid(void *);
211 static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
212 static void RtlZeroMemory(void *, size_t);
213 static void RtlCopyMemory(void *, const void *, size_t);
214 static size_t RtlCompareMemory(const void *, const void *, size_t);
215 static ndis_status RtlUnicodeStringToInteger(unicode_string *,
216         uint32_t, uint32_t *);
217 static int atoi (const char *);
218 static long atol (const char *);
219 static int rand(void);
220 static void srand(unsigned int);
221 static void KeQuerySystemTime(uint64_t *);
222 static uint32_t KeTickCount(void);
223 static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
224 static void ntoskrnl_thrfunc(void *);
225 static ndis_status PsCreateSystemThread(ndis_handle *,
226         uint32_t, void *, ndis_handle, void *, void *, void *);
227 static ndis_status PsTerminateSystemThread(ndis_status);
228 static ndis_status IoGetDeviceObjectPointer(unicode_string *,
229         uint32_t, void *, device_object *);
230 static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
231         uint32_t, void *, uint32_t *);
232 static void KeInitializeMutex(kmutant *, uint32_t);
233 static uint32_t KeReleaseMutex(kmutant *, uint8_t);
234 static uint32_t KeReadStateMutex(kmutant *);
235 static ndis_status ObReferenceObjectByHandle(ndis_handle,
236         uint32_t, void *, uint8_t, void **, void **);
237 static void ObfDereferenceObject(void *);
238 static uint32_t ZwClose(ndis_handle);
239 static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
240         uint32_t, void *);
241 static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
242 static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
243 static void *ntoskrnl_memset(void *, int, size_t);
244 static void *ntoskrnl_memmove(void *, void *, size_t);
245 static void *ntoskrnl_memchr(void *, unsigned char, size_t);
246 static char *ntoskrnl_strstr(char *, char *);
247 static char *ntoskrnl_strncat(char *, char *, size_t);
248 static int ntoskrnl_toupper(int);
249 static int ntoskrnl_tolower(int);
250 static funcptr ntoskrnl_findwrap(funcptr);
251 static uint32_t DbgPrint(char *, ...);
252 static void DbgBreakPoint(void);
253 static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
254 static void dummy(void);
255
256 static struct mtx ntoskrnl_dispatchlock;
257 static struct mtx ntoskrnl_interlock;
258 static kspin_lock ntoskrnl_cancellock;
259 static int ntoskrnl_kth = 0;
260 static struct nt_objref_head ntoskrnl_reflist;
261 static uma_zone_t mdl_zone;
262 static uma_zone_t iw_zone;
263 static struct kdpc_queue *kq_queues;
264 static struct kdpc_queue *wq_queues;
265 static int wq_idx = 0;
266
267 int
268 ntoskrnl_libinit()
269 {
270         image_patch_table       *patch;
271         int                     error;
272         struct proc             *p;
273         kdpc_queue              *kq;
274         callout_entry           *e;
275         int                     i;
276         char                    name[64];
277
278         mtx_init(&ntoskrnl_dispatchlock,
279             "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
280         mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
281         KeInitializeSpinLock(&ntoskrnl_cancellock);
282         KeInitializeSpinLock(&ntoskrnl_intlock);
283         TAILQ_INIT(&ntoskrnl_reflist);
284
285         InitializeListHead(&ntoskrnl_calllist);
286         InitializeListHead(&ntoskrnl_intlist);
287         mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
288
289         kq_queues = ExAllocatePoolWithTag(NonPagedPool,
290 #ifdef NTOSKRNL_MULTIPLE_DPCS
291             sizeof(kdpc_queue) * mp_ncpus, 0);
292 #else
293             sizeof(kdpc_queue), 0);
294 #endif
295
296         if (kq_queues == NULL)
297                 return(ENOMEM);
298
299         wq_queues = ExAllocatePoolWithTag(NonPagedPool,
300             sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
301
302         if (wq_queues == NULL)
303                 return(ENOMEM);
304
305 #ifdef NTOSKRNL_MULTIPLE_DPCS
306         bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
307 #else
308         bzero((char *)kq_queues, sizeof(kdpc_queue));
309 #endif
310         bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);
311
312         /*
313          * Launch the DPC threads.
314          */
315
316 #ifdef NTOSKRNL_MULTIPLE_DPCS
317         for (i = 0; i < mp_ncpus; i++) {
318 #else
319         for (i = 0; i < 1; i++) {
320 #endif
321                 kq = kq_queues + i;
322                 kq->kq_cpu = i;
323                 sprintf(name, "Windows DPC %d", i);
324                 error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
325                     RFHIGHPID, NDIS_KSTACK_PAGES, name);
326                 if (error)
327                         panic("failed to launch DPC thread");
328         }
329
330         /*
331          * Launch the workitem threads.
332          */
333
334         for (i = 0; i < WORKITEM_THREADS; i++) {
335                 kq = wq_queues + i;
336                 sprintf(name, "Windows Workitem %d", i);
337                 error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
338                     RFHIGHPID, NDIS_KSTACK_PAGES, name);
339                 if (error)
340                         panic("failed to launch workitem thread");
341         }
342
343         patch = ntoskrnl_functbl;
344         while (patch->ipt_func != NULL) {
345                 windrv_wrap((funcptr)patch->ipt_func,
346                     (funcptr *)&patch->ipt_wrap,
347                     patch->ipt_argcnt, patch->ipt_ftype);
348                 patch++;
349         }
350
351         for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
352                 e = ExAllocatePoolWithTag(NonPagedPool,
353                     sizeof(callout_entry), 0);
354                 if (e == NULL)
355                         panic("failed to allocate timeouts");
356                 mtx_lock_spin(&ntoskrnl_calllock);
357                 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
358                 mtx_unlock_spin(&ntoskrnl_calllock);
359         }
360
361         /*
362          * MDLs are supposed to be variable size (they describe
363          * buffers containing some number of pages, but we don't
364          * know ahead of time how many pages that will be). But
365          * always allocating them off the heap is very slow. As
366          * a compromise, we create an MDL UMA zone big enough to
367          * handle any buffer requiring up to 16 pages, and we
368          * use those for any MDLs for buffers of 16 pages or less
369          * in size. For buffers larger than that (which we assume
370          * will be few and far between), we allocate the MDLs off
371          * the heap. (A hedged sketch of this follows the function.)
372          */
373
374         mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
375             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
376
377         iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
378             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
379
380         return(0);
381 }
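
/*
 * Illustrative sketch, not part of the original source: the zone/heap
 * compromise described in ntoskrnl_libinit() above boils down to a
 * page-count check like the one below. The rough page estimate and the
 * hypothetical NTOSKRNL_EXAMPLES guard are assumptions for
 * illustration; the real policy lives in IoAllocateMdl() later in
 * this file.
 */
#ifdef NTOSKRNL_EXAMPLES
static mdl *
example_mdl_alloc(void *buf, uint32_t len)
{
        mdl                     *m;

        /* Buffers spanning at most 16 pages fit the fixed-size zone. */
        if (howmany(len, PAGE_SIZE) + 1 <= 16)
                m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
        else
                /* Oversized (and presumably rare) MDLs come off the heap. */
                m = ExAllocatePoolWithTag(NonPagedPool,
                    MmSizeOfMdl(buf, len), 0);
        return (m);
}
#endif /* NTOSKRNL_EXAMPLES */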
382
383 int
384 ntoskrnl_libfini()
385 {
386         image_patch_table       *patch;
387         callout_entry           *e;
388         list_entry              *l;
389
390         patch = ntoskrnl_functbl;
391         while (patch->ipt_func != NULL) {
392                 windrv_unwrap(patch->ipt_wrap);
393                 patch++;
394         }
395
396         /* Stop the workitem queues. */
397         ntoskrnl_destroy_workitem_threads();
398         /* Stop the DPC queues. */
399         ntoskrnl_destroy_dpc_threads();
400
401         ExFreePool(kq_queues);
402         ExFreePool(wq_queues);
403
404         uma_zdestroy(mdl_zone);
405         uma_zdestroy(iw_zone);
406
407         mtx_lock_spin(&ntoskrnl_calllock);
408         while(!IsListEmpty(&ntoskrnl_calllist)) {
409                 l = RemoveHeadList(&ntoskrnl_calllist);
410                 e = CONTAINING_RECORD(l, callout_entry, ce_list);
411                 mtx_unlock_spin(&ntoskrnl_calllock);
412                 ExFreePool(e);
413                 mtx_lock_spin(&ntoskrnl_calllock);
414         }
415         mtx_unlock_spin(&ntoskrnl_calllock);
416
417         mtx_destroy(&ntoskrnl_dispatchlock);
418         mtx_destroy(&ntoskrnl_interlock);
419         mtx_destroy(&ntoskrnl_calllock);
420
421         return(0);
422 }
423
424 /*
425  * We need to be able to reference this externally from the wrapper;
426  * GCC only generates a local implementation of memset.
427  */
428 static void *
429 ntoskrnl_memset(buf, ch, size)
430         void                    *buf;
431         int                     ch;
432         size_t                  size;
433 {
434         return(memset(buf, ch, size));
435 }
436
437 static void *
438 ntoskrnl_memmove(dst, src, size)
439         void                    *dst;
440         void                    *src;
441         size_t                  size;
442 {
443         bcopy(src, dst, size);
444         return(dst);
445 }
446
447 static void *
448 ntoskrnl_memchr(buf, ch, len)
449         void                    *buf;
450         unsigned char           ch;
451         size_t                  len;
452 {
453         if (len != 0) {
454                 unsigned char *p = buf;
455
456                 do {
457                         if (*p++ == ch)
458                                 return (p - 1);
459                 } while (--len != 0);
460         }
461         return (NULL);
462 }
463
464 static char *
465 ntoskrnl_strstr(s, find)
466         char *s, *find;
467 {
468         char c, sc;
469         size_t len;
470
471         if ((c = *find++) != 0) {
472                 len = strlen(find);
473                 do {
474                         do {
475                                 if ((sc = *s++) == 0)
476                                         return (NULL);
477                         } while (sc != c);
478                 } while (strncmp(s, find, len) != 0);
479                 s--;
480         }
481         return ((char *)s);
482 }
483
484 /* Taken from libc */
485 static char *
486 ntoskrnl_strncat(dst, src, n)
487         char            *dst;
488         char            *src;
489         size_t          n;
490 {
491         if (n != 0) {
492                 char *d = dst;
493                 const char *s = src;
494
495                 while (*d != 0)
496                         d++;
497                 do {
498                         if ((*d = *s++) == 0)
499                                 break;
500                         d++;
501                 } while (--n != 0);
502                 *d = 0;
503         }
504         return (dst);
505 }
506
507 static int
508 ntoskrnl_toupper(c)
509         int                     c;
510 {
511         return(toupper(c));
512 }
513
514 static int
515 ntoskrnl_tolower(c)
516         int                     c;
517 {
518         return(tolower(c));
519 }
520
521 static uint8_t 
522 RtlEqualUnicodeString(str1, str2, caseinsensitive)
523         unicode_string          *str1;
524         unicode_string          *str2;
525         uint8_t                 caseinsensitive;
526 {
527         int                     i;
528
529         if (str1->us_len != str2->us_len)
530                 return(FALSE);
531
532         for (i = 0; i < str1->us_len / 2; i++) {
533                 if (caseinsensitive == TRUE) {
534                         if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
535                             toupper((char)(str2->us_buf[i] & 0xFF)))
536                                 return(FALSE);
537                 } else {
538                         if (str1->us_buf[i] != str2->us_buf[i])
539                                 return(FALSE);
540                 }
541         }
542
543         return(TRUE);
544 }
545
546 static void
547 RtlCopyUnicodeString(dest, src)
548         unicode_string          *dest;
549         unicode_string          *src;
550 {
551
552         if (dest->us_maxlen >= src->us_len)
553                 dest->us_len = src->us_len;
554         else
555                 dest->us_len = dest->us_maxlen;
556         memcpy(dest->us_buf, src->us_buf, dest->us_len);
557         return;
558 }
559
560 static void
561 ntoskrnl_ascii_to_unicode(ascii, unicode, len)
562         char                    *ascii;
563         uint16_t                *unicode;
564         int                     len;
565 {
566         int                     i;
567         uint16_t                *ustr;
568
569         ustr = unicode;
570         for (i = 0; i < len; i++) {
571                 *ustr = (uint16_t)ascii[i];
572                 ustr++;
573         }
574
575         return;
576 }
577
578 static void
579 ntoskrnl_unicode_to_ascii(unicode, ascii, len)
580         uint16_t                *unicode;
581         char                    *ascii;
582         int                     len;
583 {
584         int                     i;
585         uint8_t                 *astr;
586
587         astr = ascii;
588         for (i = 0; i < len / 2; i++) {
589                 *astr = (uint8_t)unicode[i];
590                 astr++;
591         }
592
593         return;
594 }
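
/*
 * Illustrative sketch, not part of the original source, showing the
 * differing length conventions of the two converters above:
 * ntoskrnl_ascii_to_unicode() takes a character count, while
 * ntoskrnl_unicode_to_ascii() takes the byte length of the unicode
 * buffer and converts len / 2 characters. The NTOSKRNL_EXAMPLES
 * guard is hypothetical.
 */
#ifdef NTOSKRNL_EXAMPLES
static void
example_string_roundtrip(void)
{
        char                    ascii[4] = "abc";
        uint16_t                unicode[4];
        char                    back[4];

        /* len is a character count: convert 4 chars, NUL included. */
        ntoskrnl_ascii_to_unicode(ascii, unicode, 4);
        /* len is a byte count: 8 bytes of UTF-16 yield 4 chars. */
        ntoskrnl_unicode_to_ascii(unicode, back, 8);
}
#endif /* NTOSKRNL_EXAMPLES */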
595
596 uint32_t
597 RtlUnicodeStringToAnsiString(dest, src, allocate)
598         ansi_string             *dest;
599         unicode_string          *src;
600         uint8_t                 allocate;
601 {
602         if (dest == NULL || src == NULL)
603                 return(STATUS_INVALID_PARAMETER);
604
605         dest->as_len = src->us_len / 2;
606         if (dest->as_maxlen < dest->as_len)
607                 dest->as_len = dest->as_maxlen;
608
609         if (allocate == TRUE) {
610                 dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
611                     (src->us_len / 2) + 1, 0);
612                 if (dest->as_buf == NULL)
613                         return(STATUS_INSUFFICIENT_RESOURCES);
614                 dest->as_len = dest->as_maxlen = src->us_len / 2;
615         } else {
616                 dest->as_len = src->us_len / 2; /* XXX */
617                 if (dest->as_maxlen < dest->as_len)
618                         dest->as_len = dest->as_maxlen;
619         }
620
621         ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
622             dest->as_len * 2);
623
624         return (STATUS_SUCCESS);
625 }
626
627 uint32_t
628 RtlAnsiStringToUnicodeString(dest, src, allocate)
629         unicode_string          *dest;
630         ansi_string             *src;
631         uint8_t                 allocate;
632 {
633         if (dest == NULL || src == NULL)
634                 return(STATUS_INVALID_PARAMETER);
635
636         if (allocate == TRUE) {
637                 dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
638                     src->as_len * 2, 0);
639                 if (dest->us_buf == NULL)
640                         return(STATUS_INSUFFICIENT_RESOURCES);
641                 dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
642         } else {
643                 dest->us_len = src->as_len * 2; /* XXX */
644                 if (dest->us_maxlen < dest->us_len)
645                         dest->us_len = dest->us_maxlen;
646         }
647
648         ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
649             dest->us_len / 2);
650
651         return (STATUS_SUCCESS);
652 }
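
/*
 * Illustrative sketch, not part of the original source: converting an
 * ANSI name with allocate == TRUE hands the caller a buffer that must
 * be released with ExFreePool() when done. The names and the
 * NTOSKRNL_EXAMPLES guard are invented for the example.
 */
#ifdef NTOSKRNL_EXAMPLES
static int
example_ansi_to_unicode(void)
{
        char                    name[] = "ndis0";
        ansi_string             as;
        unicode_string          us;

        as.as_buf = name;
        as.as_len = as.as_maxlen = sizeof(name) - 1;

        if (RtlAnsiStringToUnicodeString(&us, &as, TRUE) != STATUS_SUCCESS)
                return (ENOMEM);
        /* ... use us.us_buf (us.us_len bytes of UTF-16) ... */
        ExFreePool(us.us_buf);
        return (0);
}
#endif /* NTOSKRNL_EXAMPLES */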
653
654 void *
655 ExAllocatePoolWithTag(pooltype, len, tag)
656         uint32_t                pooltype;
657         size_t                  len;
658         uint32_t                tag;
659 {
660         void                    *buf;
661
662         buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
663         if (buf == NULL)
664                 return(NULL);
665
666         return(buf);
667 }
668
669 void
670 ExFreePool(buf)
671         void                    *buf;
672 {
673         free(buf, M_DEVBUF);
674         return;
675 }
676
677 uint32_t
678 IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
679         driver_object           *drv;
680         void                    *clid;
681         uint32_t                extlen;
682         void                    **ext;
683 {
684         custom_extension        *ce;
685
686         ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
687             + extlen, 0);
688
689         if (ce == NULL)
690                 return(STATUS_INSUFFICIENT_RESOURCES);
691
692         ce->ce_clid = clid;
693         InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
694
695         *ext = (void *)(ce + 1);
696
697         return(STATUS_SUCCESS);
698 }
699
700 void *
701 IoGetDriverObjectExtension(drv, clid)
702         driver_object           *drv;
703         void                    *clid;
704 {
705         list_entry              *e;
706         custom_extension        *ce;
707
708         /*
709          * Sanity check. Our dummy bus drivers don't have
710          * any driver extensions.
711          */
712
713         if (drv->dro_driverext == NULL)
714                 return(NULL);
715
716         e = drv->dro_driverext->dre_usrext.nle_flink;
717         while (e != &drv->dro_driverext->dre_usrext) {
718                 ce = (custom_extension *)e;
719                 if (ce->ce_clid == clid)
720                         return((void *)(ce + 1));
721                 e = e->nle_flink;
722         }
723
724         return(NULL);
725 }
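
/*
 * Illustrative sketch, not part of the original source: the extension
 * memory managed by the two routines above is a custom_extension
 * header followed immediately by the caller's data, which is why both
 * hand back (ce + 1). Any unique address serves as the client ID. The
 * struct, the names and the NTOSKRNL_EXAMPLES guard are invented here.
 */
#ifdef NTOSKRNL_EXAMPLES
struct example_ext {
        int                     state;
};

static void
example_driver_extension(driver_object *drv)
{
        static int              example_clid;
        struct example_ext      *ep;

        if (IoAllocateDriverObjectExtension(drv, &example_clid,
            sizeof(struct example_ext), (void **)&ep) != STATUS_SUCCESS)
                return;
        ep->state = 1;
        /* A later lookup by the same client ID finds the same memory. */
        ep = IoGetDriverObjectExtension(drv, &example_clid);
}
#endif /* NTOSKRNL_EXAMPLES */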
726
727
728 uint32_t
729 IoCreateDevice(drv, devextlen, devname, devtype, devchars, exclusive, newdev)
730         driver_object           *drv;
731         uint32_t                devextlen;
732         unicode_string          *devname;
733         uint32_t                devtype;
734         uint32_t                devchars;
735         uint8_t                 exclusive;
736         device_object           **newdev;
737 {
738         device_object           *dev;
739
740         dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
741         if (dev == NULL)
742                 return(STATUS_INSUFFICIENT_RESOURCES);
743
744         dev->do_type = devtype;
745         dev->do_drvobj = drv;
746         dev->do_currirp = NULL;
747         dev->do_flags = 0;
748
749         if (devextlen) {
750                 dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
751                     devextlen, 0);
752
753                 if (dev->do_devext == NULL) {
754                         ExFreePool(dev);
755                         return(STATUS_INSUFFICIENT_RESOURCES);
756                 }
757
758                 bzero(dev->do_devext, devextlen);
759         } else
760                 dev->do_devext = NULL;
761
762         dev->do_size = sizeof(device_object) + devextlen;
763         dev->do_refcnt = 1;
764         dev->do_attacheddev = NULL;
765         dev->do_nextdev = NULL;
766         dev->do_devtype = devtype;
767         dev->do_stacksize = 1;
768         dev->do_alignreq = 1;
769         dev->do_characteristics = devchars;
770         dev->do_iotimer = NULL;
771         KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
772
773         /*
774          * Vpb is used for disk/tape devices,
775          * but we don't support those. (Yet.)
776          */
777         dev->do_vpb = NULL;
778
779         dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
780             sizeof(devobj_extension), 0);
781
782         if (dev->do_devobj_ext == NULL) {
783                 if (dev->do_devext != NULL)
784                         ExFreePool(dev->do_devext);
785                 ExFreePool(dev);
786                 return(STATUS_INSUFFICIENT_RESOURCES);
787         }
788
789         dev->do_devobj_ext->dve_type = 0;
790         dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
791         dev->do_devobj_ext->dve_devobj = dev;
792
793         /*
794          * Attach this device to the driver object's list
795          * of devices. Note: this is not the same as attaching
796          * the device to the device stack. The driver's AddDevice
797          * routine must explicitly call IoAttachDeviceToDeviceStack()
798          * to do that. (A hedged sketch follows this function.)
799          */
800
801         if (drv->dro_devobj == NULL) {
802                 drv->dro_devobj = dev;
803                 dev->do_nextdev = NULL;
804         } else {
805                 dev->do_nextdev = drv->dro_devobj;
806                 drv->dro_devobj = dev;
807         }
808
809         *newdev = dev;
810
811         return(STATUS_SUCCESS);
812 }
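
/*
 * Illustrative sketch, not part of the original source: a driver's
 * AddDevice routine typically pairs IoCreateDevice() with
 * IoAttachDeviceToDeviceStack() (defined later in this file), since
 * creating the device only links it into the driver object's device
 * list. The names, the devtype/devchars of 0 and the
 * NTOSKRNL_EXAMPLES guard are assumptions for the example.
 */
#ifdef NTOSKRNL_EXAMPLES
static uint32_t
example_add_device(driver_object *drv, device_object *pdo)
{
        device_object           *fdo;
        uint32_t                rval;

        rval = IoCreateDevice(drv, 0, NULL, 0, 0, FALSE, &fdo);
        if (rval != STATUS_SUCCESS)
                return (rval);

        /* Now place the new device on top of the target stack. */
        IoAttachDeviceToDeviceStack(fdo, pdo);
        return (STATUS_SUCCESS);
}
#endif /* NTOSKRNL_EXAMPLES */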
813
814 void
815 IoDeleteDevice(dev)
816         device_object           *dev;
817 {
818         device_object           *prev;
819
820         if (dev == NULL)
821                 return;
822
823         if (dev->do_devobj_ext != NULL)
824                 ExFreePool(dev->do_devobj_ext);
825
826         if (dev->do_devext != NULL)
827                 ExFreePool(dev->do_devext);
828
829         /* Unlink the device from the driver's device list. */
830
831         prev = dev->do_drvobj->dro_devobj;
832         if (prev == dev)
833                 dev->do_drvobj->dro_devobj = dev->do_nextdev;
834         else {
835                 while (prev->do_nextdev != dev)
836                         prev = prev->do_nextdev;
837                 prev->do_nextdev = dev->do_nextdev;
838         }
839
840         ExFreePool(dev);
841
842         return;
843 }
844
845 device_object *
846 IoGetAttachedDevice(dev)
847         device_object           *dev;
848 {
849         device_object           *d;
850
851         if (dev == NULL)
852                 return (NULL);
853
854         d = dev;
855
856         while (d->do_attacheddev != NULL)
857                 d = d->do_attacheddev;
858
859         return (d);
860 }
861
862 static irp *
863 IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
864         uint32_t                func;
865         device_object           *dobj;
866         void                    *buf;
867         uint32_t                len;
868         uint64_t                *off;
869         nt_kevent               *event;
870         io_status_block         *status;
871 {
872         irp                     *ip;
873
874         ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
875         if (ip == NULL)
876                 return(NULL);
877         ip->irp_usrevent = event;
878
879         return(ip);
880 }
881
882 static irp *
883 IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
884         uint32_t                func;
885         device_object           *dobj;
886         void                    *buf;
887         uint32_t                len;
888         uint64_t                *off;
889         io_status_block         *status;
890 {
891         irp                     *ip;
892         io_stack_location       *sl;
893
894         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
895         if (ip == NULL)
896                 return(NULL);
897
898         ip->irp_usriostat = status;
899         ip->irp_tail.irp_overlay.irp_thread = NULL;
900
901         sl = IoGetNextIrpStackLocation(ip);
902         sl->isl_major = func;
903         sl->isl_minor = 0;
904         sl->isl_flags = 0;
905         sl->isl_ctl = 0;
906         sl->isl_devobj = dobj;
907         sl->isl_fileobj = NULL;
908         sl->isl_completionfunc = NULL;
909
910         ip->irp_userbuf = buf;
911
912         if (dobj->do_flags & DO_BUFFERED_IO) {
913                 ip->irp_assoc.irp_sysbuf =
914                     ExAllocatePoolWithTag(NonPagedPool, len, 0);
915                 if (ip->irp_assoc.irp_sysbuf == NULL) {
916                         IoFreeIrp(ip);
917                         return(NULL);
918                 }
919                 bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
920         }
921
922         if (dobj->do_flags & DO_DIRECT_IO) {
923                 ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
924                 if (ip->irp_mdl == NULL) {
925                         if (ip->irp_assoc.irp_sysbuf != NULL)
926                                 ExFreePool(ip->irp_assoc.irp_sysbuf);
927                         IoFreeIrp(ip);
928                         return(NULL);
929                 }
930                 ip->irp_userbuf = NULL;
931                 ip->irp_assoc.irp_sysbuf = NULL;
932         }
933
934         if (func == IRP_MJ_READ) {
935                 sl->isl_parameters.isl_read.isl_len = len;
936                 if (off != NULL)
937                         sl->isl_parameters.isl_read.isl_byteoff = *off;
938                 else
939                         sl->isl_parameters.isl_read.isl_byteoff = 0;
940         }
941
942         if (func == IRP_MJ_WRITE) {
943                 sl->isl_parameters.isl_write.isl_len = len;
944                 if (off != NULL)
945                         sl->isl_parameters.isl_write.isl_byteoff = *off;
946                 else
947                         sl->isl_parameters.isl_write.isl_byteoff = 0;
948         }       
949
950         return(ip);
951 }
952
953 static irp *
954 IoBuildDeviceIoControlRequest(iocode, dobj, ibuf, ilen, obuf, olen,
955     isinternal, event, status)
956         uint32_t                iocode;
957         device_object           *dobj;
958         void                    *ibuf;
959         uint32_t                ilen;
960         void                    *obuf;
961         uint32_t                olen;
962         uint8_t                 isinternal;
963         nt_kevent               *event;
964         io_status_block         *status;
965 {
966         irp                     *ip;
967         io_stack_location       *sl;
968         uint32_t                buflen;
969
970         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
971         if (ip == NULL)
972                 return(NULL);
973         ip->irp_usrevent = event;
974         ip->irp_usriostat = status;
975         ip->irp_tail.irp_overlay.irp_thread = NULL;
976
977         sl = IoGetNextIrpStackLocation(ip);
978         sl->isl_major = isinternal == TRUE ?
979             IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
980         sl->isl_minor = 0;
981         sl->isl_flags = 0;
982         sl->isl_ctl = 0;
983         sl->isl_devobj = dobj;
984         sl->isl_fileobj = NULL;
985         sl->isl_completionfunc = NULL;
986         sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
987         sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
988         sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
989
990         switch(IO_METHOD(iocode)) {
991         case METHOD_BUFFERED:
992                 if (ilen > olen)
993                         buflen = ilen;
994                 else
995                         buflen = olen;
996                 if (buflen) {
997                         ip->irp_assoc.irp_sysbuf =
998                             ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
999                         if (ip->irp_assoc.irp_sysbuf == NULL) {
1000                                 IoFreeIrp(ip);
1001                                 return(NULL);
1002                         }
1003                 }
1004                 if (ilen && ibuf != NULL) {
1005                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
1006                         bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
1007                             buflen - ilen);
1008                 } else
1009                         bzero(ip->irp_assoc.irp_sysbuf, ilen);
1010                 ip->irp_userbuf = obuf;
1011                 break;
1012         case METHOD_IN_DIRECT:
1013         case METHOD_OUT_DIRECT:
1014                 if (ilen && ibuf != NULL) {
1015                         ip->irp_assoc.irp_sysbuf =
1016                             ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
1017                         if (ip->irp_assoc.irp_sysbuf == NULL) {
1018                                 IoFreeIrp(ip);
1019                                 return(NULL);
1020                         }
1021                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
1022                 }
1023                 if (olen && obuf != NULL) {
1024                         ip->irp_mdl = IoAllocateMdl(obuf, olen,
1025                             FALSE, FALSE, ip);
1026                         /*
1027                          * Normally we would MmProbeAndLockPages()
1028                          * here, but we don't have to in our
1029                          * implementation.
1030                          */
1031                 }
1032                 break;
1033         case METHOD_NEITHER:
1034                 ip->irp_userbuf = obuf;
1035                 sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
1036                 break;
1037         default:
1038                 break;
1039         }
1040
1041         /*
1042          * Ideally, we should associate this IRP with the calling
1043          * thread here.
1044          */
1045
1046         return (ip);
1047 }
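
/*
 * Illustrative note, not part of the original source: Windows packs an
 * IOCTL's transfer type into its low two bits, which is what
 * IO_METHOD() extracts for the switch above. The CTL_CODE-style macro
 * below is a hypothetical reconstruction of that encoding, and the
 * NTOSKRNL_EXAMPLES guard is invented.
 */
#ifdef NTOSKRNL_EXAMPLES
#define EXAMPLE_CTL_CODE(dtype, func, method, acc)              \
        (((dtype) << 16) | ((acc) << 14) | ((func) << 2) | (method))

static void
example_ioctl_method(void)
{
        uint32_t                iocode;

        iocode = EXAMPLE_CTL_CODE(0x17, 0, METHOD_OUT_DIRECT, 0);
        /* IO_METHOD() recovers the low two bits. */
        KASSERT(IO_METHOD(iocode) == METHOD_OUT_DIRECT,
            ("unexpected method bits"));
}
#endif /* NTOSKRNL_EXAMPLES */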
1048
1049 static irp *
1050 IoAllocateIrp(stsize, chargequota)
1051         uint8_t                 stsize;
1052         uint8_t                 chargequota;
1053 {
1054         irp                     *i;
1055
1056         i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
1057         if (i == NULL)
1058                 return (NULL);
1059
1060         IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
1061
1062         return (i);
1063 }
1064
1065 static irp *
1066 IoMakeAssociatedIrp(ip, stsize)
1067         irp                     *ip;
1068         uint8_t                 stsize;
1069 {
1070         irp                     *associrp;
1071
1072         associrp = IoAllocateIrp(stsize, FALSE);
1073         if (associrp == NULL)
1074                 return(NULL);
1075
1076         mtx_lock(&ntoskrnl_dispatchlock);
1077         associrp->irp_flags |= IRP_ASSOCIATED_IRP;
1078         associrp->irp_tail.irp_overlay.irp_thread =
1079             ip->irp_tail.irp_overlay.irp_thread;
1080         associrp->irp_assoc.irp_master = ip;
1081         mtx_unlock(&ntoskrnl_dispatchlock);
1082
1083         return(associrp);
1084 }
1085
1086 static void
1087 IoFreeIrp(ip)
1088         irp                     *ip;
1089 {
1090         ExFreePool(ip);
1091         return;
1092 }
1093
1094 static void
1095 IoInitializeIrp(io, psize, ssize)
1096         irp                     *io;
1097         uint16_t                psize;
1098         uint8_t                 ssize;
1099 {
1100         bzero((char *)io, IoSizeOfIrp(ssize));
1101         io->irp_size = psize;
1102         io->irp_stackcnt = ssize;
1103         io->irp_currentstackloc = ssize;
1104         InitializeListHead(&io->irp_thlist);
1105         io->irp_tail.irp_overlay.irp_csl =
1106             (io_stack_location *)(io + 1) + ssize;
1107
1108         return;
1109 }
1110
1111 static void
1112 IoReuseIrp(ip, status)
1113         irp                     *ip;
1114         uint32_t                status;
1115 {
1116         uint8_t                 allocflags;
1117
1118         allocflags = ip->irp_allocflags;
1119         IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
1120         ip->irp_iostat.isb_status = status;
1121         ip->irp_allocflags = allocflags;
1122
1123         return;
1124 }
1125
1126 void
1127 IoAcquireCancelSpinLock(irql)
1128         uint8_t                 *irql;
1129 {
1130         KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
1131         return;
1132 }
1133
1134 void
1135 IoReleaseCancelSpinLock(irql)
1136         uint8_t                 irql;
1137 {
1138         KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
1139         return;
1140 }
1141
1142 uint8_t
1143 IoCancelIrp(irp *ip)
1144 {
1145         cancel_func             cfunc;
1146
1147         IoAcquireCancelSpinLock(&ip->irp_cancelirql);
1148         cfunc = IoSetCancelRoutine(ip, NULL);
1149         ip->irp_cancel = TRUE;
1150         if (cfunc == NULL) {
1151                 IoReleaseCancelSpinLock(ip->irp_cancelirql);
1152                 return(FALSE);
1153         }
1154         MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
1155         return(TRUE);
1156 }
1157
1158 uint32_t
1159 IofCallDriver(dobj, ip)
1160         device_object           *dobj;
1161         irp                     *ip;
1162 {
1163         driver_object           *drvobj;
1164         io_stack_location       *sl;
1165         uint32_t                status;
1166         driver_dispatch         disp;
1167
1168         drvobj = dobj->do_drvobj;
1169
1170         if (ip->irp_currentstackloc <= 0)
1171                 panic("IoCallDriver(): out of stack locations");
1172
1173         IoSetNextIrpStackLocation(ip);
1174         sl = IoGetCurrentIrpStackLocation(ip);
1175
1176         sl->isl_devobj = dobj;
1177
1178         disp = drvobj->dro_dispatch[sl->isl_major];
1179         status = MSCALL2(disp, dobj, ip);
1180
1181         return(status);
1182 }
1183
1184 void
1185 IofCompleteRequest(ip, prioboost)
1186         irp                     *ip;
1187         uint8_t                 prioboost;
1188 {
1189         uint32_t                i;
1190         uint32_t                status;
1191         device_object           *dobj;
1192         io_stack_location       *sl;
1193         completion_func         cf;
1194
1195         ip->irp_pendingreturned =
1196             IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
1197         sl = (io_stack_location *)(ip + 1);
1198
1199         for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
1200                 if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
1201                         IoSkipCurrentIrpStackLocation(ip);
1202                         dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
1203                 } else
1204                         dobj = NULL;
1205
1206                 if (sl[i].isl_completionfunc != NULL &&
1207                     ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
1208                     sl[i].isl_ctl & SL_INVOKE_ON_SUCCESS) ||
1209                     (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
1210                     sl[i].isl_ctl & SL_INVOKE_ON_ERROR) ||
1211                     (ip->irp_cancel == TRUE &&
1212                     sl[i].isl_ctl & SL_INVOKE_ON_CANCEL))) {
1213                         cf = sl[i].isl_completionfunc;
1214                         status = MSCALL3(cf, dobj, ip, sl[i].isl_completionctx);
1215                         if (status == STATUS_MORE_PROCESSING_REQUIRED)
1216                                 return;
1217                 }
1218
1219                 if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
1220                     SL_PENDING_RETURNED)
1221                         ip->irp_pendingreturned = TRUE;
1222         }
1223
1224         /* Handle any associated IRPs. */
1225
1226         if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
1227                 uint32_t                masterirpcnt;
1228                 irp                     *masterirp;
1229                 mdl                     *m;
1230
1231                 masterirp = ip->irp_assoc.irp_master;
1232                 masterirpcnt =
1233                     InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);
1234
1235                 while ((m = ip->irp_mdl) != NULL) {
1236                         ip->irp_mdl = m->mdl_next;
1237                         IoFreeMdl(m);
1238                 }
1239                 IoFreeIrp(ip);
1240                 if (masterirpcnt == 0)
1241                         IoCompleteRequest(masterirp, IO_NO_INCREMENT);
1242                 return;
1243         }
1244
1245         /* With any luck, these conditions will never arise. */
1246
1247         if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
1248                 if (ip->irp_usriostat != NULL)
1249                         *ip->irp_usriostat = ip->irp_iostat;
1250                 if (ip->irp_usrevent != NULL)
1251                         KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
1252                 if (ip->irp_flags & IRP_PAGING_IO) {
1253                         if (ip->irp_mdl != NULL)
1254                                 IoFreeMdl(ip->irp_mdl);
1255                         IoFreeIrp(ip);
1256                 }
1257         }
1258
1259         return;
1260 }
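
/*
 * Illustrative sketch, not part of the original source: a completion
 * routine of the kind invoked through MSCALL3() above. Returning
 * STATUS_MORE_PROCESSING_REQUIRED makes IofCompleteRequest() bail out
 * early, leaving the IRP alive for its owner. In practice such
 * routines reach this shim through the Windows calling convention;
 * the names and the NTOSKRNL_EXAMPLES guard are invented.
 */
#ifdef NTOSKRNL_EXAMPLES
static uint32_t
example_irp_complete(device_object *dobj, irp *ip, void *ctx)
{
        /* Wake whoever queued the IRP... */
        KeSetEvent((nt_kevent *)ctx, IO_NO_INCREMENT, FALSE);
        /* ...and keep ownership so the IRP isn't torn down yet. */
        return (STATUS_MORE_PROCESSING_REQUIRED);
}
#endif /* NTOSKRNL_EXAMPLES */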
1261
1262 void
1263 ntoskrnl_intr(arg)
1264         void                    *arg;
1265 {
1266         kinterrupt              *iobj;
1267         uint8_t                 irql;
1268         uint8_t                 claimed;
1269         list_entry              *l;
1270
1271         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1272         l = ntoskrnl_intlist.nle_flink;
1273         while (l != &ntoskrnl_intlist) {
1274                 iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
1275                 claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
1276                 if (claimed == TRUE)
1277                         break;
1278                 l = l->nle_flink;
1279         }
1280         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1281
1282         return;
1283 }
1284
1285 uint8_t
1286 KeAcquireInterruptSpinLock(iobj)
1287         kinterrupt              *iobj;
1288 {
1289         uint8_t                 irql;
1290         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1291         return(irql);
1292 }
1293
1294 void
1295 KeReleaseInterruptSpinLock(iobj, irql)
1296         kinterrupt              *iobj;
1297         uint8_t                 irql;
1298 {
1299         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1300         return;
1301 }
1302
1303 uint8_t
1304 KeSynchronizeExecution(iobj, syncfunc, syncctx)
1305         kinterrupt              *iobj;
1306         void                    *syncfunc;
1307         void                    *syncctx;
1308 {
1309         uint8_t                 irql;
1310         
1311         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1312         MSCALL1(syncfunc, syncctx);
1313         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1314
1315         return(TRUE);
1316 }
1317
1318 /*
1319  * IoConnectInterrupt() is passed only the interrupt vector and
1320  * irql that a device wants to use, but no device-specific tag
1321  * of any kind. This conflicts rather badly with FreeBSD's
1322  * bus_setup_intr(), which needs the device_t for the device
1323  * requesting interrupt delivery. In order to bypass this
1324  * inconsistency, we implement a second level of interrupt
1325  * dispatching on top of bus_setup_intr(). All devices use
1326  * ntoskrnl_intr() as their ISR, and any device requesting
1327  * interrupts will be registered with ntoskrnl_intr()'s interrupt
1328  * dispatch list. When an interrupt arrives, we walk the list
1329  * and invoke all the registered ISRs. This effectively makes all
1330  * interrupts shared, but it's the only way to duplicate the
1331  * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
1332  */
1333
1334 uint32_t
1335 IoConnectInterrupt(iobj, svcfunc, svcctx, lock, vector, irql,
1336         syncirql, imode, shared, affinity, savefloat)
1337         kinterrupt              **iobj;
1338         void                    *svcfunc;
1339         void                    *svcctx;
1340         kspin_lock              *lock;
1341         uint32_t                vector;
1342         uint8_t                 irql;
1343         uint8_t                 syncirql;
1344         uint8_t                 imode;
1345         uint8_t                 shared;
1346         uint32_t                affinity;
1347         uint8_t                 savefloat;
1348 {
1349         uint8_t                 curirql;
1350
1351         *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
1352         if (*iobj == NULL)
1353                 return(STATUS_INSUFFICIENT_RESOURCES);
1354
1355         (*iobj)->ki_svcfunc = svcfunc;
1356         (*iobj)->ki_svcctx = svcctx;
1357
1358         if (lock == NULL) {
1359                 KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
1360                 (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
1361         } else
1362                 (*iobj)->ki_lock = lock;
1363
1364         KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
1365         InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
1366         KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
1367
1368         return(STATUS_SUCCESS);
1369 }
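
/*
 * Illustrative sketch, not part of the original source: how a service
 * routine ends up on the dispatch list that ntoskrnl_intr() walks.
 * Real drivers register routines wrapped for the Windows calling
 * convention; the names, the cast and the NTOSKRNL_EXAMPLES guard are
 * assumptions for the example.
 */
#ifdef NTOSKRNL_EXAMPLES
static uint8_t
example_isr(kinterrupt *iobj, void *ctx)
{
        /* Claim the interrupt so ntoskrnl_intr() stops walking. */
        return (TRUE);
}

static void
example_connect(void)
{
        kinterrupt              *ih;

        /* vector and irql are accepted but ignored, as noted above. */
        if (IoConnectInterrupt(&ih, (void *)example_isr, NULL, NULL,
            0, 0, 0, 0, TRUE, 0, FALSE) != STATUS_SUCCESS)
                return;
        /* ... interrupts now arrive via ntoskrnl_intr() ... */
        IoDisconnectInterrupt(ih);
}
#endif /* NTOSKRNL_EXAMPLES */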
1370
1371 void
1372 IoDisconnectInterrupt(iobj)
1373         kinterrupt              *iobj;
1374 {
1375         uint8_t                 irql;
1376
1377         if (iobj == NULL)
1378                 return;
1379
1380         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1381         RemoveEntryList((&iobj->ki_list));
1382         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1383
1384         ExFreePool(iobj);
1385
1386         return;
1387 }
1388
1389 device_object *
1390 IoAttachDeviceToDeviceStack(src, dst)
1391         device_object           *src;
1392         device_object           *dst;
1393 {
1394         device_object           *attached;
1395
1396         mtx_lock(&ntoskrnl_dispatchlock);
1397         attached = IoGetAttachedDevice(dst);
1398         attached->do_attacheddev = src;
1399         src->do_attacheddev = NULL;
1400         src->do_stacksize = attached->do_stacksize + 1;
1401         mtx_unlock(&ntoskrnl_dispatchlock);
1402
1403         return(attached);
1404 }
1405
1406 void
1407 IoDetachDevice(topdev)
1408         device_object           *topdev;
1409 {
1410         device_object           *tail;
1411
1412         mtx_lock(&ntoskrnl_dispatchlock);
1413
1414         /* First, break the chain. */
1415         tail = topdev->do_attacheddev;
1416         if (tail == NULL) {
1417                 mtx_unlock(&ntoskrnl_dispatchlock);
1418                 return;
1419         }
1420         topdev->do_attacheddev = tail->do_attacheddev;
1421         topdev->do_refcnt--;
1422
1423         /* Now reduce the stacksize count for the tail objects. */
1424
1425         tail = topdev->do_attacheddev;
1426         while (tail != NULL) {
1427                 tail->do_stacksize--;
1428                 tail = tail->do_attacheddev;
1429         }
1430
1431         mtx_unlock(&ntoskrnl_dispatchlock);
1432
1433         return;
1434 }
1435
1436 /*
1437  * For the most part, an object is considered signalled if
1438  * dh_sigstate == TRUE. The exception is for mutant objects
1439  * (mutexes), where the logic works like this:
1440  *
1441  * - If the thread already owns the object and sigstate is
1442  *   less than or equal to 0, then the object is considered
1443  *   signalled (recursive acquisition).
1444  * - If dh_sigstate == 1, the object is also considered
1445  *   signalled.
1446  */
1447
1448 static int
1449 ntoskrnl_is_signalled(obj, td)
1450         nt_dispatch_header      *obj;
1451         struct thread           *td;
1452 {
1453         kmutant                 *km;
1454         
1455         if (obj->dh_type == DISP_TYPE_MUTANT) {
1456                 km = (kmutant *)obj;
1457                 if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
1458                     obj->dh_sigstate == 1)
1459                         return(TRUE);
1460                 return(FALSE);
1461         }
1462
1463         if (obj->dh_sigstate > 0)
1464                 return(TRUE);
1465         return(FALSE);
1466 }
1467
1468 static void
1469 ntoskrnl_satisfy_wait(obj, td)
1470         nt_dispatch_header      *obj;
1471         struct thread           *td;
1472 {
1473         kmutant                 *km;
1474
1475         switch (obj->dh_type) {
1476         case DISP_TYPE_MUTANT:
1477                 km = (struct kmutant *)obj;
1478                 obj->dh_sigstate--;
1479                 /*
1480                  * If sigstate reaches 0, the mutex is now
1481                  * non-signalled (the new thread owns it).
1482                  */
1483                 if (obj->dh_sigstate == 0) {
1484                         km->km_ownerthread = td;
1485                         if (km->km_abandoned == TRUE)
1486                                 km->km_abandoned = FALSE;
1487                 }
1488                 break;
1489         /* Synchronization objects get reset to unsignalled. */
1490         case DISP_TYPE_SYNCHRONIZATION_EVENT:
1491         case DISP_TYPE_SYNCHRONIZATION_TIMER:
1492                 obj->dh_sigstate = 0;
1493                 break;
1494         case DISP_TYPE_SEMAPHORE:
1495                 obj->dh_sigstate--;
1496                 break;
1497         default:
1498                 break;
1499         }
1500
1501         return;
1502 }
1503
1504 static void
1505 ntoskrnl_satisfy_multiple_waits(wb)
1506         wait_block              *wb;
1507 {
1508         wait_block              *cur;
1509         struct thread           *td;
1510
1511         cur = wb;
1512         td = wb->wb_kthread;
1513
1514         do {
1515                 ntoskrnl_satisfy_wait(cur->wb_object, td);
1516                 cur->wb_awakened = TRUE;
1517                 cur = cur->wb_next;
1518         } while (cur != wb);
1519
1520         return;
1521 }
1522
1523 /* Always called with dispatcher lock held. */
1524 static void
1525 ntoskrnl_waittest(obj, increment)
1526         nt_dispatch_header      *obj;
1527         uint32_t                increment;
1528 {
1529         wait_block              *w, *next;
1530         list_entry              *e;
1531         struct thread           *td;
1532         wb_ext                  *we;
1533         int                     satisfied;
1534
1535         /*
1536          * Once an object has been signalled, we walk its list of
1537          * wait blocks. If a wait block can be awakened, then satisfy
1538          * waits as necessary and wake the thread.
1539          *
1540          * The rules work like this:
1541          *
1542          * If a wait block is marked as WAITTYPE_ANY, then
1543          * we can satisfy the wait conditions on the current
1544          * object and wake the thread right away. Satisfying
1545          * the wait also has the effect of breaking us out
1546          * of the search loop.
1547          *
1548          * If a wait block is marked as WAITTYPE_ALL, then the
1549          * wait block will be part of a circularly linked
1550          * list of wait blocks belonging to a waiting thread
1551          * that's sleeping in KeWaitForMultipleObjects(). In
1552          * order to wake the thread, all the objects in the
1553          * wait list must be in the signalled state. If they
1554          * are, we then satisfy all of them and wake the
1555          * thread.
1556          *
1557          */
1558
1559         e = obj->dh_waitlisthead.nle_flink;
1560
1561         while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
1562                 w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
1563                 we = w->wb_ext;
1564                 td = we->we_td;
1565                 satisfied = FALSE;
1566                 if (w->wb_waittype == WAITTYPE_ANY) {
1567                         /*
1568                          * Thread can be awakened if
1569                          * any wait is satisfied.
1570                          */
1571                         ntoskrnl_satisfy_wait(obj, td);
1572                         satisfied = TRUE;
1573                         w->wb_awakened = TRUE;
1574                 } else {
1575                         /*
1576                          * Thread can only be woken up
1577                          * if all waits are satisfied.
1578                          * If the thread is waiting on multiple
1579                          * objects, they should all be linked
1580                          * through the wb_next pointers in the
1581                          * wait blocks.
1582                          */
1583                         satisfied = TRUE;
1584                         next = w->wb_next;
1585                         while (next != w) {
1586                                 if (ntoskrnl_is_signalled(next->wb_object, td) == FALSE) {
1587                                         satisfied = FALSE;
1588                                         break;
1589                                 }
1590                                 next = next->wb_next;
1591                         }
1592                         if (satisfied == TRUE) ntoskrnl_satisfy_multiple_waits(w);
1593                 }
1594
1595                 if (satisfied == TRUE)
1596                         cv_broadcastpri(&we->we_cv,
1597                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
1598                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
1599
1600                 e = e->nle_flink;
1601         }
1602
1603         return;
1604 }
1605
1606 /*
1607  * Return the number of 100 nanosecond intervals since
1608  * January 1, 1601. (?!?!)
1609  */
1610 void
1611 ntoskrnl_time(tval)
1612         uint64_t                *tval;
1613 {
1614         struct timespec         ts;
1615
1616         nanotime(&ts);
1617         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1618             11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
1619
1620         return;
1621 }
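
/*
 * As a worked example of the conversion above (a sketch, not part
 * of the emulated API): 11644473600 is the number of seconds
 * between January 1, 1601 and January 1, 1970, so going back from
 * Windows time to a Unix timespec just reverses the arithmetic.
 * The helper name below is ours, purely for illustration.
 */
#if 0
static void
example_windows_time_to_timespec(uint64_t wtime, struct timespec *ts)
{
	wtime -= (uint64_t)11644473600 * 10000000; /* rebase 1601 -> 1970 */
	ts->tv_sec = wtime / 10000000;             /* 100ns units -> secs */
	ts->tv_nsec = (wtime % 10000000) * 100;    /* remainder -> nsecs */
}
#endif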
1622
1623 static void
1624 KeQuerySystemTime(current_time)
1625         uint64_t                *current_time;
1626 {
1627         ntoskrnl_time(current_time);
1628 }
1629
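/*
 * Note that tvtohz() converts an interval into a timeout tick count,
 * so applying it to the uptime yields roughly hz ticks per second of
 * uptime; this only approximates Windows' notion of a tick count.
 */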
1630 static uint32_t
1631 KeTickCount(void)
1632 {
1633         struct timeval tv;
1634         getmicrouptime(&tv);
1635         return tvtohz(&tv);
1636 }
1637
1638
1639 /*
1640  * KeWaitForSingleObject() is a tricky beast, because it can be used
1641  * with several different object types: semaphores, timers, events,
1642  * mutexes and threads. Semaphores don't appear very often, but the
1643  * other object types are quite common. KeWaitForSingleObject() is
1644  * what's normally used to acquire a mutex, and it can be used to
1645  * wait for a thread termination.
1646  * wait for thread termination.
1647  * The Windows NDIS API is implemented in terms of Windows kernel
1648  * primitives, and some of the object manipulation is duplicated in
1649  * NDIS. For example, NDIS has timers and events, which are actually
1650  * Windows kevents and ktimers. Now, you're supposed to only use the
1651  * NDIS variants of these objects within the confines of the NDIS API,
1652  * but there are some naughty developers out there who will use
1653  * KeWaitForSingleObject() on NDIS timer and event objects, so we
1654  * have to support that as well. Consequently, our NDIS timer and event
1655  * code has to be closely tied into our ntoskrnl timer and event code,
1656  * just as it is in Windows.
1657  *
1658  * KeWaitForSingleObject() may do different things for different kinds
1659  * of objects:
1660  *
1661  * - For events, we check if the event has been signalled. If the
1662  *   event is already in the signalled state, we just return immediately,
1663  *   otherwise we wait for it to be set to the signalled state by someone
1664  *   else calling KeSetEvent(). Events can be either synchronization or
1665  *   notification events.
1666  *
1667  * - For timers, if the timer has already fired and the timer is in
1668  *   the signalled state, we just return, otherwise we wait on the
1669  *   timer. Unlike an event, timers get signalled automatically when
1670  *   they expire rather than someone having to trip them manually.
1671  *   Timers initialized with KeInitializeTimer() are always notification
1672  *   events: KeInitializeTimerEx() lets you initialize a timer as
1673  *   either a notification or synchronization event.
1674  *
1675  * - For mutexes, we try to acquire the mutex and if we can't, we wait
1676  *   on the mutex until it's available and then grab it. When a mutex is
1677  *   released, it enters the signalled state, which wakes up one of the
1678  *   threads waiting to acquire it. Mutexes are always synchronization
1679  *   events.
1680  *
1681  * - For threads, the only thing we do is wait until the thread object
1682  *   enters a signalled state, which occurs when the thread terminates.
1683  *   Threads are always notification events.
1684  *
1685  * A notification event wakes up all threads waiting on an object. A
1686  * synchronization event wakes up just one. Also, a synchronization event
1687  * is auto-clearing, which means we automatically set the event back to
1688  * the non-signalled state once the wakeup is done.
1689  */
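
/*
 * To make the event semantics above concrete, here is a minimal
 * usage sketch (illustrative only, not compiled): one thread waits
 * on a notification event with a one second relative timeout while
 * another signals it with KeSetEvent().
 */
#if 0
static nt_kevent example_event;

static void
example_waiter(void)
{
	int64_t			duetime;

	KeInitializeEvent(&example_event, EVENT_TYPE_NOTIFY, FALSE);

	/* A negative duetime is a relative timeout in 100ns units. */
	duetime = -10000000;	/* one second */
	if (KeWaitForSingleObject(&example_event, 0, 0, TRUE,
	    &duetime) == STATUS_TIMEOUT)
		printf("event was never signalled\n");
}

static void
example_signaller(void)
{
	/* Wakes all waiters, since this is a notification event. */
	KeSetEvent(&example_event, IO_NO_INCREMENT, FALSE);
}
#endif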
1690
1691 uint32_t
1692 KeWaitForSingleObject(arg, reason, mode, alertable, duetime)
1693         void                    *arg;
1694         uint32_t                reason;
1695         uint32_t                mode;
1696         uint8_t                 alertable;
1697         int64_t                 *duetime;
1698 {
1699         wait_block              w;
1700         struct thread           *td = curthread;
1701         struct timeval          tv;
1702         int                     error = 0;
1703         uint64_t                curtime;
1704         wb_ext                  we;
1705         nt_dispatch_header      *obj;
1706
1707         obj = arg;
1708
1709         if (obj == NULL)
1710                 return(STATUS_INVALID_PARAMETER);
1711
1712         mtx_lock(&ntoskrnl_dispatchlock);
1713
1714         cv_init(&we.we_cv, "KeWFS");
1715         we.we_td = td;
1716
1717         /*
1718          * Check to see if this object is already signalled,
1719          * and just return without waiting if it is.
1720          */
1721         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1722                 /* Sanity check the signal state value. */
1723                 if (obj->dh_sigstate != INT32_MIN) {
1724                         ntoskrnl_satisfy_wait(obj, curthread);
1725                         mtx_unlock(&ntoskrnl_dispatchlock);
1726                         return (STATUS_SUCCESS);
1727                 } else {
1728                         /*
1729                          * There's a limit to how many times we can
1730                          * recursively acquire a mutant. If we hit
1731                          * the limit, something is very wrong.
1732                          */
1733                         if (obj->dh_type == DISP_TYPE_MUTANT) {
1734                                 mtx_unlock(&ntoskrnl_dispatchlock);
1735                                 panic("mutant limit exceeded");
1736                         }
1737                 }
1738         }
1739
1740         bzero((char *)&w, sizeof(wait_block));
1741         w.wb_object = obj;
1742         w.wb_ext = &we;
1743         w.wb_waittype = WAITTYPE_ANY;
1744         w.wb_next = &w;
1745         w.wb_waitkey = 0;
1746         w.wb_awakened = FALSE;
1747         w.wb_oldpri = td->td_priority;
1748
1749         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1750
1751         /*
1752          * The timeout value is specified in 100 nanosecond units
1753          * and can be a positive or negative number. If it's positive,
1754          * then the duetime is absolute, and we need to convert it
1755          * to an offset relative to now in order to use it.
1756          * If it's negative, then the duetime is relative and we
1757          * just have to convert the units.
1758          */
1759
1760         if (duetime != NULL) {
1761                 if (*duetime < 0) {
1762                         tv.tv_sec = - (*duetime) / 10000000;
1763                         tv.tv_usec = (- (*duetime) / 10) -
1764                             (tv.tv_sec * 1000000);
1765                 } else {
1766                         ntoskrnl_time(&curtime);
1767                         if (*duetime < curtime)
1768                                 tv.tv_sec = tv.tv_usec = 0;
1769                         else {
1770                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1771                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1772                                     (tv.tv_sec * 1000000);
1773                         }
1774                 }
1775         }
1776
1777         if (duetime == NULL)
1778                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1779         else
1780                 error = cv_timedwait(&we.we_cv,
1781                     &ntoskrnl_dispatchlock, tvtohz(&tv));
1782
1783         RemoveEntryList(&w.wb_waitlist);
1784
1785         cv_destroy(&we.we_cv);
1786
1787         /* We timed out. Leave the object alone and return status. */
1788
1789         if (error == EWOULDBLOCK) {
1790                 mtx_unlock(&ntoskrnl_dispatchlock);
1791                 return(STATUS_TIMEOUT);
1792         }
1793
1794         mtx_unlock(&ntoskrnl_dispatchlock);
1795
1796         return(STATUS_SUCCESS);
1797 /*
1798         return(KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1799             mode, alertable, duetime, &w));
1800 */
1801 }
1802
1803 static uint32_t
1804 KeWaitForMultipleObjects(cnt, obj, wtype, reason, mode,
1805         alertable, duetime, wb_array)
1806         uint32_t                cnt;
1807         nt_dispatch_header      *obj[];
1808         uint32_t                wtype;
1809         uint32_t                reason;
1810         uint32_t                mode;
1811         uint8_t                 alertable;
1812         int64_t                 *duetime;
1813         wait_block              *wb_array;
1814 {
1815         struct thread           *td = curthread;
1816         wait_block              *whead, *w;
1817         wait_block              _wb_array[MAX_WAIT_OBJECTS];
1818         nt_dispatch_header      *cur;
1819         struct timeval          tv;
1820         int                     i, wcnt = 0, error = 0;
1821         uint64_t                curtime;
1822         struct timespec         t1, t2;
1823         uint32_t                status = STATUS_SUCCESS;
1824         wb_ext                  we;
1825
1826         if (cnt > MAX_WAIT_OBJECTS)
1827                 return(STATUS_INVALID_PARAMETER);
1828         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1829                 return(STATUS_INVALID_PARAMETER);
1830
1831         mtx_lock(&ntoskrnl_dispatchlock);
1832
1833         cv_init(&we.we_cv, "KeWFM");
1834         we.we_td = td;
1835
1836         if (wb_array == NULL)
1837                 whead = _wb_array;
1838         else
1839                 whead = wb_array;
1840
1841         bzero((char *)whead, sizeof(wait_block) * cnt);
1842
1843         /* First pass: see if we can satisfy any waits immediately. */
1844
1845         wcnt = 0;
1846         w = whead;
1847
1848         for (i = 0; i < cnt; i++) {
1849                 InsertTailList((&obj[i]->dh_waitlisthead),
1850                     (&w->wb_waitlist));
1851                 w->wb_ext = &we;
1852                 w->wb_object = obj[i];
1853                 w->wb_waittype = wtype;
1854                 w->wb_waitkey = i;
1855                 w->wb_awakened = FALSE;
1856                 w->wb_oldpri = td->td_priority;
1857                 w->wb_next = w + 1;
1858                 w++;
1859                 wcnt++;
1860                 if (ntoskrnl_is_signalled(obj[i], td)) {
1861                         /*
1862                          * There's a limit to how many times
1863                          * we can recursively acquire a mutant.
1864                          * If we hit the limit, something
1865                          * is very wrong.
1866                          */
1867                         if (obj[i]->dh_sigstate == INT32_MIN &&
1868                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
1869                                 mtx_unlock(&ntoskrnl_dispatchlock);
1870                                 panic("mutant limit exceeded");
1871                         }
1872
1873                         /*
1874                          * If this is a WAITTYPE_ANY wait, then
1875                          * satisfy the waited object and exit
1876                          * right now.
1877                          */
1878
1879                         if (wtype == WAITTYPE_ANY) {
1880                                 ntoskrnl_satisfy_wait(obj[i], td);
1881                                 status = STATUS_WAIT_0 + i;
1882                                 goto wait_done;
1883                         } else {
1884                                 w--;
1885                                 wcnt--;
1886                                 w->wb_object = NULL;
1887                                 RemoveEntryList(&w->wb_waitlist);
1888                         }
1889                 }
1890         }
1891
1892         /*
1893          * If this is a WAITTYPE_ALL wait and all objects are
1894          * already signalled, satisfy the waits and exit now.
1895          */
1896
1897         if (wtype == WAITTYPE_ALL && wcnt == 0) {
1898                 for (i = 0; i < cnt; i++)
1899                         ntoskrnl_satisfy_wait(obj[i], td);
1900                 status = STATUS_SUCCESS;
1901                 goto wait_done;
1902         }
1903
1904         /*
1905          * Create a circular waitblock list. The waitcount
1906          * must always be non-zero when we get here.
1907          */
1908
1909         (w - 1)->wb_next = whead;
1910
1911         /* Wait on any objects that aren't yet signalled. */
1912
1913         /* Calculate timeout, if any. */
1914
1915         if (duetime != NULL) {
1916                 if (*duetime < 0) {
1917                         tv.tv_sec = - (*duetime) / 10000000;
1918                         tv.tv_usec = (- (*duetime) / 10) -
1919                             (tv.tv_sec * 1000000);
1920                 } else {
1921                         ntoskrnl_time(&curtime);
1922                         if (*duetime < curtime)
1923                                 tv.tv_sec = tv.tv_usec = 0;
1924                         else {
1925                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1926                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1927                                     (tv.tv_sec * 1000000);
1928                         }
1929                 }
1930         }
1931
1932         while (wcnt) {
1933                 nanotime(&t1);
1934
1935                 if (duetime == NULL)
1936                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1937                 else
1938                         error = cv_timedwait(&we.we_cv,
1939                             &ntoskrnl_dispatchlock, tvtohz(&tv));
1940
1941                 /* The wait timed out. */
1942
1943                 if (error) {
1944                         status = STATUS_TIMEOUT;
1945                         goto wait_done;
1946                 }
1947
1948                 nanotime(&t2);
1949
1950                 /* See what's been signalled. */
1951
1952                 w = whead;
1953                 do {
1954                         cur = w->wb_object;
1955                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1956                             w->wb_awakened == TRUE) {
1957                                 /* Sanity check the signal state value. */
1958                                 if (cur->dh_sigstate == INT32_MIN &&
1959                                     cur->dh_type == DISP_TYPE_MUTANT) {
1960                                         mtx_unlock(&ntoskrnl_dispatchlock);
1961                                         panic("mutant limit exceeded");
1962                                 }
1963                                 wcnt--;
1964                                 if (wtype == WAITTYPE_ANY) {
1965                                         status = STATUS_WAIT_0 +
1966                                             w->wb_waitkey;
1967                                         goto wait_done;
1968                                 }
1969                         }
1970                         w = w->wb_next;
1971                 } while (w != whead);
1972
1973                 /*
1974                  * If all objects have been signalled, or if this
1975                  * is a WAITTYPE_ANY wait and we were woken up by
1976                  * someone, we can bail.
1977                  */
1978
1979                 if (wcnt == 0) {
1980                         status = STATUS_SUCCESS;
1981                         goto wait_done;
1982                 }
1983
1984                 /*
1985                  * If this is a WAITTYPE_ALL wait and there are still
1986                  * objects that haven't been signalled, deduct the
1987                  * time that's elapsed so far from the timeout and
1988                  * wait again (or continue waiting indefinitely if
1989                  * there's no timeout).
1990                  */
1991
1992                 if (duetime != NULL) {
1993                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1994                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1995                 }
1996         }
1997
1998
1999 wait_done:
2000
2001         cv_destroy(&we.we_cv);
2002
2003         for (i = 0; i < cnt; i++) {
2004                 if (whead[i].wb_object != NULL)
2005                         RemoveEntryList(&whead[i].wb_waitlist);
2006
2007         }
2008         mtx_unlock(&ntoskrnl_dispatchlock);
2009
2010         return(status);
2011 }
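
/*
 * A sketch of how a driver might wait on several objects at once
 * (illustrative only, not compiled). With WAITTYPE_ANY, the return
 * value encodes which object fired, as STATUS_WAIT_0 + index.
 */
#if 0
static void
example_wait_any(nt_kevent *ev1, nt_kevent *ev2)
{
	nt_dispatch_header	*objs[2];
	uint32_t		rval;

	objs[0] = (nt_dispatch_header *)ev1;
	objs[1] = (nt_dispatch_header *)ev2;

	/*
	 * A NULL duetime means wait forever. No external wait block
	 * array is needed for fewer than THREAD_WAIT_OBJECTS objects.
	 */
	rval = KeWaitForMultipleObjects(2, objs, WAITTYPE_ANY, 0, 0,
	    TRUE, NULL, NULL);
	printf("object %u was signalled\n", rval - STATUS_WAIT_0);
}
#endif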
2012
2013 static void
2014 WRITE_REGISTER_USHORT(reg, val)
2015         uint16_t                *reg;
2016         uint16_t                val;
2017 {
2018         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
2019         return;
2020 }
2021
2022 static uint16_t
2023 READ_REGISTER_USHORT(reg)
2024         uint16_t                *reg;
2025 {
2026         return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
2027 }
2028
2029 static void
2030 WRITE_REGISTER_ULONG(reg, val)
2031         uint32_t                *reg;
2032         uint32_t                val;
2033 {
2034         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
2035         return;
2036 }
2037
2038 static uint32_t
2039 READ_REGISTER_ULONG(reg)
2040         uint32_t                *reg;
2041 {
2042         return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
2043 }
2044
2045 static uint8_t
2046 READ_REGISTER_UCHAR(reg)
2047         uint8_t                 *reg;
2048 {
2049         return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
2050 }
2051
2052 static void
2053 WRITE_REGISTER_UCHAR(reg, val)
2054         uint8_t                 *reg;
2055         uint8_t                 val;
2056 {
2057         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
2058         return;
2059 }
2060
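/*
 * The _all*()/_aull*() routines below are the 64-bit arithmetic
 * helpers that Microsoft's compiler emits calls to on 32-bit x86
 * (e.g. a 64-bit multiply becomes a call to _allmul()). Binary-only
 * drivers import them from ntoskrnl, so we provide C equivalents.
 */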
2061 static int64_t
2062 _allmul(a, b)
2063         int64_t                 a;
2064         int64_t                 b;
2065 {
2066         return (a * b);
2067 }
2068
2069 static int64_t
2070 _alldiv(a, b)
2071         int64_t                 a;
2072         int64_t                 b;
2073 {
2074         return (a / b);
2075 }
2076
2077 static int64_t
2078 _allrem(a, b)
2079         int64_t                 a;
2080         int64_t                 b;
2081 {
2082         return (a % b);
2083 }
2084
2085 static uint64_t
2086 _aullmul(a, b)
2087         uint64_t                a;
2088         uint64_t                b;
2089 {
2090         return (a * b);
2091 }
2092
2093 static uint64_t
2094 _aulldiv(a, b)
2095         uint64_t                a;
2096         uint64_t                b;
2097 {
2098         return (a / b);
2099 }
2100
2101 static uint64_t
2102 _aullrem(a, b)
2103         uint64_t                a;
2104         uint64_t                b;
2105 {
2106         return (a % b);
2107 }
2108
2109 static int64_t
2110 _allshl(a, b)
2111         int64_t                 a;
2112         uint8_t                 b;
2113 {
2114         return (a << b);
2115 }
2116
2117 static uint64_t
2118 _aullshl(a, b)
2119         uint64_t                a;
2120         uint8_t                 b;
2121 {
2122         return (a << b);
2123 }
2124
2125 static int64_t
2126 _allshr(a, b)
2127         int64_t                 a;
2128         uint8_t                 b;
2129 {
2130         return (a >> b);
2131 }
2132
2133 static uint64_t
2134 _aullshr(a, b)
2135         uint64_t                a;
2136         uint8_t                 b;
2137 {
2138         return (a >> b);
2139 }
2140
2141 static slist_entry *
2142 ntoskrnl_pushsl(head, entry)
2143         slist_header            *head;
2144         slist_entry             *entry;
2145 {
2146         slist_entry             *oldhead;
2147
2148         oldhead = head->slh_list.slh_next;
2149         entry->sl_next = head->slh_list.slh_next;
2150         head->slh_list.slh_next = entry;
2151         head->slh_list.slh_depth++;
2152         head->slh_list.slh_seq++;
2153
2154         return(oldhead);
2155 }
2156
2157 static slist_entry *
2158 ntoskrnl_popsl(head)
2159         slist_header            *head;
2160 {
2161         slist_entry             *first;
2162
2163         first = head->slh_list.slh_next;
2164         if (first != NULL) {
2165                 head->slh_list.slh_next = first->sl_next;
2166                 head->slh_list.slh_depth--;
2167                 head->slh_list.slh_seq++;
2168         }
2169
2170         return(first);
2171 }
2172
2173 /*
2174  * We need this to make lookaside lists work for amd64.
2175  * We store a pointer to ExAllocatePoolWithTag() in the lookaside
2176  * list structure. For amd64 to work right, this has to be a
2177  * pointer to the wrapped version of the routine, not the
2178  * original. Letting the Windows driver invoke the original
2179  * function directly will result in a calling convention
2180  * mismatch and a nasty crash. On x86, this effectively
2181  * becomes a no-op since ipt_func and ipt_wrap are the same.
2182  */
2183
2184 static funcptr
2185 ntoskrnl_findwrap(func)
2186         funcptr                 func;
2187 {
2188         image_patch_table       *patch;
2189
2190         patch = ntoskrnl_functbl;
2191         while (patch->ipt_func != NULL) {
2192                 if ((funcptr)patch->ipt_func == func)
2193                         return((funcptr)patch->ipt_wrap);
2194                 patch++;
2195         }
2196
2197         return(NULL);
2198 }
2199
2200 static void
2201 ExInitializePagedLookasideList(lookaside, allocfunc, freefunc,
2202     flags, size, tag, depth)
2203         paged_lookaside_list    *lookaside;
2204         lookaside_alloc_func    *allocfunc;
2205         lookaside_free_func     *freefunc;
2206         uint32_t                flags;
2207         size_t                  size;
2208         uint32_t                tag;
2209         uint16_t                depth;
2210 {
2211         bzero((char *)lookaside, sizeof(paged_lookaside_list));
2212
2213         if (size < sizeof(slist_entry))
2214                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2215         else
2216                 lookaside->nll_l.gl_size = size;
2217         lookaside->nll_l.gl_tag = tag;
2218         if (allocfunc == NULL)
2219                 lookaside->nll_l.gl_allocfunc =
2220                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2221         else
2222                 lookaside->nll_l.gl_allocfunc = allocfunc;
2223
2224         if (freefunc == NULL)
2225                 lookaside->nll_l.gl_freefunc =
2226                     ntoskrnl_findwrap((funcptr)ExFreePool);
2227         else
2228                 lookaside->nll_l.gl_freefunc = freefunc;
2229
2230 #ifdef __i386__
2231         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2232 #endif
2233
2234         lookaside->nll_l.gl_type = NonPagedPool;
2235         lookaside->nll_l.gl_depth = depth;
2236         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2237
2238         return;
2239 }
2240
2241 static void
2242 ExDeletePagedLookasideList(lookaside)
2243         paged_lookaside_list   *lookaside;
2244 {
2245         void                    *buf;
2246         void            (*freefunc)(void *);
2247
2248         freefunc = lookaside->nll_l.gl_freefunc;
2249         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2250                 MSCALL1(freefunc, buf);
2251
2252         return;
2253 }
2254
2255 static void
2256 ExInitializeNPagedLookasideList(lookaside, allocfunc, freefunc,
2257     flags, size, tag, depth)
2258         npaged_lookaside_list   *lookaside;
2259         lookaside_alloc_func    *allocfunc;
2260         lookaside_free_func     *freefunc;
2261         uint32_t                flags;
2262         size_t                  size;
2263         uint32_t                tag;
2264         uint16_t                depth;
2265 {
2266         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2267
2268         if (size < sizeof(slist_entry))
2269                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2270         else
2271                 lookaside->nll_l.gl_size = size;
2272         lookaside->nll_l.gl_tag = tag;
2273         if (allocfunc == NULL)
2274                 lookaside->nll_l.gl_allocfunc =
2275                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2276         else
2277                 lookaside->nll_l.gl_allocfunc = allocfunc;
2278
2279         if (freefunc == NULL)
2280                 lookaside->nll_l.gl_freefunc =
2281                     ntoskrnl_findwrap((funcptr)ExFreePool);
2282         else
2283                 lookaside->nll_l.gl_freefunc = freefunc;
2284
2285 #ifdef __i386__
2286         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2287 #endif
2288
2289         lookaside->nll_l.gl_type = NonPagedPool;
2290         lookaside->nll_l.gl_depth = depth;
2291         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2292
2293         return;
2294 }
2295
2296 static void
2297 ExDeleteNPagedLookasideList(lookaside)
2298         npaged_lookaside_list   *lookaside;
2299 {
2300         void                    *buf;
2301         void            (*freefunc)(void *);
2302
2303         freefunc = lookaside->nll_l.gl_freefunc;
2304         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2305                 MSCALL1(freefunc, buf);
2306
2307         return;
2308 }
2309
2310 slist_entry *
2311 InterlockedPushEntrySList(head, entry)
2312         slist_header            *head;
2313         slist_entry             *entry;
2314 {
2315         slist_entry             *oldhead;
2316
2317         mtx_lock_spin(&ntoskrnl_interlock);
2318         oldhead = ntoskrnl_pushsl(head, entry);
2319         mtx_unlock_spin(&ntoskrnl_interlock);
2320
2321         return(oldhead);
2322 }
2323
2324 slist_entry *
2325 InterlockedPopEntrySList(head)
2326         slist_header            *head;
2327 {
2328         slist_entry             *first;
2329
2330         mtx_lock_spin(&ntoskrnl_interlock);
2331         first = ntoskrnl_popsl(head);
2332         mtx_unlock_spin(&ntoskrnl_interlock);
2333
2334         return(first);
2335 }
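
/*
 * Usage sketch for the interlocked SList routines (illustrative
 * only, not compiled): an interlocked LIFO of driver-defined
 * records that embed an slist_entry as their first member.
 */
#if 0
struct example_pkt {
	slist_entry		ep_link;	/* must come first */
	int			ep_data;
};

static void
example_slist(slist_header *head, struct example_pkt *p)
{
	slist_entry		*e;

	InterlockedPushEntrySList(head, &p->ep_link);
	e = InterlockedPopEntrySList(head);
	if (e != NULL)
		printf("popped pkt %d\n",
		    ((struct example_pkt *)e)->ep_data);
}
#endif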
2336
2337 static slist_entry *
2338 ExInterlockedPushEntrySList(head, entry, lock)
2339         slist_header            *head;
2340         slist_entry             *entry;
2341         kspin_lock              *lock;
2342 {
2343         return(InterlockedPushEntrySList(head, entry));
2344 }
2345
2346 static slist_entry *
2347 ExInterlockedPopEntrySList(head, lock)
2348         slist_header            *head;
2349         kspin_lock              *lock;
2350 {
2351         return(InterlockedPopEntrySList(head));
2352 }
2353
2354 uint16_t
2355 ExQueryDepthSList(head)
2356         slist_header            *head;
2357 {
2358         uint16_t                depth;
2359
2360         mtx_lock_spin(&ntoskrnl_interlock);
2361         depth = head->slh_list.slh_depth;
2362         mtx_unlock_spin(&ntoskrnl_interlock);
2363
2364         return(depth);
2365 }
2366
2367 void
2368 KeInitializeSpinLock(lock)
2369         kspin_lock              *lock;
2370 {
2371         *lock = 0;
2372
2373         return;
2374 }
2375
2376 #ifdef __i386__
2377 void
2378 KefAcquireSpinLockAtDpcLevel(lock)
2379         kspin_lock              *lock;
2380 {
2381 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2382         int                     i = 0;
2383 #endif
2384
2385         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2386                 /* sit and spin */;
2387 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2388                 i++;
2389                 if (i > 200000000)
2390                         panic("DEADLOCK!");
2391 #endif
2392         }
2393
2394         return;
2395 }
2396
2397 void
2398 KefReleaseSpinLockFromDpcLevel(lock)
2399         kspin_lock              *lock;
2400 {
2401         atomic_store_rel_int((volatile u_int *)lock, 0);
2402
2403         return;
2404 }
2405
2406 uint8_t
2407 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2408 {
2409         uint8_t                 oldirql;
2410
2411         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2412                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2413
2414         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2415         KeAcquireSpinLockAtDpcLevel(lock);
2416
2417         return(oldirql);
2418 }
2419 #else
2420 void
2421 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2422 {
2423         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2424                 /* sit and spin */;
2425
2426         return;
2427 }
2428
2429 void
2430 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2431 {
2432         atomic_store_rel_int((volatile u_int *)lock, 0);
2433
2434         return;
2435 }
2436 #endif /* __i386__ */
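
/*
 * Typical spinlock usage under this emulation (a sketch, not
 * compiled): KeAcquireSpinLock() raises IRQL to DISPATCH_LEVEL and
 * spins for the lock; KeReleaseSpinLock() drops the lock and
 * restores the previous IRQL.
 */
#if 0
static void
example_spinlock(kspin_lock *lock)
{
	uint8_t			irql;

	KeInitializeSpinLock(lock);
	KeAcquireSpinLock(lock, &irql);
	/* ... touch data shared with a DPC here ... */
	KeReleaseSpinLock(lock, irql);
}
#endif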
2437
2438 uintptr_t
2439 InterlockedExchange(dst, val)
2440         volatile uint32_t       *dst;
2441         uintptr_t               val;
2442 {
2443         uintptr_t               r;
2444
2445         mtx_lock_spin(&ntoskrnl_interlock);
2446         r = *dst;
2447         *dst = val;
2448         mtx_unlock_spin(&ntoskrnl_interlock);
2449
2450         return(r);
2451 }
2452
2453 static uint32_t
2454 InterlockedIncrement(addend)
2455         volatile uint32_t       *addend;
2456 {
2457         atomic_add_int((volatile u_int *)addend, 1);
2458         return(*addend);
2459 }
2460
2461 static uint32_t
2462 InterlockedDecrement(addend)
2463         volatile uint32_t       *addend;
2464 {
2465         atomic_subtract_int((volatile u_int *)addend, 1);
2466         return(*addend);
2467 }
2468
2469 static void
2470 ExInterlockedAddLargeStatistic(addend, inc)
2471         uint64_t                *addend;
2472         uint32_t                inc;
2473 {
2474         mtx_lock_spin(&ntoskrnl_interlock);
2475         *addend += inc;
2476         mtx_unlock_spin(&ntoskrnl_interlock);
2477
2478         return;
2479 };
2480
2481 mdl *
2482 IoAllocateMdl(vaddr, len, secondarybuf, chargequota, iopkt)
2483         void                    *vaddr;
2484         uint32_t                len;
2485         uint8_t                 secondarybuf;
2486         uint8_t                 chargequota;
2487         irp                     *iopkt;
2488 {
2489         mdl                     *m;
2490         int                     zone = 0;
2491
2492         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2493                 m = ExAllocatePoolWithTag(NonPagedPool,
2494                     MmSizeOfMdl(vaddr, len), 0);
2495         else {
2496                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
2497                 zone++;
2498         }
2499
2500         if (m == NULL)
2501                 return (NULL);
2502
2503         MmInitializeMdl(m, vaddr, len);
2504
2505         /*
2506          * MmInitializeMdl() clears the flags field, so we
2507          * have to set this here. If the MDL came from the
2508          * MDL UMA zone, tag it so we can release it to
2509          * the right place later.
2510          */
2511         if (zone)
2512                 m->mdl_flags = MDL_ZONE_ALLOCED;
2513
2514         if (iopkt != NULL) {
2515                 if (secondarybuf == TRUE) {
2516                         mdl                     *last;
2517                         last = iopkt->irp_mdl;
2518                         while (last->mdl_next != NULL)
2519                                 last = last->mdl_next;
2520                         last->mdl_next = m;
2521                 } else {
2522                         if (iopkt->irp_mdl != NULL)
2523                                 panic("leaking an MDL in IoAllocateMdl()");
2524                         iopkt->irp_mdl = m;
2525                 }
2526         }
2527
2528         return (m);
2529 }
2530
2531 void
2532 IoFreeMdl(m)
2533         mdl                     *m;
2534 {
2535         if (m == NULL)
2536                 return;
2537
2538         if (m->mdl_flags & MDL_ZONE_ALLOCED)
2539                 uma_zfree(mdl_zone, m);
2540         else
2541                 ExFreePool(m);
2542
2543         return;
2544 }
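
/*
 * MDL usage sketch (illustrative only, not compiled): describe a
 * kernel buffer with an MDL, fill in its page array, and release
 * it again.
 */
#if 0
static void
example_mdl(void *buf, uint32_t len)
{
	mdl			*m;

	m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
	if (m == NULL)
		return;
	/* The buffer is non-paged, so the page array is just KVAs. */
	MmBuildMdlForNonPagedPool(m);
	/* ... hand m to code that expects an MDL chain ... */
	IoFreeMdl(m);
}
#endif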
2545
2546 static void *
2547 MmAllocateContiguousMemory(size, highest)
2548         uint32_t                size;
2549         uint64_t                highest;
2550 {
2551         void *addr;
2552         size_t pagelength = roundup(size, PAGE_SIZE);
2553
2554         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2555
2556         return(addr);
2557 }
2558
2559 static void *
2560 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
2561     boundary, cachetype)
2562         uint32_t                size;
2563         uint64_t                lowest;
2564         uint64_t                highest;
2565         uint64_t                boundary;
2566         uint32_t                cachetype;
2567 {
2568         void *addr;
2569         size_t pagelength = roundup(size, PAGE_SIZE);
2570
2571         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2572
2573         return(addr);
2574 }
2575
2576 static void
2577 MmFreeContiguousMemory(base)
2578         void                    *base;
2579 {
2580         ExFreePool(base);
2581 }
2582
2583 static void
2584 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
2585         void                    *base;
2586         uint32_t                size;
2587         uint32_t                cachetype;
2588 {
2589         ExFreePool(base);
2590 }
2591
2592 static uint32_t
2593 MmSizeOfMdl(vaddr, len)
2594         void                    *vaddr;
2595         size_t                  len;
2596 {
2597         uint32_t                l;
2598
2599         l = sizeof(struct mdl) +
2600             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2601
2602         return(l);
2603 }
2604
2605 /*
2606  * The Microsoft documentation says this routine fills in the
2607  * page array of an MDL with the _physical_ page addresses that
2608  * comprise the buffer, but we don't really want to do that here.
2609  * Instead, we just fill in the page array with the kernel virtual
2610  * addresses of the buffers.
2611  */
2612 void
2613 MmBuildMdlForNonPagedPool(m)
2614         mdl                     *m;
2615 {
2616         vm_offset_t             *mdl_pages;
2617         int                     pagecnt, i;
2618
2619         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2620
2621         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2622                 panic("not enough pages in MDL to describe buffer");
2623
2624         mdl_pages = MmGetMdlPfnArray(m);
2625
2626         for (i = 0; i < pagecnt; i++)
2627                 mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2628
2629         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2630         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2631
2632         return;
2633 }
2634
2635 static void *
2636 MmMapLockedPages(buf, accessmode)
2637         mdl                     *buf;
2638         uint8_t                 accessmode;
2639 {
2640         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2641         return(MmGetMdlVirtualAddress(buf));
2642 }
2643
2644 static void *
2645 MmMapLockedPagesSpecifyCache(buf, accessmode, cachetype, vaddr,
2646     bugcheck, prio)
2647         mdl                     *buf;
2648         uint8_t                 accessmode;
2649         uint32_t                cachetype;
2650         void                    *vaddr;
2651         uint32_t                bugcheck;
2652         uint32_t                prio;
2653 {
2654         return(MmMapLockedPages(buf, accessmode));
2655 }
2656
2657 static void
2658 MmUnmapLockedPages(vaddr, buf)
2659         void                    *vaddr;
2660         mdl                     *buf;
2661 {
2662         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2663         return;
2664 }
2665
2666 /*
2667  * This function has a problem in that it will break if you
2668  * compile this module without PAE and try to use it on a PAE
2669  * kernel. Unfortunately, there's no way around this at the
2670  * moment. It's slightly less broken than using pmap_kextract().
2671  * You'd think the virtual memory subsystem would help us out
2672  * here, but it doesn't.
2673  */
2674
2675 static uint8_t
2676 MmIsAddressValid(vaddr)
2677         void                    *vaddr;
2678 {
2679         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
2680                 return(TRUE);
2681
2682         return(FALSE);
2683 }
2684
2685 void *
2686 MmMapIoSpace(paddr, len, cachetype)
2687         uint64_t                paddr;
2688         uint32_t                len;
2689         uint32_t                cachetype;
2690 {
2691         devclass_t              nexus_class;
2692         device_t                *nexus_devs, devp;
2693         int                     nexus_count = 0;
2694         device_t                matching_dev = NULL;
2695         struct resource         *res;
2696         int                     i;
2697         vm_offset_t             v;
2698
2699         /* There will always be at least one nexus. */
2700
2701         nexus_class = devclass_find("nexus");
2702         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2703
2704         for (i = 0; i < nexus_count; i++) {
2705                 devp = nexus_devs[i];
2706                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2707                 if (matching_dev)
2708                         break;
2709         }
2710
2711         free(nexus_devs, M_TEMP);
2712
2713         if (matching_dev == NULL)
2714                 return(NULL);
2715
2716         v = (vm_offset_t)rman_get_virtual(res);
2717         if (paddr > rman_get_start(res))
2718                 v += paddr - rman_get_start(res);
2719
2720         return((void *)v);
2721 }
2722
2723 void
2724 MmUnmapIoSpace(vaddr, len)
2725         void                    *vaddr;
2726         size_t                  len;
2727 {
2728         return;
2729 }
2730
2731
2732 static device_t
2733 ntoskrnl_finddev(dev, paddr, res)
2734         device_t                dev;
2735         uint64_t                paddr;
2736         struct resource         **res;
2737 {
2738         device_t                *children = NULL;
2739         device_t                matching_dev;
2740         int                     childcnt;
2741         struct resource         *r;
2742         struct resource_list    *rl;
2743         struct resource_list_entry      *rle;
2744         uint32_t                flags;
2745         int                     i;
2746
2747         /* We only want devices that have been successfully probed. */
2748
2749         if (device_is_alive(dev) == FALSE)
2750                 return(NULL);
2751
2752         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2753         if (rl != NULL) {
2754 #if __FreeBSD_version < 600022
2755                 SLIST_FOREACH(rle, rl, link) {
2756 #else
2757                 STAILQ_FOREACH(rle, rl, link) {
2758 #endif
2759                         r = rle->res;
2760
2761                         if (r == NULL)
2762                                 continue;
2763
2764                         flags = rman_get_flags(r);
2765
2766                         if (rle->type == SYS_RES_MEMORY &&
2767                             paddr >= rman_get_start(r) &&
2768                             paddr <= rman_get_end(r)) {
2769                                 if (!(flags & RF_ACTIVE))
2770                                         bus_activate_resource(dev,
2771                                             SYS_RES_MEMORY, 0, r);
2772                                 *res = r;
2773                                 return(dev);
2774                         }
2775                 }
2776         }
2777
2778         /*
2779          * If this device has children, do another
2780          * level of recursion to inspect them.
2781          */
2782
2783         device_get_children(dev, &children, &childcnt);
2784
2785         for (i = 0; i < childcnt; i++) {
2786                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2787                 if (matching_dev != NULL) {
2788                         free(children, M_TEMP);
2789                         return(matching_dev);
2790                 }
2791         }
2792
2793         
2794         /* Won't somebody please think of the children! */
2795
2796         if (children != NULL)
2797                 free(children, M_TEMP);
2798
2799         return(NULL);
2800 }
2801
2802 /*
2803  * Workitems are unlike DPCs, in that they run in a user-mode thread
2804  * context rather than at DISPATCH_LEVEL in kernel context. In our
2805  * case we run them in kernel context anyway.
2806  */
2807 static void
2808 ntoskrnl_workitem_thread(arg)
2809         void                    *arg;
2810 {
2811         kdpc_queue              *kq;
2812         list_entry              *l;
2813         io_workitem             *iw;
2814         uint8_t                 irql;
2815
2816         kq = arg;
2817
2818         InitializeListHead(&kq->kq_disp);
2819         kq->kq_td = curthread;
2820         kq->kq_exit = 0;
2821         KeInitializeSpinLock(&kq->kq_lock);
2822         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2823
2824         while (1) {
2825                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2826
2827                 KeAcquireSpinLock(&kq->kq_lock, &irql);
2828
2829                 if (kq->kq_exit) {
2830                         kq->kq_exit = 0;
2831                         KeReleaseSpinLock(&kq->kq_lock, irql);
2832                         break;
2833                 }
2834
2835                 while (!IsListEmpty(&kq->kq_disp)) {
2836                         l = RemoveHeadList(&kq->kq_disp);
2837                         iw = CONTAINING_RECORD(l,
2838                             io_workitem, iw_listentry);
2839                         InitializeListHead((&iw->iw_listentry));
2840                         if (iw->iw_func == NULL)
2841                                 continue;
2842                         KeReleaseSpinLock(&kq->kq_lock, irql);
2843                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2844                         KeAcquireSpinLock(&kq->kq_lock, &irql);
2845                 }
2846
2847                 KeReleaseSpinLock(&kq->kq_lock, irql);
2848         }
2849
2850 #if __FreeBSD_version < 502113
2851         mtx_lock(&Giant);
2852 #endif
2853         kproc_exit(0);
2854         return; /* notreached */
2855 }
2856
2857 static void
2858 ntoskrnl_destroy_workitem_threads(void)
2859 {
2860         kdpc_queue              *kq;
2861         int                     i;
2862
2863         for (i = 0; i < WORKITEM_THREADS; i++) {
2864                 kq = wq_queues + i;
2865                 kq->kq_exit = 1;
2866                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);       
2867                 while (kq->kq_exit)
2868                         tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
2869         }
2870
2871         return;
2872 }
2873
2874 io_workitem *
2875 IoAllocateWorkItem(dobj)
2876         device_object           *dobj;
2877 {
2878         io_workitem             *iw;
2879
2880         iw = uma_zalloc(iw_zone, M_NOWAIT);
2881         if (iw == NULL)
2882                 return(NULL);
2883
2884         InitializeListHead(&iw->iw_listentry);
2885         iw->iw_dobj = dobj;
2886
2887         mtx_lock(&ntoskrnl_dispatchlock);
2888         iw->iw_idx = wq_idx;
2889         WORKIDX_INC(wq_idx);
2890         mtx_unlock(&ntoskrnl_dispatchlock);
2891
2892         return(iw);
2893 }
2894
2895 void
2896 IoFreeWorkItem(iw)
2897         io_workitem             *iw;
2898 {
2899         uma_zfree(iw_zone, iw);
2900         return;
2901 }
2902
2903 void
2904 IoQueueWorkItem(iw, iw_func, qtype, ctx)
2905         io_workitem             *iw;
2906         io_workitem_func        iw_func;
2907         uint32_t                qtype;
2908         void                    *ctx;
2909 {
2910         kdpc_queue              *kq;
2911         list_entry              *l;
2912         io_workitem             *cur;
2913         uint8_t                 irql;
2914
2915         kq = wq_queues + iw->iw_idx;
2916
2917         KeAcquireSpinLock(&kq->kq_lock, &irql);
2918
2919         /*
2920          * Traverse the list and make sure this workitem hasn't
2921          * already been inserted. Queuing the same workitem
2922          * twice will hose the list but good.
2923          */
2924
2925         l = kq->kq_disp.nle_flink;
2926         while (l != &kq->kq_disp) {
2927                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2928                 if (cur == iw) {
2929                         /* Already queued -- do nothing. */
2930                         KeReleaseSpinLock(&kq->kq_lock, irql);
2931                         return;
2932                 }
2933                 l = l->nle_flink;
2934         }
2935
2936         iw->iw_func = iw_func;
2937         iw->iw_ctx = ctx;
2938
2939         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2940         KeReleaseSpinLock(&kq->kq_lock, irql);
2941
2942         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2943
2944         return;
2945 }
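
/*
 * Workitem usage sketch (illustrative only, not compiled): defer a
 * callback into a workitem thread. Note that a real driver callback
 * arrives pre-wrapped for the Windows calling convention (it is
 * invoked via MSCALL2()); this sketch glosses over that detail,
 * and the qtype argument is ignored by this implementation.
 */
#if 0
static void
example_wi_func(device_object *dobj, void *ctx)
{
	printf("workitem ran with ctx %p\n", ctx);
}

static void
example_queue_work(device_object *dobj, void *softc)
{
	io_workitem		*iw;

	iw = IoAllocateWorkItem(dobj);
	if (iw == NULL)
		return;
	IoQueueWorkItem(iw, (io_workitem_func)example_wi_func, 0, softc);
	/* IoFreeWorkItem(iw) once the item is known to have run. */
}
#endif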
2946
2947 static void
2948 ntoskrnl_workitem(dobj, arg)
2949         device_object           *dobj;
2950         void                    *arg;
2951 {
2952         io_workitem             *iw;
2953         work_queue_item         *w;
2954         work_item_func          f;
2955
2956         iw = arg;
2957         w = (work_queue_item *)dobj;
2958         f = (work_item_func)w->wqi_func;
2959         uma_zfree(iw_zone, iw);
2960         MSCALL2(f, w, w->wqi_ctx);
2961
2962         return;
2963 }
2964
2965 /*
2966  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2967  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2968  * problem with ExQueueWorkItem() is that it can't guard against
2969  * the condition where a driver submits a job to the work queue and
2970  * is then unloaded before the job is able to run. IoQueueWorkItem()
2971  * acquires a reference to the device's device_object via the
2972  * object manager and retains it until after the job has completed,
2973  * which prevents the driver from being unloaded before the job
2974  * runs. (We don't currently support this behavior, though hopefully
2975  * that will change once the object manager API is fleshed out a bit.)
2976  *
2977  * Having said all that, the ExQueueWorkItem() API remains, because
2978  * there are still other parts of Windows that use it, including
2979  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2980  * We fake up the ExQueueWorkItem() API on top of our implementation
2981  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2982  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2983  * queue item (provided by the caller) in to IoAllocateWorkItem()
2984  * instead of the device_object. We need to save this pointer so
2985  * we can apply a sanity check: as with the DPC queue and other
2986  * workitem queues, we can't allow the same work queue item to
2987  * be queued twice. If it's already pending, we silently return
2988  * be queued twice. If it's already pending, we silently return.
2989
2990 void
2991 ExQueueWorkItem(w, qtype)
2992         work_queue_item         *w;
2993         uint32_t                qtype;
2994 {
2995         io_workitem             *iw;
2996         io_workitem_func        iwf;
2997         kdpc_queue              *kq;
2998         list_entry              *l;
2999         io_workitem             *cur;
3000         uint8_t                 irql;
3001
3002
3003         /*
3004          * We need to do a special sanity test to make sure
3005          * the ExQueueWorkItem() API isn't used to queue
3006          * the same workitem twice. Rather than checking the
3007          * io_workitem pointer itself, we test the attached
3008          * device object, which is really a pointer to the
3009          * legacy work queue item structure.
3010          */
3011
3012         kq = wq_queues + WORKITEM_LEGACY_THREAD;
3013         KeAcquireSpinLock(&kq->kq_lock, &irql);
3014         l = kq->kq_disp.nle_flink;
3015         while (l != &kq->kq_disp) {
3016                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
3017                 if (cur->iw_dobj == (device_object *)w) {
3018                         /* Already queued -- do nothing. */
3019                         KeReleaseSpinLock(&kq->kq_lock, irql);
3020                         return;
3021                 }
3022                 l = l->nle_flink;
3023         }
3024         KeReleaseSpinLock(&kq->kq_lock, irql);
3025
3026         iw = IoAllocateWorkItem((device_object *)w);
3027         if (iw == NULL)
3028                 return;
3029
3030         iw->iw_idx = WORKITEM_LEGACY_THREAD;
3031         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
3032         IoQueueWorkItem(iw, iwf, qtype, iw);
3033
3034         return;
3035 }
3036
3037 static void
3038 RtlZeroMemory(dst, len)
3039         void                    *dst;
3040         size_t                  len;
3041 {
3042         bzero(dst, len);
3043         return;
3044 }
3045
3046 static void
3047 RtlCopyMemory(dst, src, len)
3048         void                    *dst;
3049         const void              *src;
3050         size_t                  len;
3051 {
3052         bcopy(src, dst, len);
3053         return;
3054 }
3055
3056 static size_t
3057 RtlCompareMemory(s1, s2, len)
3058         const void              *s1;
3059         const void              *s2;
3060         size_t                  len;
3061 {
3062         size_t                  i, total = 0;
3063         uint8_t                 *m1, *m2;
3064
3065         m1 = __DECONST(uint8_t *, s1);
3066         m2 = __DECONST(uint8_t *, s2);
3067
3068         for (i = 0; i < len; i++) {
3069                 if (m1[i] == m2[i])
3070                         total++;
3071         }
3072         return(total);
3073 }
3074
3075 void
3076 RtlInitAnsiString(dst, src)
3077         ansi_string             *dst;
3078         char                    *src;
3079 {
3080         ansi_string             *a;
3081
3082         a = dst;
3083         if (a == NULL)
3084                 return;
3085         if (src == NULL) {
3086                 a->as_len = a->as_maxlen = 0;
3087                 a->as_buf = NULL;
3088         } else {
3089                 a->as_buf = src;
3090                 a->as_len = a->as_maxlen = strlen(src);
3091         }
3092
3093         return;
3094 }
3095
3096 void
3097 RtlInitUnicodeString(dst, src)
3098         unicode_string          *dst;
3099         uint16_t                *src;
3100 {
3101         unicode_string          *u;
3102         int                     i;
3103
3104         u = dst;
3105         if (u == NULL)
3106                 return;
3107         if (src == NULL) {
3108                 u->us_len = u->us_maxlen = 0;
3109                 u->us_buf = NULL;
3110         } else {
3111                 i = 0;
3112                 while(src[i] != 0)
3113                         i++;
3114                 u->us_buf = src;
3115                 u->us_len = u->us_maxlen = i * 2;
3116         }
3117
3118         return;
3119 }
3120
3121 ndis_status
3122 RtlUnicodeStringToInteger(ustr, base, val)
3123         unicode_string          *ustr;
3124         uint32_t                base;
3125         uint32_t                *val;
3126 {
3127         uint16_t                *uchr;
3128         int                     len, neg = 0;
3129         char                    abuf[64];
3130         char                    *astr;
3131
3132         uchr = ustr->us_buf;
3133         len = ustr->us_len;
3134         bzero(abuf, sizeof(abuf));
3135
3136         if ((char)((*uchr) & 0xFF) == '-') {
3137                 neg = 1;
3138                 uchr++;
3139                 len -= 2;
3140         } else if ((char)((*uchr) & 0xFF) == '+') {
3141                 neg = 0;
3142                 uchr++;
3143                 len -= 2;
3144         }
3145
3146         if (base == 0) {
3147                 if ((char)((*uchr) & 0xFF) == 'b') {
3148                         base = 2;
3149                         uchr++;
3150                         len -= 2;
3151                 } else if ((char)((*uchr) & 0xFF) == 'o') {
3152                         base = 8;
3153                         uchr++;
3154                         len -= 2;
3155                 } else if ((char)((*uchr) & 0xFF) == 'x') {
3156                         base = 16;
3157                         uchr++;
3158                         len -= 2;
3159                 } else
3160                         base = 10;
3161         }
3162
3163         astr = abuf;
3164         if (neg) {
3165                 strcpy(astr, "-");
3166                 astr++;
3167         }
3168
3169         ntoskrnl_unicode_to_ascii(uchr, astr, len);
3170         *val = strtoul(abuf, NULL, base);
3171
3172         return(STATUS_SUCCESS);
3173 }
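
/*
 * A short sketch of the Rtl string helpers above (illustrative
 * only, not compiled): build a unicode_string from a UTF-16 buffer
 * and parse it as a number. With base 0, a leading 'x' selects hex.
 */
#if 0
static void
example_ustr_to_int(void)
{
	/* UTF-16 for "x1F", NUL-terminated. */
	static uint16_t		buf[] = { 'x', '1', 'F', 0 };
	unicode_string		us;
	uint32_t		val;

	RtlInitUnicodeString(&us, buf);
	RtlUnicodeStringToInteger(&us, 0, &val);	/* val == 31 */
}
#endif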
3174
3175 void
3176 RtlFreeUnicodeString(ustr)
3177         unicode_string          *ustr;
3178 {
3179         if (ustr->us_buf == NULL)
3180                 return;
3181         ExFreePool(ustr->us_buf);
3182         ustr->us_buf = NULL;
3183         return;
3184 }
3185
3186 void
3187 RtlFreeAnsiString(astr)
3188         ansi_string             *astr;
3189 {
3190         if (astr->as_buf == NULL)
3191                 return;
3192         ExFreePool(astr->as_buf);
3193         astr->as_buf = NULL;
3194         return;
3195 }
3196
3197 static int
3198 atoi(str)
3199         const char              *str;
3200 {
3201         return (int)strtol(str, (char **)NULL, 10);
3202 }
3203
3204 static long
3205 atol(str)
3206         const char              *str;
3207 {
3208         return strtol(str, (char **)NULL, 10);
3209 }
3210
3211 static int
3212 rand(void)
3213 {
3214         struct timeval          tv;
3215
3216         microtime(&tv);
3217         srandom(tv.tv_usec);
3218         return((int)random());
3219 }
3220
3221 static void
3222 srand(seed)
3223         unsigned int            seed;
3224 {
3225         srandom(seed);
3226         return;
3227 }
3228
3229 static uint8_t
3230 IoIsWdmVersionAvailable(major, minor)
3231         uint8_t                 major;
3232         uint8_t                 minor;
3233 {
3234         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3235                 return(TRUE);
3236         return(FALSE);
3237 }
3238
3239 static ndis_status
3240 IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
3241         unicode_string          *name;
3242         uint32_t                reqaccess;
3243         void                    *fileobj;
3244         device_object           *devobj;
3245 {
3246         return(STATUS_SUCCESS);
3247 }
3248
3249 static ndis_status
3250 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3251         device_object           *devobj;
3252         uint32_t                regprop;
3253         uint32_t                buflen;
3254         void                    *prop;
3255         uint32_t                *reslen;
3256 {
3257         driver_object           *drv;
3258         uint16_t                **name;
3259
3260         drv = devobj->do_drvobj;
3261
3262         switch (regprop) {
3263         case DEVPROP_DRIVER_KEYNAME:
3264                 name = prop;
3265                 *name = drv->dro_drivername.us_buf;
3266                 *reslen = drv->dro_drivername.us_len;
3267                 break;
3268         default:
3269                 return(STATUS_INVALID_PARAMETER_2);
3270                 break;
3271         }
3272
3273         return(STATUS_SUCCESS);
3274 }
3275
3276 static void
3277 KeInitializeMutex(kmutex, level)
3278         kmutant                 *kmutex;
3279         uint32_t                level;
3280 {
3281         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3282         kmutex->km_abandoned = FALSE;
3283         kmutex->km_apcdisable = 1;
3284         kmutex->km_header.dh_sigstate = 1;
3285         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3286         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3287         kmutex->km_ownerthread = NULL;
3288         return;
3289 }
3290
3291 static uint32_t
3292 KeReleaseMutex(kmutex, kwait)
3293         kmutant                 *kmutex;
3294         uint8_t                 kwait;
3295 {
3296         uint32_t                prevstate;
3297
3298         mtx_lock(&ntoskrnl_dispatchlock);
3299         prevstate = kmutex->km_header.dh_sigstate;
3300         if (kmutex->km_ownerthread != curthread) {
3301                 mtx_unlock(&ntoskrnl_dispatchlock);
3302                 return(STATUS_MUTANT_NOT_OWNED);
3303         }
3304
3305         kmutex->km_header.dh_sigstate++;
3306         kmutex->km_abandoned = FALSE;
3307
3308         if (kmutex->km_header.dh_sigstate == 1) {
3309                 kmutex->km_ownerthread = NULL;
3310                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3311         }
3312
3313         mtx_unlock(&ntoskrnl_dispatchlock);
3314
3315         return(prevstate);
3316 }
3317
3318 static uint32_t
3319 KeReadStateMutex(kmutex)
3320         kmutant                 *kmutex;
3321 {
3322         return(kmutex->km_header.dh_sigstate);
3323 }
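/*
 * Usage sketch (not compiled): the kmutant life cycle as a driver
 * would see it. Acquisition is done by waiting on the mutant with
 * KeWaitForSingleObject() (the argument pattern matches the other
 * calls in this file; ownership bookkeeping happens in the wait code,
 * which is outside this section). Note that KeInitializeMutex()'s
 * level argument is ignored by this implementation.
 */
#if 0
static kmutant          example_mtx;

static void
example_mutex_user(void)
{
        KeInitializeMutex(&example_mtx, 0);

        /* Blocks until the mutant is signalled, then owns it. */
        KeWaitForSingleObject(&example_mtx, 0, 0, TRUE, NULL);

        /* ... critical section ... */

        KeReleaseMutex(&example_mtx, FALSE);
}
#endif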
3324
3325 void
3326 KeInitializeEvent(kevent, type, state)
3327         nt_kevent               *kevent;
3328         uint32_t                type;
3329         uint8_t                 state;
3330 {
3331         InitializeListHead((&kevent->k_header.dh_waitlisthead));
3332         kevent->k_header.dh_sigstate = state;
3333         if (type == EVENT_TYPE_NOTIFY)
3334                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3335         else
3336                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3337         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3338         return;
3339 }
3340
3341 uint32_t
3342 KeResetEvent(kevent)
3343         nt_kevent               *kevent;
3344 {
3345         uint32_t                prevstate;
3346
3347         mtx_lock(&ntoskrnl_dispatchlock);
3348         prevstate = kevent->k_header.dh_sigstate;
3349         kevent->k_header.dh_sigstate = FALSE;
3350         mtx_unlock(&ntoskrnl_dispatchlock);
3351
3352         return(prevstate);
3353 }
3354
3355 uint32_t
3356 KeSetEvent(kevent, increment, kwait)
3357         nt_kevent               *kevent;
3358         uint32_t                increment;
3359         uint8_t                 kwait;
3360 {
3361         uint32_t                prevstate;
3362         wait_block              *w;
3363         nt_dispatch_header      *dh;
3364         struct thread           *td;
3365         wb_ext                  *we;
3366
3367         mtx_lock(&ntoskrnl_dispatchlock);
3368         prevstate = kevent->k_header.dh_sigstate;
3369         dh = &kevent->k_header;
3370
3371         if (IsListEmpty(&dh->dh_waitlisthead))
3372                 /*
3373                  * If there's nobody in the waitlist, just set
3374                  * the state to signalled.
3375                  */
3376                 dh->dh_sigstate = 1;
3377         else {
3378                 /*
3379                  * Get the first waiter. If this is a synchronization
3380                  * event, just wake up that one thread (don't bother
3381                  * setting the state to signalled since we're supposed
3382                  * to automatically clear synchronization events anyway).
3383                  *
3384                  * If it's a notification event, or the first
3385                  * waiter is doing a WAITTYPE_ALL wait, go through
3386                  * the full wait satisfaction process.
3387                  */
3388                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3389                     wait_block, wb_waitlist);
3390                 we = w->wb_ext;
3391                 td = we->we_td;
3392                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3393                     w->wb_waittype == WAITTYPE_ALL) {
3394                         if (prevstate == 0) {
3395                                 dh->dh_sigstate = 1;
3396                                 ntoskrnl_waittest(dh, increment);
3397                         }
3398                 } else {
3399                         w->wb_awakened |= TRUE;
3400                         cv_broadcastpri(&we->we_cv,
3401                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3402                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3403                 }
3404         }
3405
3406         mtx_unlock(&ntoskrnl_dispatchlock);
3407
3408         return(prevstate);
3409 }
3410
3411 void
3412 KeClearEvent(kevent)
3413         nt_kevent               *kevent;
3414 {
3415         kevent->k_header.dh_sigstate = FALSE;
3416         return;
3417 }
3418
3419 uint32_t
3420 KeReadStateEvent(kevent)
3421         nt_kevent               *kevent;
3422 {
3423         return(kevent->k_header.dh_sigstate);
3424 }
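/*
 * Usage sketch (not compiled): a notification event used as a
 * completion flag between two threads. Per KeSetEvent() above, a
 * notification event stays signalled until explicitly reset, while a
 * synchronization event would wake one waiter and clear itself.
 */
#if 0
static nt_kevent        example_done_ev;

static void
example_event_producer(void)
{
        /* Assumed to run before the consumer starts waiting. */
        KeInitializeEvent(&example_done_ev, EVENT_TYPE_NOTIFY, FALSE);
        /* ... do some work ... */
        KeSetEvent(&example_done_ev, IO_NO_INCREMENT, FALSE);
}

static void
example_event_consumer(void)
{
        /* Sleeps until the producer signals completion. */
        KeWaitForSingleObject(&example_done_ev, 0, 0, TRUE, NULL);
        KeClearEvent(&example_done_ev);
}
#endif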
3425
3426 /*
3427  * The object manager in Windows is responsible for managing
3428  * references and access to various types of objects, including
3429  * device_objects, events, threads, timers and so on. However,
3430  * there's a difference in the way objects are handled in user
3431  * mode versus kernel mode.
3432  *
3433  * In user mode (i.e. Win32 applications), all objects are
3434  * managed by the object manager. For example, when you create
3435  * a timer or event object, you actually end up with an 
3436  * object_header (for the object manager's bookkeeping
3437  * purposes) and an object body (which contains the actual object
3438  * structure, e.g. a ktimer or kevent). This allows Windows
3439  * to manage resource quotas and to enforce access restrictions
3440  * on basically every kind of system object handled by the kernel.
3441  *
3442  * However, in kernel mode, you only end up using the object
3443  * manager some of the time. For example, in a driver, you create
3444  * a timer object by simply allocating the memory for a ktimer
3445  * structure and initializing it with KeInitializeTimer(). Hence,
3446  * the timer has no object_header and no reference counting or
3447  * security/resource checks are done on it. The assumption in
3448  * this case is that if you're running in kernel mode, you know
3449  * what you're doing, and you're already at an elevated privilege
3450  * anyway.
3451  *
3452  * There are some exceptions to this. The two most important ones
3453  * for our purposes are device_objects and threads. We need to use
3454  * the object manager to do reference counting on device_objects,
3455  * and for threads, you can only get a pointer to a thread's
3456  * dispatch header by using ObReferenceObjectByHandle() on the
3457  * handle returned by PsCreateSystemThread().
3458  */
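/*
 * Usage sketch (not compiled): the thread case described above.
 * A driver creates a system thread, converts the handle into a
 * waitable object reference, and then waits for the exit signal that
 * PsTerminateSystemThread() posts below. example_thread_func is a
 * stand-in name; a real one would live in the Windows driver image
 * and use the Windows calling convention.
 */
#if 0
static uint32_t example_thread_func(void *);

static void
example_join_thread(void *ctx)
{
        ndis_handle             th;
        void                    *obj;

        if (PsCreateSystemThread(&th, 0, NULL, NULL, NULL,
            example_thread_func, ctx) != STATUS_SUCCESS)
                return;

        /* Turn the handle into a waitable object reference. */
        ObReferenceObjectByHandle(th, 0, NULL, 0, &obj, NULL);
        ZwClose(th);

        /* Sleeps until the thread calls PsTerminateSystemThread(). */
        KeWaitForSingleObject(obj, 0, 0, TRUE, NULL);
        ObfDereferenceObject(obj);
}
#endif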
3459
3460 static ndis_status
3461 ObReferenceObjectByHandle(handle, reqaccess, otype,
3462     accessmode, object, handleinfo)
3463         ndis_handle             handle;
3464         uint32_t                reqaccess;
3465         void                    *otype;
3466         uint8_t                 accessmode;
3467         void                    **object;
3468         void                    **handleinfo;
3469 {
3470         nt_objref               *nr;
3471
3472         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3473         if (nr == NULL)
3474                 return(STATUS_INSUFFICIENT_RESOURCES);
3475
3476         InitializeListHead((&nr->no_dh.dh_waitlisthead));
3477         nr->no_obj = handle;
3478         nr->no_dh.dh_type = DISP_TYPE_THREAD;
3479         nr->no_dh.dh_sigstate = 0;
3480         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3481             sizeof(uint32_t));
3482         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3483         *object = nr;
3484
3485         return(STATUS_SUCCESS);
3486 }
3487
3488 static void
3489 ObfDereferenceObject(object)
3490         void                    *object;
3491 {
3492         nt_objref               *nr;
3493
3494         nr = object;
3495         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3496         free(nr, M_DEVBUF);
3497
3498         return;
3499 }
3500
3501 static uint32_t
3502 ZwClose(handle)
3503         ndis_handle             handle;
3504 {
3505         return(STATUS_SUCCESS);
3506 }
3507
3508 static uint32_t
3509 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3510         uint32_t                traceclass;
3511         void                    *traceinfo;
3512         uint32_t                infolen;
3513         uint32_t                reqlen;
3514         void                    *buf;
3515 {
3516         return(STATUS_NOT_FOUND);
3517 }
3518
3519 static uint32_t
3520 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3521         void *guid, uint16_t messagenum, ...)
3522 {
3523         return(STATUS_SUCCESS);
3524 }
3525
3526 static uint32_t
3527 IoWMIRegistrationControl(dobj, action)
3528         device_object           *dobj;
3529         uint32_t                action;
3530 {
3531         return(STATUS_SUCCESS);
3532 }
3533
3534 /*
3535  * This is here just in case the thread returns without calling
3536  * PsTerminateSystemThread().
3537  */
3538 static void
3539 ntoskrnl_thrfunc(arg)
3540         void                    *arg;
3541 {
3542         thread_context          *thrctx;
3543         uint32_t (*tfunc)(void *);
3544         void                    *tctx;
3545         uint32_t                rval;
3546
3547         thrctx = arg;
3548         tfunc = thrctx->tc_thrfunc;
3549         tctx = thrctx->tc_thrctx;
3550         free(thrctx, M_TEMP);
3551
3552         rval = MSCALL1(tfunc, tctx);
3553
3554         PsTerminateSystemThread(rval);
3555         return; /* notreached */
3556 }
3557
3558 static ndis_status
3559 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3560         clientid, thrfunc, thrctx)
3561         ndis_handle             *handle;
3562         uint32_t                reqaccess;
3563         void                    *objattrs;
3564         ndis_handle             phandle;
3565         void                    *clientid;
3566         void                    *thrfunc;
3567         void                    *thrctx;
3568 {
3569         int                     error;
3570         char                    tname[128];
3571         thread_context          *tc;
3572         struct proc             *p;
3573
3574         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3575         if (tc == NULL)
3576                 return(STATUS_INSUFFICIENT_RESOURCES);
3577
3578         tc->tc_thrctx = thrctx;
3579         tc->tc_thrfunc = thrfunc;
3580
3581         snprintf(tname, sizeof(tname), "windows kthread %d", ntoskrnl_kth);
3582         error = kproc_create(ntoskrnl_thrfunc, tc, &p,
3583             RFHIGHPID, NDIS_KSTACK_PAGES, tname);
3584
3585         if (error) {
3586                 free(tc, M_TEMP);
3587                 return(STATUS_INSUFFICIENT_RESOURCES);
3588         }
3589
3590         *handle = p;
3591         ntoskrnl_kth++;
3592
3593         return(STATUS_SUCCESS);
3594 }
3595
3596 /*
3597  * In Windows, the exit of a thread is an event that you're allowed
3598  * to wait on, assuming you've obtained a reference to the thread using
3599  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3600  * simulate this behavior is to register each thread we create in a
3601  * reference list, and when a thread exits, signal the dispatch
3602  * header of any reference to it so that waiters are woken up.
3603  */
3604 static ndis_status
3605 PsTerminateSystemThread(status)
3606         ndis_status             status;
3607 {
3608         struct nt_objref        *nr;
3609
3610         mtx_lock(&ntoskrnl_dispatchlock);
3611         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3612                 if (nr->no_obj != curthread->td_proc)
3613                         continue;
3614                 nr->no_dh.dh_sigstate = 1;
3615                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3616                 break;
3617         }
3618         mtx_unlock(&ntoskrnl_dispatchlock);
3619
3620         ntoskrnl_kth--;
3621
3622 #if __FreeBSD_version < 502113
3623         mtx_lock(&Giant);
3624 #endif
3625         kproc_exit(0);
3626         return(0);      /* notreached */
3627 }
3628
3629 static uint32_t
3630 DbgPrint(char *fmt, ...)
3631 {
3632         va_list                 ap;
3633
3634         if (bootverbose) {
3635                 va_start(ap, fmt);
3636                 vprintf(fmt, ap);
3637                 va_end(ap);
3638         }
3639         return(STATUS_SUCCESS);
3640 }
3641
3642 static void
3643 DbgBreakPoint(void)
3644 {
3645
3646 #if __FreeBSD_version < 502113
3647         Debugger("DbgBreakPoint(): breakpoint");
3648 #else
3649         kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
3650 #endif
3651 }
3652
3653 static void
3654 KeBugCheckEx(code, param1, param2, param3, param4)
3655     uint32_t                    code;
3656     u_long                      param1;
3657     u_long                      param2;
3658     u_long                      param3;
3659     u_long                      param4;
3660 {
3661         panic("KeBugCheckEx: STOP 0x%X", code);
3662 }
3663
3664 static void
3665 ntoskrnl_timercall(arg)
3666         void                    *arg;
3667 {
3668         ktimer                  *timer;
3669         struct timeval          tv;
3670         kdpc                    *dpc;
3671
3672         mtx_lock(&ntoskrnl_dispatchlock);
3673
3674         timer = arg;
3675
3676 #ifdef NTOSKRNL_DEBUG_TIMERS
3677         ntoskrnl_timer_fires++;
3678 #endif
3679         ntoskrnl_remove_timer(timer);
3680
3681         /*
3682          * This should never happen, but complain
3683          * if it does.
3684          */
3685
3686         if (timer->k_header.dh_inserted == FALSE) {
3687                 mtx_unlock(&ntoskrnl_dispatchlock);
3688                 printf("NTOS: timer %p fired even though "
3689                     "it was canceled\n", timer);
3690                 return;
3691         }
3692
3693         /* Mark the timer as no longer being on the timer queue. */
3694
3695         timer->k_header.dh_inserted = FALSE;
3696
3697         /* Now signal the object and satisfy any waits on it. */
3698
3699         timer->k_header.dh_sigstate = 1;
3700         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3701
3702         /*
3703          * If this is a periodic timer, re-arm it
3704          * so it will fire again. We do this before
3705          * calling any deferred procedure calls because
3706          * it's possible the DPC might cancel the timer,
3707          * in which case it would be wrong for us to
3708          * re-arm it again afterwards.
3709          */
3710
3711         if (timer->k_period) {
3712                 tv.tv_sec = 0;
3713                 tv.tv_usec = timer->k_period * 1000;
3714                 timer->k_header.dh_inserted = TRUE;
3715                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3716 #ifdef NTOSKRNL_DEBUG_TIMERS
3717                 ntoskrnl_timer_reloads++;
3718 #endif
3719         }
3720
3721         dpc = timer->k_dpc;
3722
3723         mtx_unlock(&ntoskrnl_dispatchlock);
3724
3725         /* If there's a DPC associated with the timer, queue it up. */
3726
3727         if (dpc != NULL)
3728                 KeInsertQueueDpc(dpc, NULL, NULL);
3729
3730         return;
3731 }
3732
3733 #ifdef NTOSKRNL_DEBUG_TIMERS
3734 static int
3735 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3736 {
3737         int                     ret;
3738
3739         ret = 0;
3740         ntoskrnl_show_timers();
3741         return (sysctl_handle_int(oidp, &ret, 0, req));
3742 }
3743
3744 static void
3745 ntoskrnl_show_timers()
3746 {
3747         int                     i = 0;
3748         list_entry              *l;
3749
3750         mtx_lock_spin(&ntoskrnl_calllock);
3751         l = ntoskrnl_calllist.nle_flink;
3752         while(l != &ntoskrnl_calllist) {
3753                 i++;
3754                 l = l->nle_flink;
3755         }
3756         mtx_unlock_spin(&ntoskrnl_calllock);
3757
3758         printf("\n");
3759         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3760         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3761         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3762         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3763         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3764         printf("\n");
3765
3766         return;
3767 }
3768 #endif
3769
3770 /*
3771  * Must be called with dispatcher lock held.
3772  */
3773
3774 static void
3775 ntoskrnl_insert_timer(timer, ticks)
3776         ktimer                  *timer;
3777         int                     ticks;
3778 {
3779         callout_entry           *e;
3780         list_entry              *l;
3781         struct callout          *c;
3782
3783         /*
3784          * Try and allocate a timer.
3785          */
3786         mtx_lock_spin(&ntoskrnl_calllock);
3787         if (IsListEmpty(&ntoskrnl_calllist)) {
3788                 mtx_unlock_spin(&ntoskrnl_calllock);
3789 #ifdef NTOSKRNL_DEBUG_TIMERS
3790                 ntoskrnl_show_timers();
3791 #endif
3792                 panic("out of timers!");
3793         }
3794         l = RemoveHeadList(&ntoskrnl_calllist);
3795         mtx_unlock_spin(&ntoskrnl_calllock);
3796
3797         e = CONTAINING_RECORD(l, callout_entry, ce_list);
3798         c = &e->ce_callout;
3799
3800         timer->k_callout = c;
3801
3802         callout_init(c, CALLOUT_MPSAFE);
3803         callout_reset(c, ticks, ntoskrnl_timercall, timer);
3804
3805         return;
3806 }
3807
3808 static void
3809 ntoskrnl_remove_timer(timer)
3810         ktimer                  *timer;
3811 {
3812         callout_entry           *e;
3813
3814         e = (callout_entry *)timer->k_callout;
3815         callout_stop(timer->k_callout);
3816
3817         mtx_lock_spin(&ntoskrnl_calllock);
3818         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3819         mtx_unlock_spin(&ntoskrnl_calllock);
3820
3821         return;
3822 }
3823
3824 void
3825 KeInitializeTimer(timer)
3826         ktimer                  *timer;
3827 {
3828         if (timer == NULL)
3829                 return;
3830
3831         KeInitializeTimerEx(timer,  EVENT_TYPE_NOTIFY);
3832
3833         return;
3834 }
3835
3836 void
3837 KeInitializeTimerEx(timer, type)
3838         ktimer                  *timer;
3839         uint32_t                type;
3840 {
3841         if (timer == NULL)
3842                 return;
3843
3844         bzero((char *)timer, sizeof(ktimer));
3845         InitializeListHead((&timer->k_header.dh_waitlisthead));
3846         timer->k_header.dh_sigstate = FALSE;
3847         timer->k_header.dh_inserted = FALSE;
3848         if (type == EVENT_TYPE_NOTIFY)
3849                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3850         else
3851                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3852         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3853
3854         return;
3855 }
3856
3857 /*
3858  * DPC subsystem. A Windows Deferred Procedure Call has the following
3859  * properties:
3860  * - It runs at DISPATCH_LEVEL.
3861  * - It can have one of 3 importance values that control when it
3862  *   runs relative to other DPCs in the queue.
3863  * - On SMP systems, it can be set to run on a specific processor.
3864  * In order to satisfy the last property, we create a DPC thread for
3865  * each CPU in the system and bind it to that CPU. Each thread
3866  * maintains a single dispatch queue, with low-importance DPCs
3867  * inserted at the tail and all others inserted at the head.
3868  *
3869  * In Windows, interrupt handlers run as DPCs. (Not to be confused
3870  * with ISRs, which run in interrupt context and can preempt DPCs.)
3871  * DPCs queued by ISRs are given the highest importance so that
3872  * they'll take precedence over timers and other things.
3873  */
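/*
 * Usage sketch (not compiled): initializing and queueing a DPC with
 * the routines below. In real use the deferred routine lives in a
 * Windows driver image and is invoked via MSCALL4() with the Windows
 * calling convention; the plain C routine here is a stand-in that
 * shows the argument shape (dpc, context, sysarg1, sysarg2).
 */
#if 0
static void
example_dpc_func(kdpc *dpc, void *ctx, void *sysarg1, void *sysarg2)
{
        /* Runs at PRI_MIN_KERN in one of the DPC threads below. */
}

static kdpc             example_dpc;

static void
example_queue_dpc(void *ctx)
{
        KeInitializeDpc(&example_dpc, example_dpc_func, ctx);
        KeSetImportanceDpc(&example_dpc, KDPC_IMPORTANCE_HIGH);
        /* Target CPU is honored only with NTOSKRNL_MULTIPLE_DPCS. */
        KeSetTargetProcessorDpc(&example_dpc, 0);
        KeInsertQueueDpc(&example_dpc, NULL, NULL);
}
#endif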
3874
3875 static void
3876 ntoskrnl_dpc_thread(arg)
3877         void                    *arg;
3878 {
3879         kdpc_queue              *kq;
3880         kdpc                    *d;
3881         list_entry              *l;
3882         uint8_t                 irql;
3883
3884         kq = arg;
3885
3886         InitializeListHead(&kq->kq_disp);
3887         kq->kq_td = curthread;
3888         kq->kq_exit = 0;
3889         kq->kq_running = FALSE;
3890         KeInitializeSpinLock(&kq->kq_lock);
3891         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3892         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3893
3894         /*
3895          * Elevate our priority. DPCs are used to run interrupt
3896          * handlers, and they should trigger as soon as possible
3897          * once scheduled by an ISR.
3898          */
3899
3900         thread_lock(curthread);
3901 #ifdef NTOSKRNL_MULTIPLE_DPCS
3902 #if __FreeBSD_version >= 502102
3903         sched_bind(curthread, kq->kq_cpu);
3904 #endif
3905 #endif
3906         sched_prio(curthread, PRI_MIN_KERN);
3907 #if __FreeBSD_version < 600000
3908         curthread->td_base_pri = PRI_MIN_KERN;
3909 #endif
3910         thread_unlock(curthread);
3911
3912         while (1) {
3913                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3914
3915                 KeAcquireSpinLock(&kq->kq_lock, &irql);
3916
3917                 if (kq->kq_exit) {
3918                         kq->kq_exit = 0;
3919                         KeReleaseSpinLock(&kq->kq_lock, irql);
3920                         break;
3921                 }
3922
3923                 kq->kq_running = TRUE;
3924
3925                 while (!IsListEmpty(&kq->kq_disp)) {
3926                         l = RemoveHeadList((&kq->kq_disp));
3927                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3928                         InitializeListHead((&d->k_dpclistentry));
3929                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3930                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3931                             d->k_sysarg1, d->k_sysarg2);
3932                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3933                 }
3934
3935                 kq->kq_running = FALSE;
3936
3937                 KeReleaseSpinLock(&kq->kq_lock, irql);
3938
3939                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3940         }
3941
3942 #if __FreeBSD_version < 502113
3943         mtx_lock(&Giant);
3944 #endif
3945         kproc_exit(0);
3946         return; /* notreached */
3947 }
3948
3949 static void
3950 ntoskrnl_destroy_dpc_threads(void)
3951 {
3952         kdpc_queue              *kq;
3953         kdpc                    dpc;
3954         int                     i;
3955
3956         kq = kq_queues;
3957 #ifdef NTOSKRNL_MULTIPLE_DPCS
3958         for (i = 0; i < mp_ncpus; i++) {
3959 #else
3960         for (i = 0; i < 1; i++) {
3961 #endif
3962                 kq = kq_queues + i;
3963
3964                 kq->kq_exit = 1;
3965                 KeInitializeDpc(&dpc, NULL, NULL);
3966                 KeSetTargetProcessorDpc(&dpc, i);
3967                 KeInsertQueueDpc(&dpc, NULL, NULL);
3968                 while (kq->kq_exit)
3969                         tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
3970         }
3971
3972         return;
3973 }
3974
3975 static uint8_t
3976 ntoskrnl_insert_dpc(head, dpc)
3977         list_entry              *head;
3978         kdpc                    *dpc;
3979 {
3980         list_entry              *l;
3981         kdpc                    *d;
3982
3983         l = head->nle_flink;
3984         while (l != head) {
3985                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3986                 if (d == dpc)
3987                         return(FALSE);
3988                 l = l->nle_flink;
3989         }
3990
3991         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3992                 InsertTailList((head), (&dpc->k_dpclistentry));
3993         else
3994                 InsertHeadList((head), (&dpc->k_dpclistentry));
3995
3996         return (TRUE);
3997 }
3998
3999 void
4000 KeInitializeDpc(dpc, dpcfunc, dpcctx)
4001         kdpc                    *dpc;
4002         void                    *dpcfunc;
4003         void                    *dpcctx;
4004 {
4005
4006         if (dpc == NULL)
4007                 return;
4008
4009         dpc->k_deferedfunc = dpcfunc;
4010         dpc->k_deferredctx = dpcctx;
4011         dpc->k_num = KDPC_CPU_DEFAULT;
4012         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
4013         InitializeListHead((&dpc->k_dpclistentry));
4014
4015         return;
4016 }
4017
4018 uint8_t
4019 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
4020         kdpc                    *dpc;
4021         void                    *sysarg1;
4022         void                    *sysarg2;
4023 {
4024         kdpc_queue              *kq;
4025         uint8_t                 r;
4026         uint8_t                 irql;
4027
4028         if (dpc == NULL)
4029                 return(FALSE);
4030
4031         kq = kq_queues;
4032
4033 #ifdef NTOSKRNL_MULTIPLE_DPCS
4034         KeRaiseIrql(DISPATCH_LEVEL, &irql);
4035
4036         /*
4037          * By default, the DPC is queued to run on the same CPU
4038          * that scheduled it.
4039          */
4040
4041         if (dpc->k_num == KDPC_CPU_DEFAULT)
4042                 kq += curthread->td_oncpu;
4043         else
4044                 kq += dpc->k_num;
4045         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
4046 #else
4047         KeAcquireSpinLock(&kq->kq_lock, &irql);
4048 #endif
4049
4050         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
4051         if (r == TRUE) {
4052                 dpc->k_sysarg1 = sysarg1;
4053                 dpc->k_sysarg2 = sysarg2;
4054         }
4055         KeReleaseSpinLock(&kq->kq_lock, irql);
4056
4057         if (r == FALSE)
4058                 return(r);
4059
4060         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
4061
4062         return(r);
4063 }
4064
4065 uint8_t
4066 KeRemoveQueueDpc(dpc)
4067         kdpc                    *dpc;
4068 {
4069         kdpc_queue              *kq;
4070         uint8_t                 irql;
4071
4072         if (dpc == NULL)
4073                 return(FALSE);
4074
4075 #ifdef NTOSKRNL_MULTIPLE_DPCS
4076         KeRaiseIrql(DISPATCH_LEVEL, &irql);
4077
4078         kq = kq_queues + dpc->k_num;
4079
4080         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
4081 #else
4082         kq = kq_queues;
4083         KeAcquireSpinLock(&kq->kq_lock, &irql);
4084 #endif
4085
4086         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
4087                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
4088                 KeLowerIrql(irql);
4089                 return(FALSE);
4090         }
4091
4092         RemoveEntryList((&dpc->k_dpclistentry));
4093         InitializeListHead((&dpc->k_dpclistentry));
4094
4095         KeReleaseSpinLock(&kq->kq_lock, irql);
4096
4097         return(TRUE);
4098 }
4099
4100 void
4101 KeSetImportanceDpc(dpc, imp)
4102         kdpc                    *dpc;
4103         uint32_t                imp;
4104 {
4105         if (imp != KDPC_IMPORTANCE_LOW &&
4106             imp != KDPC_IMPORTANCE_MEDIUM &&
4107             imp != KDPC_IMPORTANCE_HIGH)
4108                 return;
4109
4110         dpc->k_importance = (uint8_t)imp;
4111         return;
4112 }
4113
4114 void
4115 KeSetTargetProcessorDpc(dpc, cpu)
4116         kdpc                    *dpc;
4117         uint8_t                 cpu;
4118 {
4119         if (cpu >= mp_ncpus)
4120                 return;
4121
4122         dpc->k_num = cpu;
4123         return;
4124 }
4125
4126 void
4127 KeFlushQueuedDpcs(void)
4128 {
4129         kdpc_queue              *kq;
4130         int                     i;
4131
4132         /*
4133          * Poke each DPC queue and wait
4134          * for them to drain.
4135          */
4136
4137 #ifdef NTOSKRNL_MULTIPLE_DPCS
4138         for (i = 0; i < mp_ncpus; i++) {
4139 #else
4140         for (i = 0; i < 1; i++) {
4141 #endif
4142                 kq = kq_queues + i;
4143                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
4144                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
4145         }
4146
4147         return;
4148 }
4149
4150 uint32_t
4151 KeGetCurrentProcessorNumber(void)
4152 {
4153         return((uint32_t)curthread->td_oncpu);
4154 }
4155
4156 uint8_t
4157 KeSetTimerEx(timer, duetime, period, dpc)
4158         ktimer                  *timer;
4159         int64_t                 duetime;
4160         uint32_t                period;
4161         kdpc                    *dpc;
4162 {
4163         struct timeval          tv;
4164         uint64_t                curtime;
4165         uint8_t                 pending;
4166
4167         if (timer == NULL)
4168                 return(FALSE);
4169
4170         mtx_lock(&ntoskrnl_dispatchlock);
4171
4172         if (timer->k_header.dh_inserted == TRUE) {
4173                 ntoskrnl_remove_timer(timer);
4174 #ifdef NTOSKRNL_DEBUG_TIMERS
4175                 ntoskrnl_timer_cancels++;
4176 #endif
4177                 timer->k_header.dh_inserted = FALSE;
4178                 pending = TRUE;
4179         } else
4180                 pending = FALSE;
4181
4182         timer->k_duetime = duetime;
4183         timer->k_period = period;
4184         timer->k_header.dh_sigstate = FALSE;
4185         timer->k_dpc = dpc;
4186
4187         if (duetime < 0) {
4188                 tv.tv_sec = - (duetime) / 10000000;
4189                 tv.tv_usec = (- (duetime) / 10) -
4190                     (tv.tv_sec * 1000000);
4191         } else {
4192                 ntoskrnl_time(&curtime);
4193                 if (duetime < curtime)
4194                         tv.tv_sec = tv.tv_usec = 0;
4195                 else {
4196                         tv.tv_sec = ((duetime) - curtime) / 10000000;
4197                         tv.tv_usec = ((duetime) - curtime) / 10 -
4198                             (tv.tv_sec * 1000000);
4199                 }
4200         }
4201
4202         timer->k_header.dh_inserted = TRUE;
4203         ntoskrnl_insert_timer(timer, tvtohz(&tv));
4204 #ifdef NTOSKRNL_DEBUG_TIMERS
4205         ntoskrnl_timer_sets++;
4206 #endif
4207
4208         mtx_unlock(&ntoskrnl_dispatchlock);
4209
4210         return(pending);
4211 }
4212
4213 uint8_t
4214 KeSetTimer(timer, duetime, dpc)
4215         ktimer                  *timer;
4216         int64_t                 duetime;
4217         kdpc                    *dpc;
4218 {
4219         return (KeSetTimerEx(timer, duetime, 0, dpc));
4220 }
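/*
 * Usage sketch (not compiled): a periodic timer that queues a DPC
 * every 100ms. Due times follow the convention handled in
 * KeSetTimerEx() above: a negative value is a relative time in 100ns
 * units, a positive value is an absolute system time, and the period
 * is in milliseconds.
 */
#if 0
static ktimer           example_timer;
static kdpc             example_timer_dpc;

static void
example_timer_dpc_func(kdpc *dpc, void *ctx, void *sysarg1, void *sysarg2)
{
        /* Fires roughly every 100ms until the timer is cancelled. */
}

static void
example_start_timer(void)
{
        KeInitializeTimer(&example_timer);
        KeInitializeDpc(&example_timer_dpc, example_timer_dpc_func, NULL);
        /* First expiry 100ms from now: -1000000 * 100ns = -100ms. */
        KeSetTimerEx(&example_timer, -1000000LL, 100, &example_timer_dpc);
}

static void
example_stop_timer(void)
{
        KeCancelTimer(&example_timer);  /* the DPC is not cancelled */
}
#endif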
4221
4222 /*
4223  * The Windows DDK documentation seems to say that cancelling
4224  * a timer that has a DPC will result in the DPC also being
4225  * cancelled, but this isn't really the case.
4226  */
4227
4228 uint8_t
4229 KeCancelTimer(timer)
4230         ktimer                  *timer;
4231 {
4232         uint8_t                 pending;
4233
4234         if (timer == NULL)
4235                 return(FALSE);
4236
4237         mtx_lock(&ntoskrnl_dispatchlock);
4238
4239         pending = timer->k_header.dh_inserted;
4240
4241         if (timer->k_header.dh_inserted == TRUE) {
4242                 timer->k_header.dh_inserted = FALSE;
4243                 ntoskrnl_remove_timer(timer);
4244 #ifdef NTOSKRNL_DEBUG_TIMERS
4245                 ntoskrnl_timer_cancels++;
4246 #endif
4247         }
4248
4249         mtx_unlock(&ntoskrnl_dispatchlock);
4250
4251         return(pending);
4252 }
4253
4254 uint8_t
4255 KeReadStateTimer(timer)
4256         ktimer                  *timer;
4257 {
4258         return(timer->k_header.dh_sigstate);
4259 }
4260
4261 static void
4262 dummy()
4263 {
4264         printf("ntoskrnl dummy called...\n");
4265         return;
4266 }
4267
4268
4269 image_patch_table ntoskrnl_functbl[] = {
4270         IMPORT_SFUNC(RtlZeroMemory, 2),
4271         IMPORT_SFUNC(RtlCopyMemory, 3),
4272         IMPORT_SFUNC(RtlCompareMemory, 3),
4273         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4274         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4275         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4276         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4277         IMPORT_SFUNC(RtlInitAnsiString, 2),
4278         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4279         IMPORT_SFUNC(RtlInitUnicodeString, 2),
4280         IMPORT_SFUNC(RtlFreeAnsiString, 1),
4281         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4282         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4283         IMPORT_CFUNC(sprintf, 0),
4284         IMPORT_CFUNC(vsprintf, 0),
4285         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4286         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4287         IMPORT_CFUNC(DbgPrint, 0),
4288         IMPORT_SFUNC(DbgBreakPoint, 0),
4289         IMPORT_SFUNC(KeBugCheckEx, 5),
4290         IMPORT_CFUNC(strncmp, 0),
4291         IMPORT_CFUNC(strcmp, 0),
4292         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4293         IMPORT_CFUNC(strncpy, 0),
4294         IMPORT_CFUNC(strcpy, 0),
4295         IMPORT_CFUNC(strlen, 0),
4296         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4297         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4298         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4299         IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
4300         IMPORT_CFUNC_MAP(strchr, index, 0),
4301         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4302         IMPORT_CFUNC(memcpy, 0),
4303         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4304         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4305         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4306         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4307         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4308         IMPORT_FFUNC(IofCallDriver, 2),
4309         IMPORT_FFUNC(IofCompleteRequest, 2),
4310         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4311         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4312         IMPORT_SFUNC(IoCancelIrp, 1),
4313         IMPORT_SFUNC(IoConnectInterrupt, 11),
4314         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4315         IMPORT_SFUNC(IoCreateDevice, 7),
4316         IMPORT_SFUNC(IoDeleteDevice, 1),
4317         IMPORT_SFUNC(IoGetAttachedDevice, 1),
4318         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4319         IMPORT_SFUNC(IoDetachDevice, 1),
4320         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4321         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4322         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4323         IMPORT_SFUNC(IoAllocateIrp, 2),
4324         IMPORT_SFUNC(IoReuseIrp, 2),
4325         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4326         IMPORT_SFUNC(IoFreeIrp, 1),
4327         IMPORT_SFUNC(IoInitializeIrp, 3),
4328         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4329         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4330         IMPORT_SFUNC(KeSynchronizeExecution, 3),
4331         IMPORT_SFUNC(KeWaitForSingleObject, 5),
4332         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4333         IMPORT_SFUNC(_allmul, 4),
4334         IMPORT_SFUNC(_alldiv, 4),
4335         IMPORT_SFUNC(_allrem, 4),
4336         IMPORT_RFUNC(_allshr, 0),
4337         IMPORT_RFUNC(_allshl, 0),
4338         IMPORT_SFUNC(_aullmul, 4),
4339         IMPORT_SFUNC(_aulldiv, 4),
4340         IMPORT_SFUNC(_aullrem, 4),
4341         IMPORT_RFUNC(_aullshr, 0),
4342         IMPORT_RFUNC(_aullshl, 0),
4343         IMPORT_CFUNC(atoi, 0),
4344         IMPORT_CFUNC(atol, 0),
4345         IMPORT_CFUNC(rand, 0),
4346         IMPORT_CFUNC(srand, 0),
4347         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4348         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4349         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4350         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4351         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4352         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4353         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4354         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4355         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4356         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4357         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4358         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4359         IMPORT_SFUNC(ExQueryDepthSList, 1),
4360         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4361                 InterlockedPopEntrySList, 1),
4362         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4363                 InterlockedPushEntrySList, 2),
4364         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4365         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4366         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4367         IMPORT_SFUNC(ExFreePool, 1),
4368 #ifdef __i386__
4369         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4370         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel, 1),
4371         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4372 #else
4373         /*
4374          * For AMD64, we can get away with just mapping
4375          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4376          * because the calling conventions end up being the same.
4377          * On i386, we have to be careful because KfAcquireSpinLock()
4378          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4379          */
4380         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4381         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4382         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4383 #endif
4384         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4385         IMPORT_FFUNC(InterlockedIncrement, 1),
4386         IMPORT_FFUNC(InterlockedDecrement, 1),
4387         IMPORT_FFUNC(InterlockedExchange, 2),
4388         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4389         IMPORT_SFUNC(IoAllocateMdl, 5),
4390         IMPORT_SFUNC(IoFreeMdl, 1),
4391         IMPORT_SFUNC(MmAllocateContiguousMemory, 2),
4392         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5),
4393         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4394         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4395         IMPORT_SFUNC_MAP(MmGetPhysicalAddress, pmap_kextract, 1),
4396         IMPORT_SFUNC(MmSizeOfMdl, 1),
4397         IMPORT_SFUNC(MmMapLockedPages, 2),
4398         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4399         IMPORT_SFUNC(MmUnmapLockedPages, 2),
4400         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4401         IMPORT_SFUNC(MmIsAddressValid, 1),
4402         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4403         IMPORT_SFUNC(MmUnmapIoSpace, 2),
4404         IMPORT_SFUNC(KeInitializeSpinLock, 1),
4405         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4406         IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
4407         IMPORT_SFUNC(IoGetDeviceProperty, 5),
4408         IMPORT_SFUNC(IoAllocateWorkItem, 1),
4409         IMPORT_SFUNC(IoFreeWorkItem, 1),
4410         IMPORT_SFUNC(IoQueueWorkItem, 4),
4411         IMPORT_SFUNC(ExQueueWorkItem, 2),
4412         IMPORT_SFUNC(ntoskrnl_workitem, 2),
4413         IMPORT_SFUNC(KeInitializeMutex, 2),
4414         IMPORT_SFUNC(KeReleaseMutex, 2),
4415         IMPORT_SFUNC(KeReadStateMutex, 1),
4416         IMPORT_SFUNC(KeInitializeEvent, 3),
4417         IMPORT_SFUNC(KeSetEvent, 3),
4418         IMPORT_SFUNC(KeResetEvent, 1),
4419         IMPORT_SFUNC(KeClearEvent, 1),
4420         IMPORT_SFUNC(KeReadStateEvent, 1),
4421         IMPORT_SFUNC(KeInitializeTimer, 1),
4422         IMPORT_SFUNC(KeInitializeTimerEx, 2),
4423         IMPORT_SFUNC(KeSetTimer, 3),
4424         IMPORT_SFUNC(KeSetTimerEx, 4),
4425         IMPORT_SFUNC(KeCancelTimer, 1),
4426         IMPORT_SFUNC(KeReadStateTimer, 1),
4427         IMPORT_SFUNC(KeInitializeDpc, 3),
4428         IMPORT_SFUNC(KeInsertQueueDpc, 3),
4429         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4430         IMPORT_SFUNC(KeSetImportanceDpc, 2),
4431         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4432         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4433         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4434         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4435         IMPORT_FFUNC(ObfDereferenceObject, 1),
4436         IMPORT_SFUNC(ZwClose, 1),
4437         IMPORT_SFUNC(PsCreateSystemThread, 7),
4438         IMPORT_SFUNC(PsTerminateSystemThread, 1),
4439         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4440         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4441         IMPORT_CFUNC(WmiTraceMessage, 0),
4442         IMPORT_SFUNC(KeQuerySystemTime, 1),
4443         IMPORT_CFUNC(KeTickCount, 0),
4444
4445         /*
4446          * This last entry is a catch-all for any function we haven't
4447          * implemented yet. The PE import list patching routine will
4448          * use it for any function that doesn't have an explicit match
4449          * in this table.
4450          */
4451
4452         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4453
4454         /* End of list. */
4455
4456         { NULL, NULL, NULL }
4457 };