/*-
 * Copyright (c) 2003
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>

#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
        sysctl_show_timers, "I", "Show ntoskrnl timer stats");
#endif

struct kdpc_queue {
        list_entry              kq_disp;
        struct thread           *kq_td;
        int                     kq_cpu;
        int                     kq_exit;
        int                     kq_running;
        kspin_lock              kq_lock;
        nt_kevent               kq_proc;
        nt_kevent               kq_done;
};

typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
        struct cv               we_cv;
        struct thread           *we_td;
};

typedef struct wb_ext wb_ext;

#define NTOSKRNL_TIMEOUTS       256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
        struct callout          ce_callout;
        list_entry              ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;
struct kuser_shared_data kuser_shared_data;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;

static uint8_t RtlEqualUnicodeString(unicode_string *,
        unicode_string *, uint8_t);
static void RtlCopyString(ansi_string *, const ansi_string *);
static void RtlCopyUnicodeString(unicode_string *,
        unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
        void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
        device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
        device_object *, void *, uint32_t, void *, uint32_t,
        uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
        nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
        int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static void InitializeSListHead(slist_header *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExFreePoolWithTag(void *, uint32_t);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
        lookaside_alloc_func *, lookaside_free_func *,
        uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
        lookaside_alloc_func *, lookaside_free_func *,
        uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
        *ExInterlockedPushEntrySList(slist_header *,
        slist_entry *, kspin_lock *);
static slist_entry
        *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
        uint64_t, uint64_t, uint64_t, enum nt_caching_type);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
        enum nt_caching_type);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
        uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlSecureZeroMemory(void *, size_t);
static void RtlFillMemory(void *, size_t, uint8_t);
static void RtlMoveMemory(void *, const void *, size_t);
static ndis_status RtlCharToInteger(const char *, uint32_t, uint32_t *);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
        uint32_t, uint32_t *);
static int atoi (const char *);
static long atol (const char *);
static int rand(void);
static void srand(unsigned int);
static void KeQuerySystemTime(uint64_t *);
static uint32_t KeTickCount(void);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
        uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceObjectPointer(unicode_string *,
        uint32_t, void *, device_object *);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
        uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
        uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
        uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static char *ntoskrnl_strncat(char *, char *, size_t);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
static int32_t KeSetPriorityThread(struct thread *, int32_t);
static void dummy(void);

static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;

int
ntoskrnl_libinit()
{
        image_patch_table       *patch;
        int                     error;
        struct proc             *p;
        kdpc_queue              *kq;
        callout_entry           *e;
        int                     i;

        mtx_init(&ntoskrnl_dispatchlock,
            "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
        mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
        KeInitializeSpinLock(&ntoskrnl_cancellock);
        KeInitializeSpinLock(&ntoskrnl_intlock);
        TAILQ_INIT(&ntoskrnl_reflist);

        InitializeListHead(&ntoskrnl_calllist);
        InitializeListHead(&ntoskrnl_intlist);
        mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

        kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
            sizeof(kdpc_queue) * mp_ncpus, 0);
#else
            sizeof(kdpc_queue), 0);
#endif

        if (kq_queues == NULL)
                return (ENOMEM);

        wq_queues = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

        if (wq_queues == NULL)
                return (ENOMEM);

#ifdef NTOSKRNL_MULTIPLE_DPCS
        bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
        bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
        bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

        /*
         * Launch the DPC threads.
         */

#ifdef NTOSKRNL_MULTIPLE_DPCS
        for (i = 0; i < mp_ncpus; i++) {
#else
        for (i = 0; i < 1; i++) {
#endif
                kq = kq_queues + i;
                kq->kq_cpu = i;
                error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
                    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows DPC %d", i);
                if (error)
                        panic("failed to launch DPC thread");
        }

        /*
         * Launch the workitem threads.
         */

        for (i = 0; i < WORKITEM_THREADS; i++) {
                kq = wq_queues + i;
                error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
                    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Workitem %d", i);
                if (error)
                        panic("failed to launch workitem thread");
        }

        patch = ntoskrnl_functbl;
        while (patch->ipt_func != NULL) {
                windrv_wrap((funcptr)patch->ipt_func,
                    (funcptr *)&patch->ipt_wrap,
                    patch->ipt_argcnt, patch->ipt_ftype);
                patch++;
        }

        for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
                e = ExAllocatePoolWithTag(NonPagedPool,
                    sizeof(callout_entry), 0);
                if (e == NULL)
                        panic("failed to allocate timeouts");
                mtx_lock_spin(&ntoskrnl_calllock);
                InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
                mtx_unlock_spin(&ntoskrnl_calllock);
        }

        /*
         * MDLs are supposed to be variable size (they describe
         * buffers containing some number of pages, but we don't
         * know ahead of time how many pages that will be). But
         * always allocating them off the heap is very slow. As
         * a compromise, we create an MDL UMA zone big enough to
         * handle any buffer requiring up to 16 pages, and we
         * use those for any MDLs for buffers of 16 pages or less
         * in size. For buffers larger than that (which we assume
         * will be few and far between), we allocate the MDLs off
         * the heap.
         */

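        /*
         * Editor's sizing sketch (not in the original source): assuming
         * ntoskrnl_var.h defines the zone item size along these lines,
         *
         *      #define MDL_ZONE_PAGES  16
         *      #define MDL_ZONE_SIZE   (sizeof(mdl) + \
         *              (sizeof(vm_offset_t) * MDL_ZONE_PAGES))
         *
         * one zone item holds the MDL header plus a page-frame slot per
         * page, so any buffer spanning at most 16 pages fits without a
         * heap allocation.
         */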
        mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        return (0);
}

int
ntoskrnl_libfini()
{
        image_patch_table       *patch;
        callout_entry           *e;
        list_entry              *l;

        patch = ntoskrnl_functbl;
        while (patch->ipt_func != NULL) {
                windrv_unwrap(patch->ipt_wrap);
                patch++;
        }

        /* Stop the workitem queues. */
        ntoskrnl_destroy_workitem_threads();
        /* Stop the DPC queues. */
        ntoskrnl_destroy_dpc_threads();

        ExFreePool(kq_queues);
        ExFreePool(wq_queues);

        uma_zdestroy(mdl_zone);
        uma_zdestroy(iw_zone);

        mtx_lock_spin(&ntoskrnl_calllock);
        while (!IsListEmpty(&ntoskrnl_calllist)) {
                l = RemoveHeadList(&ntoskrnl_calllist);
                e = CONTAINING_RECORD(l, callout_entry, ce_list);
                mtx_unlock_spin(&ntoskrnl_calllock);
                ExFreePool(e);
                mtx_lock_spin(&ntoskrnl_calllock);
        }
        mtx_unlock_spin(&ntoskrnl_calllock);

        mtx_destroy(&ntoskrnl_dispatchlock);
        mtx_destroy(&ntoskrnl_interlock);
        mtx_destroy(&ntoskrnl_calllock);

        return (0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
        void                    *buf;
        int                     ch;
        size_t                  size;
{
        return (memset(buf, ch, size));
}

static void *
ntoskrnl_memmove(dst, src, size)
        void                    *dst;
        void                    *src;
        size_t                  size;
{
        bcopy(src, dst, size);
        return (dst);
}

static void *
ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
{
        if (len != 0) {
                unsigned char *p = buf;

                do {
                        if (*p++ == ch)
                                return (p - 1);
                } while (--len != 0);
        }
        return (NULL);
}

static char *
ntoskrnl_strstr(s, find)
        char *s, *find;
{
        char c, sc;
        size_t len;

        if ((c = *find++) != 0) {
                len = strlen(find);
                do {
                        do {
                                if ((sc = *s++) == 0)
                                        return (NULL);
                        } while (sc != c);
                } while (strncmp(s, find, len) != 0);
                s--;
        }
        return ((char *)s);
}

/* Taken from libc */
static char *
ntoskrnl_strncat(dst, src, n)
        char            *dst;
        char            *src;
        size_t          n;
{
        if (n != 0) {
                char *d = dst;
                const char *s = src;

                while (*d != 0)
                        d++;
                do {
                        if ((*d = *s++) == 0)
                                break;
                        d++;
                } while (--n != 0);
                *d = 0;
        }
        return (dst);
}

static int
ntoskrnl_toupper(c)
        int                     c;
{
        return (toupper(c));
}

static int
ntoskrnl_tolower(c)
        int                     c;
{
        return (tolower(c));
}

static uint8_t
RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
        uint8_t caseinsensitive)
{
        int                     i;

        if (str1->us_len != str2->us_len)
                return (FALSE);

        for (i = 0; i < str1->us_len; i++) {
                if (caseinsensitive == TRUE) {
                        if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
                            toupper((char)(str2->us_buf[i] & 0xFF)))
                                return (FALSE);
                } else {
                        if (str1->us_buf[i] != str2->us_buf[i])
                                return (FALSE);
                }
        }

        return (TRUE);
}

static void
RtlCopyString(dst, src)
        ansi_string             *dst;
        const ansi_string       *src;
{
        if (src != NULL && src->as_buf != NULL && dst->as_buf != NULL) {
                dst->as_len = min(src->as_len, dst->as_maxlen);
                memcpy(dst->as_buf, src->as_buf, dst->as_len);
                if (dst->as_len < dst->as_maxlen)
                        dst->as_buf[dst->as_len] = 0;
        } else
                dst->as_len = 0;
}

static void
RtlCopyUnicodeString(dest, src)
        unicode_string          *dest;
        unicode_string          *src;
{

        if (dest->us_maxlen >= src->us_len)
                dest->us_len = src->us_len;
        else
                dest->us_len = dest->us_maxlen;
        memcpy(dest->us_buf, src->us_buf, dest->us_len);
}

static void
ntoskrnl_ascii_to_unicode(ascii, unicode, len)
        char                    *ascii;
        uint16_t                *unicode;
        int                     len;
{
        int                     i;
        uint16_t                *ustr;

        ustr = unicode;
        for (i = 0; i < len; i++) {
                *ustr = (uint16_t)ascii[i];
                ustr++;
        }
}

static void
ntoskrnl_unicode_to_ascii(unicode, ascii, len)
        uint16_t                *unicode;
        char                    *ascii;
        int                     len;
{
        int                     i;
        uint8_t                 *astr;

        astr = ascii;
        for (i = 0; i < len / 2; i++) {
                *astr = (uint8_t)unicode[i];
                astr++;
        }
}

uint32_t
RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src,
        uint8_t allocate)
{
        if (dest == NULL || src == NULL)
                return (STATUS_INVALID_PARAMETER);

        dest->as_len = src->us_len / 2;
        if (dest->as_maxlen < dest->as_len)
                dest->as_len = dest->as_maxlen;

        if (allocate == TRUE) {
                dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
                    (src->us_len / 2) + 1, 0);
                if (dest->as_buf == NULL)
                        return (STATUS_INSUFFICIENT_RESOURCES);
                dest->as_len = dest->as_maxlen = src->us_len / 2;
        } else {
                dest->as_len = src->us_len / 2; /* XXX */
                if (dest->as_maxlen < dest->as_len)
                        dest->as_len = dest->as_maxlen;
        }

        ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
            dest->as_len * 2);

        return (STATUS_SUCCESS);
}

uint32_t
RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
        uint8_t allocate)
{
        if (dest == NULL || src == NULL)
                return (STATUS_INVALID_PARAMETER);

        if (allocate == TRUE) {
                dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
                    src->as_len * 2, 0);
                if (dest->us_buf == NULL)
                        return (STATUS_INSUFFICIENT_RESOURCES);
                dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
        } else {
                dest->us_len = src->as_len * 2; /* XXX */
                if (dest->us_maxlen < dest->us_len)
                        dest->us_len = dest->us_maxlen;
        }

        ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
            dest->us_len / 2);

        return (STATUS_SUCCESS);
}

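/*
 * A minimal usage sketch for the two conversion routines above
 * (editor's illustration, not part of the original source; "as" and
 * "us" are hypothetical locals). With allocate == TRUE the destination
 * buffer comes from the pool and must be released, e.g. with
 * RtlFreeUnicodeString():
 *
 *      ansi_string             as;
 *      unicode_string          us;
 *
 *      RtlInitAnsiString(&as, "ndis0");
 *      if (RtlAnsiStringToUnicodeString(&us, &as, TRUE) ==
 *          STATUS_SUCCESS) {
 *              ... use us.us_buf ...
 *              RtlFreeUnicodeString(&us);
 *      }
 */
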
void *
ExAllocatePoolWithTag(pooltype, len, tag)
        uint32_t                pooltype;
        size_t                  len;
        uint32_t                tag;
{
        void                    *buf;

        buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (buf == NULL)
                return (NULL);

        return (buf);
}

static void
ExFreePoolWithTag(buf, tag)
        void            *buf;
        uint32_t        tag;
{
        ExFreePool(buf);
}

void
ExFreePool(buf)
        void                    *buf;
{
        free(buf, M_DEVBUF);
}

uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
        driver_object           *drv;
        void                    *clid;
        uint32_t                extlen;
        void                    **ext;
{
        custom_extension        *ce;

        ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
            + extlen, 0);

        if (ce == NULL)
                return (STATUS_INSUFFICIENT_RESOURCES);

        ce->ce_clid = clid;
        InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

        *ext = (void *)(ce + 1);

        return (STATUS_SUCCESS);
}

void *
IoGetDriverObjectExtension(drv, clid)
        driver_object           *drv;
        void                    *clid;
{
        list_entry              *e;
        custom_extension        *ce;

        /*
         * Sanity check. Our dummy bus drivers don't have
         * any driver extensions.
         */

        if (drv->dro_driverext == NULL)
                return (NULL);

        e = drv->dro_driverext->dre_usrext.nle_flink;
        while (e != &drv->dro_driverext->dre_usrext) {
                ce = (custom_extension *)e;
                if (ce->ce_clid == clid)
                        return ((void *)(ce + 1));
                e = e->nle_flink;
        }

        return (NULL);
}

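/*
 * Editor's sketch of how the two extension routines above pair up
 * (illustrative only; "clid" is whatever unique tag the driver picks,
 * and "struct mydata" is hypothetical). The extension is allocated
 * once, typically at DriverEntry time, and looked up later by the
 * same tag:
 *
 *      void                    *ext;
 *
 *      if (IoAllocateDriverObjectExtension(drv, clid,
 *          sizeof(struct mydata), &ext) != STATUS_SUCCESS)
 *              return (STATUS_UNSUCCESSFUL);
 *      ...
 *      ext = IoGetDriverObjectExtension(drv, clid);
 */
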
uint32_t
IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
        uint32_t devtype, uint32_t devchars, uint8_t exclusive,
        device_object **newdev)
{
        device_object           *dev;

        dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
        if (dev == NULL)
                return (STATUS_INSUFFICIENT_RESOURCES);

        dev->do_type = devtype;
        dev->do_drvobj = drv;
        dev->do_currirp = NULL;
        dev->do_flags = 0;

        if (devextlen) {
                dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
                    devextlen, 0);

                if (dev->do_devext == NULL) {
                        ExFreePool(dev);
                        return (STATUS_INSUFFICIENT_RESOURCES);
                }

                bzero(dev->do_devext, devextlen);
        } else
                dev->do_devext = NULL;

        dev->do_size = sizeof(device_object) + devextlen;
        dev->do_refcnt = 1;
        dev->do_attacheddev = NULL;
        dev->do_nextdev = NULL;
        dev->do_devtype = devtype;
        dev->do_stacksize = 1;
        dev->do_alignreq = 1;
        dev->do_characteristics = devchars;
        dev->do_iotimer = NULL;
        KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

        /*
         * Vpd is used for disk/tape devices,
         * but we don't support those. (Yet.)
         */
        dev->do_vpb = NULL;

        dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(devobj_extension), 0);

        if (dev->do_devobj_ext == NULL) {
                if (dev->do_devext != NULL)
                        ExFreePool(dev->do_devext);
                ExFreePool(dev);
                return (STATUS_INSUFFICIENT_RESOURCES);
        }

        dev->do_devobj_ext->dve_type = 0;
        dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
        dev->do_devobj_ext->dve_devobj = dev;

        /*
         * Attach this device to the driver object's list
         * of devices. Note: this is not the same as attaching
         * the device to the device stack. The driver's AddDevice
         * routine must explicitly call IoAttachDeviceToDeviceStack()
         * to do that.
         */

        if (drv->dro_devobj == NULL) {
                drv->dro_devobj = dev;
                dev->do_nextdev = NULL;
        } else {
                dev->do_nextdev = drv->dro_devobj;
                drv->dro_devobj = dev;
        }

        *newdev = dev;

        return (STATUS_SUCCESS);
}

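/*
 * Editor's note, expanding on the comment above (hypothetical
 * identifiers; FILE_DEVICE_UNKNOWN is the usual DDK device type):
 * a driver's AddDevice routine first creates its device object and
 * then, in a separate step, splices it into the device stack above
 * the physical device object "pdo":
 *
 *      device_object           *fdo;
 *
 *      if (IoCreateDevice(drv, sizeof(struct mydevext), NULL,
 *          FILE_DEVICE_UNKNOWN, 0, FALSE, &fdo) != STATUS_SUCCESS)
 *              return (STATUS_UNSUCCESSFUL);
 *      IoAttachDeviceToDeviceStack(fdo, pdo);
 */
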
void
IoDeleteDevice(dev)
        device_object           *dev;
{
        device_object           *prev;

        if (dev == NULL)
                return;

        if (dev->do_devobj_ext != NULL)
                ExFreePool(dev->do_devobj_ext);

        if (dev->do_devext != NULL)
                ExFreePool(dev->do_devext);

        /* Unlink the device from the driver's device list. */

        prev = dev->do_drvobj->dro_devobj;
        if (prev == dev)
                dev->do_drvobj->dro_devobj = dev->do_nextdev;
        else {
                while (prev->do_nextdev != dev)
                        prev = prev->do_nextdev;
                prev->do_nextdev = dev->do_nextdev;
        }

        ExFreePool(dev);
}

device_object *
IoGetAttachedDevice(dev)
        device_object           *dev;
{
        device_object           *d;

        if (dev == NULL)
                return (NULL);

        d = dev;

        while (d->do_attacheddev != NULL)
                d = d->do_attacheddev;

        return (d);
}

static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
        uint32_t                func;
        device_object           *dobj;
        void                    *buf;
        uint32_t                len;
        uint64_t                *off;
        nt_kevent               *event;
        io_status_block         *status;
{
        irp                     *ip;

        ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
        if (ip == NULL)
                return (NULL);
        ip->irp_usrevent = event;

        return (ip);
}

static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
        uint32_t                func;
        device_object           *dobj;
        void                    *buf;
        uint32_t                len;
        uint64_t                *off;
        io_status_block         *status;
{
        irp                     *ip;
        io_stack_location       *sl;

        ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
        if (ip == NULL)
                return (NULL);

        ip->irp_usriostat = status;
        ip->irp_tail.irp_overlay.irp_thread = NULL;

        sl = IoGetNextIrpStackLocation(ip);
        sl->isl_major = func;
        sl->isl_minor = 0;
        sl->isl_flags = 0;
        sl->isl_ctl = 0;
        sl->isl_devobj = dobj;
        sl->isl_fileobj = NULL;
        sl->isl_completionfunc = NULL;

        ip->irp_userbuf = buf;

        if (dobj->do_flags & DO_BUFFERED_IO) {
                ip->irp_assoc.irp_sysbuf =
                    ExAllocatePoolWithTag(NonPagedPool, len, 0);
                if (ip->irp_assoc.irp_sysbuf == NULL) {
                        IoFreeIrp(ip);
                        return (NULL);
                }
                bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
        }

        if (dobj->do_flags & DO_DIRECT_IO) {
                ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
                if (ip->irp_mdl == NULL) {
                        if (ip->irp_assoc.irp_sysbuf != NULL)
                                ExFreePool(ip->irp_assoc.irp_sysbuf);
                        IoFreeIrp(ip);
                        return (NULL);
                }
                ip->irp_userbuf = NULL;
                ip->irp_assoc.irp_sysbuf = NULL;
        }

        if (func == IRP_MJ_READ) {
                sl->isl_parameters.isl_read.isl_len = len;
                if (off != NULL)
                        sl->isl_parameters.isl_read.isl_byteoff = *off;
                else
                        sl->isl_parameters.isl_read.isl_byteoff = 0;
        }

        if (func == IRP_MJ_WRITE) {
                sl->isl_parameters.isl_write.isl_len = len;
                if (off != NULL)
                        sl->isl_parameters.isl_write.isl_byteoff = *off;
                else
                        sl->isl_parameters.isl_write.isl_byteoff = 0;
        }

        return (ip);
}

static irp *
IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
        uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
        nt_kevent *event, io_status_block *status)
{
        irp                     *ip;
        io_stack_location       *sl;
        uint32_t                buflen;

        ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
        if (ip == NULL)
                return (NULL);
        ip->irp_usrevent = event;
        ip->irp_usriostat = status;
        ip->irp_tail.irp_overlay.irp_thread = NULL;

        sl = IoGetNextIrpStackLocation(ip);
        sl->isl_major = isinternal == TRUE ?
            IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
        sl->isl_minor = 0;
        sl->isl_flags = 0;
        sl->isl_ctl = 0;
        sl->isl_devobj = dobj;
        sl->isl_fileobj = NULL;
        sl->isl_completionfunc = NULL;
        sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
        sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
        sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

        switch (IO_METHOD(iocode)) {
        case METHOD_BUFFERED:
                if (ilen > olen)
                        buflen = ilen;
                else
                        buflen = olen;
                if (buflen) {
                        ip->irp_assoc.irp_sysbuf =
                            ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
                        if (ip->irp_assoc.irp_sysbuf == NULL) {
                                IoFreeIrp(ip);
                                return (NULL);
                        }
                }
                if (ilen && ibuf != NULL) {
                        bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
                        bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
                            buflen - ilen);
                } else
                        bzero(ip->irp_assoc.irp_sysbuf, ilen);
                ip->irp_userbuf = obuf;
                break;
        case METHOD_IN_DIRECT:
        case METHOD_OUT_DIRECT:
                if (ilen && ibuf != NULL) {
                        ip->irp_assoc.irp_sysbuf =
                            ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
                        if (ip->irp_assoc.irp_sysbuf == NULL) {
                                IoFreeIrp(ip);
                                return (NULL);
                        }
                        bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
                }
                if (olen && obuf != NULL) {
                        ip->irp_mdl = IoAllocateMdl(obuf, olen,
                            FALSE, FALSE, ip);
                        /*
                         * Normally we would MmProbeAndLockPages()
                         * here, but we don't have to in our
                         * implementation.
                         */
                }
                break;
        case METHOD_NEITHER:
                ip->irp_userbuf = obuf;
                sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
                break;
        default:
                break;
        }

        /*
         * Ideally, we should associate this IRP with the calling
         * thread here.
         */

        return (ip);
}

static irp *
IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
{
        irp                     *i;

        i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
        if (i == NULL)
                return (NULL);

        IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

        return (i);
}

static irp *
IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
{
        irp                     *associrp;

        associrp = IoAllocateIrp(stsize, FALSE);
        if (associrp == NULL)
                return (NULL);

        mtx_lock(&ntoskrnl_dispatchlock);
        associrp->irp_flags |= IRP_ASSOCIATED_IRP;
        associrp->irp_tail.irp_overlay.irp_thread =
            ip->irp_tail.irp_overlay.irp_thread;
        associrp->irp_assoc.irp_master = ip;
        mtx_unlock(&ntoskrnl_dispatchlock);

        return (associrp);
}

static void
IoFreeIrp(ip)
        irp                     *ip;
{
        ExFreePool(ip);
}

static void
IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
{
        bzero((char *)io, IoSizeOfIrp(ssize));
        io->irp_size = psize;
        io->irp_stackcnt = ssize;
        io->irp_currentstackloc = ssize;
        InitializeListHead(&io->irp_thlist);
        io->irp_tail.irp_overlay.irp_csl =
            (io_stack_location *)(io + 1) + ssize;
}

static void
IoReuseIrp(ip, status)
        irp                     *ip;
        uint32_t                status;
{
        uint8_t                 allocflags;

        allocflags = ip->irp_allocflags;
        IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
        ip->irp_iostat.isb_status = status;
        ip->irp_allocflags = allocflags;
}

void
IoAcquireCancelSpinLock(uint8_t *irql)
{
        KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
}

void
IoReleaseCancelSpinLock(uint8_t irql)
{
        KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
}

uint8_t
IoCancelIrp(irp *ip)
{
        cancel_func             cfunc;
        uint8_t                 cancelirql;

        IoAcquireCancelSpinLock(&cancelirql);
        cfunc = IoSetCancelRoutine(ip, NULL);
        ip->irp_cancel = TRUE;
        if (cfunc == NULL) {
                IoReleaseCancelSpinLock(cancelirql);
                return (FALSE);
        }
        ip->irp_cancelirql = cancelirql;
        MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
        return (uint8_t)IoSetCancelValue(ip, TRUE);
}

uint32_t
IofCallDriver(dobj, ip)
        device_object           *dobj;
        irp                     *ip;
{
        driver_object           *drvobj;
        io_stack_location       *sl;
        uint32_t                status;
        driver_dispatch         disp;

        drvobj = dobj->do_drvobj;

        if (ip->irp_currentstackloc <= 0)
                panic("IoCallDriver(): out of stack locations");

        IoSetNextIrpStackLocation(ip);
        sl = IoGetCurrentIrpStackLocation(ip);

        sl->isl_devobj = dobj;

        disp = drvobj->dro_dispatch[sl->isl_major];
        status = MSCALL2(disp, dobj, ip);

        return (status);
}

void
IofCompleteRequest(irp *ip, uint8_t prioboost)
{
        uint32_t                status;
        device_object           *dobj;
        io_stack_location       *sl;
        completion_func         cf;

        KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
            ("incorrect IRP(%p) status (STATUS_PENDING)", ip));

        sl = IoGetCurrentIrpStackLocation(ip);
        IoSkipCurrentIrpStackLocation(ip);

        do {
                if (sl->isl_ctl & SL_PENDING_RETURNED)
                        ip->irp_pendingreturned = TRUE;

                if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
                        dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
                else
                        dobj = NULL;

                if (sl->isl_completionfunc != NULL &&
                    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
                    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
                    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
                    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
                    (ip->irp_cancel == TRUE &&
                    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
                        cf = sl->isl_completionfunc;
                        status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
                        if (status == STATUS_MORE_PROCESSING_REQUIRED)
                                return;
                } else {
                        if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
                            (ip->irp_pendingreturned == TRUE))
                                IoMarkIrpPending(ip);
                }

                /* Move to the next stack location. */
                IoSkipCurrentIrpStackLocation(ip);
                sl++;
        } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));

        if (ip->irp_usriostat != NULL)
                *ip->irp_usriostat = ip->irp_iostat;
        if (ip->irp_usrevent != NULL)
                KeSetEvent(ip->irp_usrevent, prioboost, FALSE);

        /* Handle any associated IRPs. */

        if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
                uint32_t                masterirpcnt;
                irp                     *masterirp;
                mdl                     *m;

                masterirp = ip->irp_assoc.irp_master;
                masterirpcnt =
                    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

                while ((m = ip->irp_mdl) != NULL) {
                        ip->irp_mdl = m->mdl_next;
                        IoFreeMdl(m);
                }
                IoFreeIrp(ip);
                if (masterirpcnt == 0)
                        IoCompleteRequest(masterirp, IO_NO_INCREMENT);
                return;
        }

        /* With any luck, these conditions will never arise. */

        if (ip->irp_flags & IRP_PAGING_IO) {
                if (ip->irp_mdl != NULL)
                        IoFreeMdl(ip->irp_mdl);
                IoFreeIrp(ip);
        }
}

void
ntoskrnl_intr(arg)
        void                    *arg;
{
        kinterrupt              *iobj;
        uint8_t                 irql;
        uint8_t                 claimed;
        list_entry              *l;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        l = ntoskrnl_intlist.nle_flink;
        while (l != &ntoskrnl_intlist) {
                iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
                claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
                if (claimed == TRUE)
                        break;
                l = l->nle_flink;
        }
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);
}

uint8_t
KeAcquireInterruptSpinLock(iobj)
        kinterrupt              *iobj;
{
        uint8_t                 irql;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        return (irql);
}

void
KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
{
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);
}

uint8_t
KeSynchronizeExecution(iobj, syncfunc, syncctx)
        kinterrupt              *iobj;
        void                    *syncfunc;
        void                    *syncctx;
{
        uint8_t                 irql;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        MSCALL1(syncfunc, syncctx);
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        return (TRUE);
}

/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke all the registered ISRs. This effectively makes all
 * interrupts shared, but it's the only way to duplicate the
 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */

uint32_t
IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
        kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
        uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
{
        uint8_t                 curirql;

        *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
        if (*iobj == NULL)
                return (STATUS_INSUFFICIENT_RESOURCES);

        (*iobj)->ki_svcfunc = svcfunc;
        (*iobj)->ki_svcctx = svcctx;

        if (lock == NULL) {
                KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
                (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
        } else
                (*iobj)->ki_lock = lock;

        KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
        InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
        KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

        return (STATUS_SUCCESS);
}

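/*
 * Editor's sketch of the registration path described above
 * (illustrative; "my_isr" and "sc" are hypothetical driver symbols).
 * The vector and irql arguments are accepted for API compatibility,
 * but dispatch is purely list-based:
 *
 *      kinterrupt              *intobj;
 *
 *      if (IoConnectInterrupt(&intobj, my_isr, sc, NULL, vector,
 *          irql, irql, 0, TRUE, 0, FALSE) != STATUS_SUCCESS)
 *              return (NDIS_STATUS_FAILURE);
 *      ...
 *      IoDisconnectInterrupt(intobj);
 */
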
void
IoDisconnectInterrupt(iobj)
        kinterrupt              *iobj;
{
        uint8_t                 irql;

        if (iobj == NULL)
                return;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        RemoveEntryList((&iobj->ki_list));
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        ExFreePool(iobj);
}

device_object *
IoAttachDeviceToDeviceStack(src, dst)
        device_object           *src;
        device_object           *dst;
{
        device_object           *attached;

        mtx_lock(&ntoskrnl_dispatchlock);
        attached = IoGetAttachedDevice(dst);
        attached->do_attacheddev = src;
        src->do_attacheddev = NULL;
        src->do_stacksize = attached->do_stacksize + 1;
        mtx_unlock(&ntoskrnl_dispatchlock);

        return (attached);
}

void
IoDetachDevice(topdev)
        device_object           *topdev;
{
        device_object           *tail;

        mtx_lock(&ntoskrnl_dispatchlock);

        /* First, break the chain. */
        tail = topdev->do_attacheddev;
        if (tail == NULL) {
                mtx_unlock(&ntoskrnl_dispatchlock);
                return;
        }
        topdev->do_attacheddev = tail->do_attacheddev;
        topdev->do_refcnt--;

        /* Now reduce the stacksize count for the tail objects. */

        tail = topdev->do_attacheddev;
        while (tail != NULL) {
                tail->do_stacksize--;
                tail = tail->do_attacheddev;
        }

        mtx_unlock(&ntoskrnl_dispatchlock);
}

/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */

static int
ntoskrnl_is_signalled(obj, td)
        nt_dispatch_header      *obj;
        struct thread           *td;
{
        kmutant                 *km;

        if (obj->dh_type == DISP_TYPE_MUTANT) {
                km = (kmutant *)obj;
                if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
                    obj->dh_sigstate == 1)
                        return (TRUE);
                return (FALSE);
        }

        if (obj->dh_sigstate > 0)
                return (TRUE);
        return (FALSE);
}

static void
ntoskrnl_satisfy_wait(obj, td)
        nt_dispatch_header      *obj;
        struct thread           *td;
{
        kmutant                 *km;

        switch (obj->dh_type) {
        case DISP_TYPE_MUTANT:
                km = (struct kmutant *)obj;
                obj->dh_sigstate--;
                /*
                 * If sigstate reaches 0, the mutex is now
                 * non-signalled (the new thread owns it).
                 */
                if (obj->dh_sigstate == 0) {
                        km->km_ownerthread = td;
                        if (km->km_abandoned == TRUE)
                                km->km_abandoned = FALSE;
                }
                break;
        /* Synchronization objects get reset to unsignalled. */
        case DISP_TYPE_SYNCHRONIZATION_EVENT:
        case DISP_TYPE_SYNCHRONIZATION_TIMER:
                obj->dh_sigstate = 0;
                break;
        case DISP_TYPE_SEMAPHORE:
                obj->dh_sigstate--;
                break;
        default:
                break;
        }
}

static void
ntoskrnl_satisfy_multiple_waits(wb)
        wait_block              *wb;
{
        wait_block              *cur;
        struct thread           *td;

        cur = wb;
        td = wb->wb_kthread;

        do {
                ntoskrnl_satisfy_wait(cur->wb_object, td);
                cur->wb_awakened = TRUE;
                cur = cur->wb_next;
        } while (cur != wb);
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(obj, increment)
        nt_dispatch_header      *obj;
        uint32_t                increment;
{
        wait_block              *w, *next;
        list_entry              *e;
        struct thread           *td;
        wb_ext                  *we;
        int                     satisfied;

        /*
         * Once an object has been signalled, we walk its list of
         * wait blocks. If a wait block can be awakened, then satisfy
         * waits as necessary and wake the thread.
         *
         * The rules work like this:
         *
         * If a wait block is marked as WAITTYPE_ANY, then
         * we can satisfy the wait conditions on the current
         * object and wake the thread right away. Satisfying
         * the wait also has the effect of breaking us out
         * of the search loop.
         *
         * If the object is marked as WAITTYPE_ALL, then the
         * wait block will be part of a circularly linked
         * list of wait blocks belonging to a waiting thread
         * that's sleeping in KeWaitForMultipleObjects(). In
         * order to wake the thread, all the objects in the
         * wait list must be in the signalled state. If they
         * are, we then satisfy all of them and wake the
         * thread.
         *
         */

        e = obj->dh_waitlisthead.nle_flink;

        while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
                w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
                we = w->wb_ext;
                td = we->we_td;
                satisfied = FALSE;
                if (w->wb_waittype == WAITTYPE_ANY) {
                        /*
                         * Thread can be awakened if
                         * any wait is satisfied.
                         */
                        ntoskrnl_satisfy_wait(obj, td);
                        satisfied = TRUE;
                        w->wb_awakened = TRUE;
                } else {
                        /*
                         * Thread can only be woken up
                         * if all waits are satisfied.
                         * If the thread is waiting on multiple
                         * objects, they should all be linked
                         * through the wb_next pointers in the
                         * wait blocks.
                         */
                        satisfied = TRUE;
                        next = w->wb_next;
                        while (next != w) {
                                if (ntoskrnl_is_signalled(next->wb_object,
                                    td) == FALSE) {
                                        satisfied = FALSE;
                                        break;
                                }
                                next = next->wb_next;
                        }
                        if (satisfied == TRUE)
                                ntoskrnl_satisfy_multiple_waits(w);
1555                 }
1556
1557                 if (satisfied == TRUE)
1558                         cv_broadcastpri(&we->we_cv,
1559                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
1560                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
1561
1562                 e = e->nle_flink;
1563         }
1564 }
1565
1566 /*
1567  * Return the number of 100 nanosecond intervals since
1568  * January 1, 1601. (?!?!)
1569  */
1570 void
1571 ntoskrnl_time(tval)
1572         uint64_t                *tval;
1573 {
1574         struct timespec         ts;
1575
1576         nanotime(&ts);
1577         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1578             11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
1579 }
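
/*
 * Illustrative sketch (hypothetical helper, compiled out): inverting
 * the arithmetic above. Windows timestamps count 100ns units from
 * January 1, 1601; Unix time counts seconds from January 1, 1970;
 * the gap between the two epochs is 11644473600 seconds.
 */
#if 0
static void
example_nttime_to_unix(uint64_t nttime, struct timespec *ts)
{
        /* Peel off the 1601-to-1970 offset, then split sec/nsec. */
        nttime -= (uint64_t)11644473600 * 10000000;
        ts->tv_sec = nttime / 10000000;
        ts->tv_nsec = (nttime % 10000000) * 100;
}
#endif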
1580
1581 static void
1582 KeQuerySystemTime(current_time)
1583         uint64_t                *current_time;
1584 {
1585         ntoskrnl_time(current_time);
1586 }
1587
1588 static uint32_t
1589 KeTickCount(void)
1590 {
1591         struct timeval tv;
1592         getmicrouptime(&tv);
1593         return tvtohz(&tv);
1594 }
1595
1596
1597 /*
1598  * KeWaitForSingleObject() is a tricky beast, because it can be used
1599  * with several different object types: semaphores, timers, events,
1600  * mutexes and threads. Semaphores don't appear very often, but the
1601  * other object types are quite common. KeWaitForSingleObject() is
1602  * what's normally used to acquire a mutex, and it can be used to
1603  * wait for a thread to terminate.
1604  *
1605  * The Windows NDIS API is implemented in terms of Windows kernel
1606  * primitives, and some of the object manipulation is duplicated in
1607  * NDIS. For example, NDIS has timers and events, which are actually
1608  * Windows kevents and ktimers. Now, you're supposed to only use the
1609  * NDIS variants of these objects within the confines of the NDIS API,
1610  * but there are some naughty developers out there who will use
1611  * KeWaitForSingleObject() on NDIS timer and event objects, so we
1612  * have to support that as well. Consequently, our NDIS timer and event
1613  * code has to be closely tied into our ntoskrnl timer and event code,
1614  * just as it is in Windows.
1615  *
1616  * KeWaitForSingleObject() may do different things for different kinds
1617  * of objects:
1618  *
1619  * - For events, we check if the event has been signalled. If the
1620  *   event is already in the signalled state, we just return immediately,
1621  *   otherwise we wait for it to be set to the signalled state by someone
1622  *   else calling KeSetEvent(). Events can be either synchronization or
1623  *   notification events.
1624  *
1625  * - For timers, if the timer has already fired and the timer is in
1626  *   the signalled state, we just return, otherwise we wait on the
1627  *   timer. Unlike an event, timers get signalled automatically when
1628  *   they expire rather than someone having to trip them manually.
1629  *   Timers initialized with KeInitializeTimer() are always notification
1630  *   events: KeInitializeTimerEx() lets you initialize a timer as
1631  *   either a notification or synchronization event.
1632  *
1633  * - For mutexes, we try to acquire the mutex and if we can't, we wait
1634  *   on the mutex until it's available and then grab it. When a mutex is
1635  *   released, it enters the signalled state, which wakes up one of the
1636  *   threads waiting to acquire it. Mutexes are always synchronization
1637  *   events.
1638  *
1639  * - For threads, the only thing we do is wait until the thread object
1640  *   enters a signalled state, which occurs when the thread terminates.
1641  *   Threads are always notification events.
1642  *
1643  * A notification event wakes up all threads waiting on an object. A
1644  * synchronization event wakes up just one. Also, a synchronization event
1645  * is auto-clearing, which means we automatically set the event back to
1646  * the non-signalled state once the wakeup is done.
1647  */
1648
1649 uint32_t
1650 KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
1651     uint8_t alertable, int64_t *duetime)
1652 {
1653         wait_block              w;
1654         struct thread           *td = curthread;
1655         struct timeval          tv;
1656         int                     error = 0;
1657         uint64_t                curtime;
1658         wb_ext                  we;
1659         nt_dispatch_header      *obj;
1660
1661         obj = arg;
1662
1663         if (obj == NULL)
1664                 return (STATUS_INVALID_PARAMETER);
1665
1666         mtx_lock(&ntoskrnl_dispatchlock);
1667
1668         cv_init(&we.we_cv, "KeWFS");
1669         we.we_td = td;
1670
1671         /*
1672          * Check to see if this object is already signalled,
1673          * and just return without waiting if it is.
1674          */
1675         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1676                 /* Sanity check the signal state value. */
1677                 if (obj->dh_sigstate != INT32_MIN) {
1678                         ntoskrnl_satisfy_wait(obj, curthread);
1679                         mtx_unlock(&ntoskrnl_dispatchlock);
1680                         return (STATUS_SUCCESS);
1681                 } else {
1682                         /*
1683                          * There's a limit to how many times we can
1684                          * recursively acquire a mutant. If we hit
1685                          * the limit, something is very wrong.
1686                          */
1687                         if (obj->dh_type == DISP_TYPE_MUTANT) {
1688                                 mtx_unlock(&ntoskrnl_dispatchlock);
1689                                 panic("mutant limit exceeded");
1690                         }
1691                 }
1692         }
1693
1694         bzero((char *)&w, sizeof(wait_block));
1695         w.wb_object = obj;
1696         w.wb_ext = &we;
1697         w.wb_waittype = WAITTYPE_ANY;
1698         w.wb_next = &w;
1699         w.wb_waitkey = 0;
1700         w.wb_awakened = FALSE;
1701         w.wb_oldpri = td->td_priority;
1702
1703         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1704
1705         /*
1706          * The timeout value is specified in 100 nanosecond units
1707          * and can be a positive or negative number. If it's positive,
1708          * then the duetime is absolute, and we need to convert it
1709          * to a relative offset from now in order to use it.
1710          * If it's negative, then the duetime is relative and we
1711          * just have to convert the units.
1712          */
1713
1714         if (duetime != NULL) {
1715                 if (*duetime < 0) {
1716                         tv.tv_sec = - (*duetime) / 10000000;
1717                         tv.tv_usec = (- (*duetime) / 10) -
1718                             (tv.tv_sec * 1000000);
1719                 } else {
1720                         ntoskrnl_time(&curtime);
1721                         if (*duetime < curtime)
1722                                 tv.tv_sec = tv.tv_usec = 0;
1723                         else {
1724                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1725                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1726                                     (tv.tv_sec * 1000000);
1727                         }
1728                 }
1729         }
1730
1731         if (duetime == NULL)
1732                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1733         else
1734                 error = cv_timedwait(&we.we_cv,
1735                     &ntoskrnl_dispatchlock, tvtohz(&tv));
1736
1737         RemoveEntryList(&w.wb_waitlist);
1738
1739         cv_destroy(&we.we_cv);
1740
1741         /* We timed out. Leave the object alone and return status. */
1742
1743         if (error == EWOULDBLOCK) {
1744                 mtx_unlock(&ntoskrnl_dispatchlock);
1745                 return (STATUS_TIMEOUT);
1746         }
1747
1748         mtx_unlock(&ntoskrnl_dispatchlock);
1749
1750         return (STATUS_SUCCESS);
1751 /*
1752         return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1753             mode, alertable, duetime, &w));
1754 */
1755 }
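
/*
 * Illustrative sketch (hypothetical caller, compiled out) of the
 * duetime convention described above: a negative duetime is a
 * relative interval in 100ns units, so -5000000 means "wait up to
 * 500ms from now," while NULL means wait forever.
 */
#if 0
static uint32_t
example_wait_event_500ms(nt_kevent *ev)
{
        int64_t                 duetime;

        duetime = -5000000;     /* 500ms in 100ns units */
        return (KeWaitForSingleObject(ev, 0, 0, FALSE, &duetime));
}
#endif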
1756
1757 static uint32_t
1758 KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
1759         uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
1760         wait_block *wb_array)
1761 {
1762         struct thread           *td = curthread;
1763         wait_block              *whead, *w;
1764         wait_block              _wb_array[MAX_WAIT_OBJECTS];
1765         nt_dispatch_header      *cur;
1766         struct timeval          tv;
1767         int                     i, wcnt = 0, error = 0;
1768         uint64_t                curtime;
1769         struct timespec         t1, t2;
1770         uint32_t                status = STATUS_SUCCESS;
1771         wb_ext                  we;
1772
1773         if (cnt > MAX_WAIT_OBJECTS)
1774                 return (STATUS_INVALID_PARAMETER);
1775         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1776                 return (STATUS_INVALID_PARAMETER);
1777
1778         mtx_lock(&ntoskrnl_dispatchlock);
1779
1780         cv_init(&we.we_cv, "KeWFM");
1781         we.we_td = td;
1782
1783         if (wb_array == NULL)
1784                 whead = _wb_array;
1785         else
1786                 whead = wb_array;
1787
1788         bzero((char *)whead, sizeof(wait_block) * cnt);
1789
1790         /* First pass: see if we can satisfy any waits immediately. */
1791
1792         wcnt = 0;
1793         w = whead;
1794
1795         for (i = 0; i < cnt; i++) {
1796                 InsertTailList((&obj[i]->dh_waitlisthead),
1797                     (&w->wb_waitlist));
1798                 w->wb_ext = &we;
1799                 w->wb_object = obj[i];
1800                 w->wb_waittype = wtype;
1801                 w->wb_waitkey = i;
1802                 w->wb_awakened = FALSE;
1803                 w->wb_oldpri = td->td_priority;
1804                 w->wb_next = w + 1;
1805                 w++;
1806                 wcnt++;
1807                 if (ntoskrnl_is_signalled(obj[i], td)) {
1808                         /*
1809                          * There's a limit to how many times
1810                          * we can recursively acquire a mutant.
1811                          * If we hit the limit, something
1812                          * is very wrong.
1813                          */
1814                         if (obj[i]->dh_sigstate == INT32_MIN &&
1815                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
1816                                 mtx_unlock(&ntoskrnl_dispatchlock);
1817                                 panic("mutant limit exceeded");
1818                         }
1819
1820                         /*
1821                          * If this is a WAITTYPE_ANY wait, then
1822                          * satisfy the waited object and exit
1823                          * right now.
1824                          */
1825
1826                         if (wtype == WAITTYPE_ANY) {
1827                                 ntoskrnl_satisfy_wait(obj[i], td);
1828                                 status = STATUS_WAIT_0 + i;
1829                                 goto wait_done;
1830                         } else {
1831                                 w--;
1832                                 wcnt--;
1833                                 w->wb_object = NULL;
1834                                 RemoveEntryList(&w->wb_waitlist);
1835                         }
1836                 }
1837         }
1838
1839         /*
1840          * If this is a WAITTYPE_ALL wait and all objects are
1841          * already signalled, satisfy the waits and exit now.
1842          */
1843
1844         if (wtype == WAITTYPE_ALL && wcnt == 0) {
1845                 for (i = 0; i < cnt; i++)
1846                         ntoskrnl_satisfy_wait(obj[i], td);
1847                 status = STATUS_SUCCESS;
1848                 goto wait_done;
1849         }
1850
1851         /*
1852          * Create a circular waitblock list. The waitcount
1853          * must always be non-zero when we get here.
1854          */
1855
1856         (w - 1)->wb_next = whead;
1857
1858         /* Wait on any objects that aren't yet signalled. */
1859
1860         /* Calculate timeout, if any. */
1861
1862         if (duetime != NULL) {
1863                 if (*duetime < 0) {
1864                         tv.tv_sec = - (*duetime) / 10000000;
1865                         tv.tv_usec = (- (*duetime) / 10) -
1866                             (tv.tv_sec * 1000000);
1867                 } else {
1868                         ntoskrnl_time(&curtime);
1869                         if (*duetime < curtime)
1870                                 tv.tv_sec = tv.tv_usec = 0;
1871                         else {
1872                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1873                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1874                                     (tv.tv_sec * 1000000);
1875                         }
1876                 }
1877         }
1878
1879         while (wcnt) {
1880                 nanotime(&t1);
1881
1882                 if (duetime == NULL)
1883                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1884                 else
1885                         error = cv_timedwait(&we.we_cv,
1886                             &ntoskrnl_dispatchlock, tvtohz(&tv));
1887
1888                 /* The wait timed out. */
1889
1890                 if (error) {
1891                         status = STATUS_TIMEOUT;
1892                         goto wait_done;
1893                 }
1894
1895                 nanotime(&t2);
1896
1897                 /* See what's been signalled. */
1898
1899                 w = whead;
1900                 do {
1901                         cur = w->wb_object;
1902                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1903                             w->wb_awakened == TRUE) {
1904                                 /* Sanity check the signal state value. */
1905                                 if (cur->dh_sigstate == INT32_MIN &&
1906                                     cur->dh_type == DISP_TYPE_MUTANT) {
1907                                         mtx_unlock(&ntoskrnl_dispatchlock);
1908                                         panic("mutant limit exceeded");
1909                                 }
1910                                 wcnt--;
1911                                 if (wtype == WAITTYPE_ANY) {
1912                                         status = STATUS_WAIT_0 +
1913                                             w->wb_waitkey;
1914                                         goto wait_done;
1915                                 }
1916                         }
1917                         w = w->wb_next;
1918                 } while (w != whead);
1919
1920                 /*
1921                  * If all objects have been signalled, or if this
1922          * is a WAITTYPE_ANY wait and we were woken up by
1923                  * someone, we can bail.
1924                  */
1925
1926                 if (wcnt == 0) {
1927                         status = STATUS_SUCCESS;
1928                         goto wait_done;
1929                 }
1930
1931                 /*
1932          * If this is a WAITTYPE_ALL wait and there are still
1933                  * objects that haven't been signalled, deduct the
1934                  * time that's elapsed so far from the timeout and
1935                  * wait again (or continue waiting indefinitely if
1936                  * there's no timeout).
1937                  */
1938
1939                 if (duetime != NULL) {
1940                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1941                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1942                 }
1943         }
1944
1945
1946 wait_done:
1947
1948         cv_destroy(&we.we_cv);
1949
1950         for (i = 0; i < cnt; i++) {
1951                 if (whead[i].wb_object != NULL)
1952                         RemoveEntryList(&whead[i].wb_waitlist);
1953
1954         }
1955         mtx_unlock(&ntoskrnl_dispatchlock);
1956
1957         return (status);
1958 }
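
/*
 * Illustrative sketch (hypothetical caller, compiled out) of a
 * WAITTYPE_ANY wait on two events. Since cnt here is less than
 * THREAD_WAIT_OBJECTS, wb_array may be NULL and on-stack wait
 * blocks are used. The return value encodes which object satisfied
 * the wait, as STATUS_WAIT_0 + index.
 */
#if 0
static int
example_wait_either(nt_kevent *a, nt_kevent *b)
{
        nt_dispatch_header      *objs[2];
        uint32_t                status;

        objs[0] = &a->k_header;
        objs[1] = &b->k_header;
        status = KeWaitForMultipleObjects(2, objs, WAITTYPE_ANY,
            0, 0, FALSE, NULL, NULL);
        return (status - STATUS_WAIT_0);        /* 0 for a, 1 for b */
}
#endif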
1959
1960 static void
1961 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
1962 {
1963         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1964 }
1965
1966 static uint16_t
1967 READ_REGISTER_USHORT(reg)
1968         uint16_t                *reg;
1969 {
1970         return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1971 }
1972
1973 static void
1974 WRITE_REGISTER_ULONG(reg, val)
1975         uint32_t                *reg;
1976         uint32_t                val;
1977 {
1978         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1979 }
1980
1981 static uint32_t
1982 READ_REGISTER_ULONG(reg)
1983         uint32_t                *reg;
1984 {
1985         return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1986 }
1987
1988 static uint8_t
1989 READ_REGISTER_UCHAR(uint8_t *reg)
1990 {
1991         return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1992 }
1993
1994 static void
1995 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
1996 {
1997         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1998 }
1999
2000 static int64_t
2001 _allmul(a, b)
2002         int64_t                 a;
2003         int64_t                 b;
2004 {
2005         return (a * b);
2006 }
2007
2008 static int64_t
2009 _alldiv(a, b)
2010         int64_t                 a;
2011         int64_t                 b;
2012 {
2013         return (a / b);
2014 }
2015
2016 static int64_t
2017 _allrem(a, b)
2018         int64_t                 a;
2019         int64_t                 b;
2020 {
2021         return (a % b);
2022 }
2023
2024 static uint64_t
2025 _aullmul(a, b)
2026         uint64_t                a;
2027         uint64_t                b;
2028 {
2029         return (a * b);
2030 }
2031
2032 static uint64_t
2033 _aulldiv(a, b)
2034         uint64_t                a;
2035         uint64_t                b;
2036 {
2037         return (a / b);
2038 }
2039
2040 static uint64_t
2041 _aullrem(a, b)
2042         uint64_t                a;
2043         uint64_t                b;
2044 {
2045         return (a % b);
2046 }
2047
2048 static int64_t
2049 _allshl(int64_t a, uint8_t b)
2050 {
2051         return (a << b);
2052 }
2053
2054 static uint64_t
2055 _aullshl(uint64_t a, uint8_t b)
2056 {
2057         return (a << b);
2058 }
2059
2060 static int64_t
2061 _allshr(int64_t a, uint8_t b)
2062 {
2063         return (a >> b);
2064 }
2065
2066 static uint64_t
2067 _aullshr(uint64_t a, uint8_t b)
2068 {
2069         return (a >> b);
2070 }
2071
2072 static slist_entry *
2073 ntoskrnl_pushsl(head, entry)
2074         slist_header            *head;
2075         slist_entry             *entry;
2076 {
2077         slist_entry             *oldhead;
2078
2079         oldhead = head->slh_list.slh_next;
2080         entry->sl_next = head->slh_list.slh_next;
2081         head->slh_list.slh_next = entry;
2082         head->slh_list.slh_depth++;
2083         head->slh_list.slh_seq++;
2084
2085         return (oldhead);
2086 }
2087
2088 static void
2089 InitializeSListHead(head)
2090         slist_header            *head;
2091 {
2092         memset(head, 0, sizeof(*head));
2093 }
2094
2095 static slist_entry *
2096 ntoskrnl_popsl(head)
2097         slist_header            *head;
2098 {
2099         slist_entry             *first;
2100
2101         first = head->slh_list.slh_next;
2102         if (first != NULL) {
2103                 head->slh_list.slh_next = first->sl_next;
2104                 head->slh_list.slh_depth--;
2105                 head->slh_list.slh_seq++;
2106         }
2107
2108         return (first);
2109 }
2110
2111 /*
2112  * We need this to make lookaside lists work for amd64.
2113  * We pass a pointer to ExAllocatePoolWithTag() in the lookaside
2114  * list structure. For amd64 to work right, this has to be a
2115  * pointer to the wrapped version of the routine, not the
2116  * original. Letting the Windows driver invoke the original
2117  * function directly will result in a calling convention
2118  * mismatch and a pretty nasty crash. On x86, this effectively
2119  * becomes a no-op since ipt_func and ipt_wrap are the same.
2120  */
2121
2122 static funcptr
2123 ntoskrnl_findwrap(func)
2124         funcptr                 func;
2125 {
2126         image_patch_table       *patch;
2127
2128         patch = ntoskrnl_functbl;
2129         while (patch->ipt_func != NULL) {
2130                 if ((funcptr)patch->ipt_func == func)
2131                         return ((funcptr)patch->ipt_wrap);
2132                 patch++;
2133         }
2134
2135         return (NULL);
2136 }
2137
2138 static void
2139 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
2140         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2141         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2142 {
2143         bzero((char *)lookaside, sizeof(paged_lookaside_list));
2144
2145         if (size < sizeof(slist_entry))
2146                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2147         else
2148                 lookaside->nll_l.gl_size = size;
2149         lookaside->nll_l.gl_tag = tag;
2150         if (allocfunc == NULL)
2151                 lookaside->nll_l.gl_allocfunc =
2152                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2153         else
2154                 lookaside->nll_l.gl_allocfunc = allocfunc;
2155
2156         if (freefunc == NULL)
2157                 lookaside->nll_l.gl_freefunc =
2158                     ntoskrnl_findwrap((funcptr)ExFreePool);
2159         else
2160                 lookaside->nll_l.gl_freefunc = freefunc;
2161
2162 #ifdef __i386__
2163         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2164 #endif
2165
2166         lookaside->nll_l.gl_type = NonPagedPool;
2167         lookaside->nll_l.gl_depth = depth;
2168         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2169 }
2170
2171 static void
2172 ExDeletePagedLookasideList(lookaside)
2173         paged_lookaside_list   *lookaside;
2174 {
2175         void                    *buf;
2176         void            (*freefunc)(void *);
2177
2178         freefunc = lookaside->nll_l.gl_freefunc;
2179         while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2180                 MSCALL1(freefunc, buf);
2181 }
2182
2183 static void
2184 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
2185         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2186         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2187 {
2188         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2189
2190         if (size < sizeof(slist_entry))
2191                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2192         else
2193                 lookaside->nll_l.gl_size = size;
2194         lookaside->nll_l.gl_tag = tag;
2195         if (allocfunc == NULL)
2196                 lookaside->nll_l.gl_allocfunc =
2197                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2198         else
2199                 lookaside->nll_l.gl_allocfunc = allocfunc;
2200
2201         if (freefunc == NULL)
2202                 lookaside->nll_l.gl_freefunc =
2203                     ntoskrnl_findwrap((funcptr)ExFreePool);
2204         else
2205                 lookaside->nll_l.gl_freefunc = freefunc;
2206
2207 #ifdef __i386__
2208         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2209 #endif
2210
2211         lookaside->nll_l.gl_type = NonPagedPool;
2212         lookaside->nll_l.gl_depth = depth;
2213         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2214 }
2215
2216 static void
2217 ExDeleteNPagedLookasideList(lookaside)
2218         npaged_lookaside_list   *lookaside;
2219 {
2220         void                    *buf;
2221         void            (*freefunc)(void *);
2222
2223         freefunc = lookaside->nll_l.gl_freefunc;
2224         while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2225                 MSCALL1(freefunc, buf);
2226 }
2227
2228 slist_entry *
2229 InterlockedPushEntrySList(head, entry)
2230         slist_header            *head;
2231         slist_entry             *entry;
2232 {
2233         slist_entry             *oldhead;
2234
2235         mtx_lock_spin(&ntoskrnl_interlock);
2236         oldhead = ntoskrnl_pushsl(head, entry);
2237         mtx_unlock_spin(&ntoskrnl_interlock);
2238
2239         return (oldhead);
2240 }
2241
2242 slist_entry *
2243 InterlockedPopEntrySList(head)
2244         slist_header            *head;
2245 {
2246         slist_entry             *first;
2247
2248         mtx_lock_spin(&ntoskrnl_interlock);
2249         first = ntoskrnl_popsl(head);
2250         mtx_unlock_spin(&ntoskrnl_interlock);
2251
2252         return (first);
2253 }
2254
2255 static slist_entry *
2256 ExInterlockedPushEntrySList(head, entry, lock)
2257         slist_header            *head;
2258         slist_entry             *entry;
2259         kspin_lock              *lock;
2260 {
2261         return (InterlockedPushEntrySList(head, entry));
2262 }
2263
2264 static slist_entry *
2265 ExInterlockedPopEntrySList(head, lock)
2266         slist_header            *head;
2267         kspin_lock              *lock;
2268 {
2269         return (InterlockedPopEntrySList(head));
2270 }
2271
2272 uint16_t
2273 ExQueryDepthSList(head)
2274         slist_header            *head;
2275 {
2276         uint16_t                depth;
2277
2278         mtx_lock_spin(&ntoskrnl_interlock);
2279         depth = head->slh_list.slh_depth;
2280         mtx_unlock_spin(&ntoskrnl_interlock);
2281
2282         return (depth);
2283 }
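
/*
 * Illustrative push/pop sequence for the interlocked SLIST routines
 * above (hypothetical, compiled out). Entries are normally embedded
 * at the head of a caller-defined structure so the popped pointer
 * can be cast back to the containing record.
 */
#if 0
struct example_node {
        slist_entry             link;   /* must be first for the cast */
        int                     value;
};

static void
example_slist(void)
{
        slist_header            h;
        struct example_node     n;
        struct example_node     *p;

        InitializeSListHead(&h);
        n.value = 42;
        InterlockedPushEntrySList(&h, &n.link);
        /* ExQueryDepthSList(&h) now returns 1. */
        p = (struct example_node *)InterlockedPopEntrySList(&h);
        /* p == &n; the list is empty again. */
}
#endif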
2284
2285 void
2286 KeInitializeSpinLock(lock)
2287         kspin_lock              *lock;
2288 {
2289         *lock = 0;
2290 }
2291
2292 #ifdef __i386__
2293 void
2294 KefAcquireSpinLockAtDpcLevel(lock)
2295         kspin_lock              *lock;
2296 {
2297 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2298         int                     i = 0;
2299 #endif
2300
2301         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2302                 /* sit and spin */;
2303 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2304                 i++;
2305                 if (i > 200000000)
2306                         panic("DEADLOCK!");
2307 #endif
2308         }
2309 }
2310
2311 void
2312 KefReleaseSpinLockFromDpcLevel(lock)
2313         kspin_lock              *lock;
2314 {
2315         atomic_store_rel_int((volatile u_int *)lock, 0);
2316 }
2317
2318 uint8_t
2319 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2320 {
2321         uint8_t                 oldirql;
2322
2323         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2324                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2325
2326         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2327         KeAcquireSpinLockAtDpcLevel(lock);
2328
2329         return (oldirql);
2330 }
2331 #else
2332 void
2333 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2334 {
2335         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2336                 /* sit and spin */;
2337 }
2338
2339 void
2340 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2341 {
2342         atomic_store_rel_int((volatile u_int *)lock, 0);
2343 }
2344 #endif /* __i386__ */
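
/*
 * The canonical acquire/release pairing for the spinlock routines
 * above, sketched for a hypothetical caller (compiled out); this is
 * the same pattern ntoskrnl_workitem_thread() uses further down.
 * KeAcquireSpinLock() raises to DISPATCH_LEVEL and hands back the
 * old IRQL, which must be passed to KeReleaseSpinLock().
 */
#if 0
static void
example_spinlock(void)
{
        kspin_lock              lck;
        uint8_t                 irql;

        KeInitializeSpinLock(&lck);
        KeAcquireSpinLock(&lck, &irql);
        /* ... touch data shared with a DPC here ... */
        KeReleaseSpinLock(&lck, irql);
}
#endif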
2345
2346 uintptr_t
2347 InterlockedExchange(dst, val)
2348         volatile uint32_t       *dst;
2349         uintptr_t               val;
2350 {
2351         uintptr_t               r;
2352
2353         mtx_lock_spin(&ntoskrnl_interlock);
2354         r = *dst;
2355         *dst = val;
2356         mtx_unlock_spin(&ntoskrnl_interlock);
2357
2358         return (r);
2359 }
2360
2361 static uint32_t
2362 InterlockedIncrement(addend)
2363         volatile uint32_t       *addend;
2364 {
2365         /* 32-bit atomic; return the post-increment value. */
2366         return (atomic_fetchadd_int((volatile u_int *)addend, 1) + 1);
2367 }
2368
2369 static uint32_t
2370 InterlockedDecrement(addend)
2371         volatile uint32_t       *addend;
2372 {
2373         /* 32-bit atomic; return the post-decrement value. */
2374         return (atomic_fetchadd_int((volatile u_int *)addend, -1) - 1);
2375 }
2376
2377 static void
2378 ExInterlockedAddLargeStatistic(addend, inc)
2379         uint64_t                *addend;
2380         uint32_t                inc;
2381 {
2382         mtx_lock_spin(&ntoskrnl_interlock);
2383         *addend += inc;
2384         mtx_unlock_spin(&ntoskrnl_interlock);
2385 }
2386
2387 mdl *
2388 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
2389         uint8_t chargequota, irp *iopkt)
2390 {
2391         mdl                     *m;
2392         int                     zone = 0;
2393
2394         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2395                 m = ExAllocatePoolWithTag(NonPagedPool,
2396                     MmSizeOfMdl(vaddr, len), 0);
2397         else {
2398                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
2399                 zone++;
2400         }
2401
2402         if (m == NULL)
2403                 return (NULL);
2404
2405         MmInitializeMdl(m, vaddr, len);
2406
2407         /*
2408          * MmInitializeMdl() clears the flags field, so we
2409          * have to set this here. If the MDL came from the
2410          * MDL UMA zone, tag it so we can release it to
2411          * the right place later.
2412          */
2413         if (zone)
2414                 m->mdl_flags = MDL_ZONE_ALLOCED;
2415
2416         if (iopkt != NULL) {
2417                 if (secondarybuf == TRUE) {
2418                         mdl                     *last;
2419                         last = iopkt->irp_mdl;
2420                         while (last->mdl_next != NULL)
2421                                 last = last->mdl_next;
2422                         last->mdl_next = m;
2423                 } else {
2424                         if (iopkt->irp_mdl != NULL)
2425                                 panic("leaking an MDL in IoAllocateMdl()");
2426                         iopkt->irp_mdl = m;
2427                 }
2428         }
2429
2430         return (m);
2431 }
2432
2433 void
2434 IoFreeMdl(m)
2435         mdl                     *m;
2436 {
2437         if (m == NULL)
2438                 return;
2439
2440         if (m->mdl_flags & MDL_ZONE_ALLOCED)
2441                 uma_zfree(mdl_zone, m);
2442         else
2443                 ExFreePool(m);
2444 }
2445
2446 static void *
2447 MmAllocateContiguousMemory(size, highest)
2448         uint32_t                size;
2449         uint64_t                highest;
2450 {
2451         void *addr;
2452         size_t pagelength = roundup(size, PAGE_SIZE);
2453
2454         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2455
2456         return (addr);
2457 }
2458
2459 static void *
2460 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
2461     boundary, cachetype)
2462         uint32_t                size;
2463         uint64_t                lowest;
2464         uint64_t                highest;
2465         uint64_t                boundary;
2466         enum nt_caching_type    cachetype;
2467 {
2468         vm_memattr_t            memattr;
2469         void                    *ret;
2470
2471         switch (cachetype) {
2472         case MmNonCached:
2473                 memattr = VM_MEMATTR_UNCACHEABLE;
2474                 break;
2475         case MmWriteCombined:
2476                 memattr = VM_MEMATTR_WRITE_COMBINING;
2477                 break;
2478         case MmNonCachedUnordered:
2479                 memattr = VM_MEMATTR_UNCACHEABLE;
2480                 break;
2481         case MmCached:
2482         case MmHardwareCoherentCached:
2483         case MmUSWCCached:
2484         default:
2485                 memattr = VM_MEMATTR_DEFAULT;
2486                 break;
2487         }
2488
2489         ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
2490             lowest, highest, PAGE_SIZE, boundary, memattr);
2491         if (ret != NULL)
2492                 malloc_type_allocated(M_DEVBUF, round_page(size));
2493         return (ret);
2494 }
2495
2496 static void
2497 MmFreeContiguousMemory(base)
2498         void                    *base;
2499 {
2500         ExFreePool(base);
2501 }
2502
2503 static void
2504 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
2505         void                    *base;
2506         uint32_t                size;
2507         enum nt_caching_type    cachetype;
2508 {
2509         contigfree(base, size, M_DEVBUF);
2510 }
2511
2512 static uint32_t
2513 MmSizeOfMdl(vaddr, len)
2514         void                    *vaddr;
2515         size_t                  len;
2516 {
2517         uint32_t                l;
2518
2519         l = sizeof(struct mdl) +
2520             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2521
2522         return (l);
2523 }
2524
2525 /*
2526  * The Microsoft documentation says this routine fills in the
2527  * page array of an MDL with the _physical_ page addresses that
2528  * comprise the buffer, but we don't really want to do that here.
2529  * Instead, we just fill in the page array with the kernel virtual
2530  * addresses of the buffers.
2531  */
2532 void
2533 MmBuildMdlForNonPagedPool(m)
2534         mdl                     *m;
2535 {
2536         vm_offset_t             *mdl_pages;
2537         int                     pagecnt, i;
2538
2539         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2540
2541         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2542                 panic("not enough pages in MDL to describe buffer");
2543
2544         mdl_pages = MmGetMdlPfnArray(m);
2545
2546         for (i = 0; i < pagecnt; i++)
2547                 *mdl_pages++ = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2548
2549         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2550         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2551 }
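
/*
 * A sketch of the typical describe-a-buffer flow using the MDL
 * routines in this file (hypothetical caller, compiled out). Since
 * MmBuildMdlForNonPagedPool() stores kernel virtual addresses here
 * rather than physical pages, the mapped address is just the
 * original buffer.
 */
#if 0
static void
example_mdl(void *buf, uint32_t len)
{
        mdl                     *m;

        m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
        if (m == NULL)
                return;
        MmBuildMdlForNonPagedPool(m);
        /* MmGetMdlVirtualAddress(m) now equals buf. */
        IoFreeMdl(m);
}
#endif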
2552
2553 static void *
2554 MmMapLockedPages(mdl *buf, uint8_t accessmode)
2555 {
2556         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2557         return (MmGetMdlVirtualAddress(buf));
2558 }
2559
2560 static void *
2561 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
2562         void *vaddr, uint32_t bugcheck, uint32_t prio)
2563 {
2564         return (MmMapLockedPages(buf, accessmode));
2565 }
2566
2567 static void
2568 MmUnmapLockedPages(vaddr, buf)
2569         void                    *vaddr;
2570         mdl                     *buf;
2571 {
2572         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2573 }
2574
2575 /*
2576  * This function has a problem in that it will break if you
2577  * compile this module without PAE and try to use it on a PAE
2578  * kernel. Unfortunately, there's no way around this at the
2579  * moment. It's slightly less broken than using pmap_kextract().
2580  * You'd think the virtual memory subsystem would help us out
2581  * here, but it doesn't.
2582  */
2583
2584 static uint64_t
2585 MmGetPhysicalAddress(void *base)
2586 {
2587         return (pmap_extract(kernel_map->pmap, (vm_offset_t)base));
2588 }
2589
2590 uint8_t
2591 MmIsAddressValid(vaddr)
2592         void                    *vaddr;
2593 {
2594         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
2595                 return (TRUE);
2596
2597         return (FALSE);
2598 }
2599
2600 void *
2601 MmMapIoSpace(paddr, len, cachetype)
2602         uint64_t                paddr;
2603         uint32_t                len;
2604         uint32_t                cachetype;
2605 {
2606         devclass_t              nexus_class;
2607         device_t                *nexus_devs, devp;
2608         int                     nexus_count = 0;
2609         device_t                matching_dev = NULL;
2610         struct resource         *res;
2611         int                     i;
2612         vm_offset_t             v;
2613
2614         /* There will always be at least one nexus. */
2615
2616         nexus_class = devclass_find("nexus");
2617         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2618
2619         for (i = 0; i < nexus_count; i++) {
2620                 devp = nexus_devs[i];
2621                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2622                 if (matching_dev)
2623                         break;
2624         }
2625
2626         free(nexus_devs, M_TEMP);
2627
2628         if (matching_dev == NULL)
2629                 return (NULL);
2630
2631         v = (vm_offset_t)rman_get_virtual(res);
2632         if (paddr > rman_get_start(res))
2633                 v += paddr - rman_get_start(res);
2634
2635         return ((void *)v);
2636 }
2637
2638 void
2639 MmUnmapIoSpace(vaddr, len)
2640         void                    *vaddr;
2641         size_t                  len;
2642 {
2643 }
2644
2645
2646 static device_t
2647 ntoskrnl_finddev(dev, paddr, res)
2648         device_t                dev;
2649         uint64_t                paddr;
2650         struct resource         **res;
2651 {
2652         device_t                *children = NULL;
2653         device_t                matching_dev;
2654         int                     childcnt;
2655         struct resource         *r;
2656         struct resource_list    *rl;
2657         struct resource_list_entry      *rle;
2658         uint32_t                flags;
2659         int                     i;
2660
2661         /* We only want devices that have been successfully probed. */
2662
2663         if (device_is_alive(dev) == FALSE)
2664                 return (NULL);
2665
2666         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2667         if (rl != NULL) {
2668                 STAILQ_FOREACH(rle, rl, link) {
2669                         r = rle->res;
2670
2671                         if (r == NULL)
2672                                 continue;
2673
2674                         flags = rman_get_flags(r);
2675
2676                         if (rle->type == SYS_RES_MEMORY &&
2677                             paddr >= rman_get_start(r) &&
2678                             paddr <= rman_get_end(r)) {
2679                                 if (!(flags & RF_ACTIVE))
2680                                         bus_activate_resource(dev,
2681                                             SYS_RES_MEMORY, 0, r);
2682                                 *res = r;
2683                                 return (dev);
2684                         }
2685                 }
2686         }
2687
2688         /*
2689          * If this device has children, do another
2690          * level of recursion to inspect them.
2691          */
2692
2693         device_get_children(dev, &children, &childcnt);
2694
2695         for (i = 0; i < childcnt; i++) {
2696                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2697                 if (matching_dev != NULL) {
2698                         free(children, M_TEMP);
2699                         return (matching_dev);
2700                 }
2701         }
2702
2703
2704         /* Won't somebody please think of the children! */
2705
2706         if (children != NULL)
2707                 free(children, M_TEMP);
2708
2709         return (NULL);
2710 }
2711
2712 /*
2713  * Workitems are unlike DPCs in that they run in a user-mode thread
2714  * context rather than at DISPATCH_LEVEL in kernel context. In our
2715  * case we run them in kernel context anyway.
2716  */
2717 static void
2718 ntoskrnl_workitem_thread(arg)
2719         void                    *arg;
2720 {
2721         kdpc_queue              *kq;
2722         list_entry              *l;
2723         io_workitem             *iw;
2724         uint8_t                 irql;
2725
2726         kq = arg;
2727
2728         InitializeListHead(&kq->kq_disp);
2729         kq->kq_td = curthread;
2730         kq->kq_exit = 0;
2731         KeInitializeSpinLock(&kq->kq_lock);
2732         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2733
2734         while (1) {
2735                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2736
2737                 KeAcquireSpinLock(&kq->kq_lock, &irql);
2738
2739                 if (kq->kq_exit) {
2740                         kq->kq_exit = 0;
2741                         KeReleaseSpinLock(&kq->kq_lock, irql);
2742                         break;
2743                 }
2744
2745                 while (!IsListEmpty(&kq->kq_disp)) {
2746                         l = RemoveHeadList(&kq->kq_disp);
2747                         iw = CONTAINING_RECORD(l,
2748                             io_workitem, iw_listentry);
2749                         InitializeListHead((&iw->iw_listentry));
2750                         if (iw->iw_func == NULL)
2751                                 continue;
2752                         KeReleaseSpinLock(&kq->kq_lock, irql);
2753                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2754                         KeAcquireSpinLock(&kq->kq_lock, &irql);
2755                 }
2756
2757                 KeReleaseSpinLock(&kq->kq_lock, irql);
2758         }
2759
2760         kproc_exit(0);
2761         return; /* notreached */
2762 }
2763
2764 static ndis_status
2765 RtlCharToInteger(src, base, val)
2766         const char              *src;
2767         uint32_t                base;
2768         uint32_t                *val;
2769 {
2770         int negative = 0;
2771         uint32_t res;
2772
2773         if (!src || !val)
2774                 return (STATUS_ACCESS_VIOLATION);
2775         while (*src != '\0' && *src <= ' ')
2776                 src++;
2777         if (*src == '+')
2778                 src++;
2779         else if (*src == '-') {
2780                 src++;
2781                 negative = 1;
2782         }
2783         if (base == 0) {
2784                 base = 10;
2785                 if (*src == '0') {
2786                         src++;
2787                         if (*src == 'b') {
2788                                 base = 2;
2789                                 src++;
2790                         } else if (*src == 'o') {
2791                                 base = 8;
2792                                 src++;
2793                         } else if (*src == 'x') {
2794                                 base = 16;
2795                                 src++;
2796                         }
2797                 }
2798         } else if (!(base == 2 || base == 8 || base == 10 || base == 16))
2799                 return (STATUS_INVALID_PARAMETER);
2800
2801         for (res = 0; *src; src++) {
2802                 int v;
2803                 if (isdigit(*src))
2804                         v = *src - '0';
2805                 else if (isxdigit(*src))
2806                         v = tolower(*src) - 'a' + 10;
2807                 else
2808                         v = base;
2809                 if (v >= base)
2810                         return (STATUS_INVALID_PARAMETER);
2811                 res = res * base + v;
2812         }
2813         *val = negative ? -res : res;
2814         return (STATUS_SUCCESS);
2815 }
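
/*
 * Illustrative inputs for RtlCharToInteger() (hypothetical, compiled
 * out). A base of 0 turns on prefix detection: 0b for binary, 0o for
 * octal, 0x for hex, plain digits for decimal.
 */
#if 0
static void
example_char_to_integer(void)
{
        uint32_t                val;

        RtlCharToInteger("42", 0, &val);        /* val = 42 */
        RtlCharToInteger("0x2a", 0, &val);      /* val = 42 */
        RtlCharToInteger("-42", 10, &val);      /* val = (uint32_t)-42 */
}
#endif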
2816
2817 static void
2818 ntoskrnl_destroy_workitem_threads(void)
2819 {
2820         kdpc_queue              *kq;
2821         int                     i;
2822
2823         for (i = 0; i < WORKITEM_THREADS; i++) {
2824                 kq = wq_queues + i;
2825                 kq->kq_exit = 1;
2826                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2827                 while (kq->kq_exit)
2828                         tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
2829         }
2830 }
2831
2832 io_workitem *
2833 IoAllocateWorkItem(dobj)
2834         device_object           *dobj;
2835 {
2836         io_workitem             *iw;
2837
2838         iw = uma_zalloc(iw_zone, M_NOWAIT);
2839         if (iw == NULL)
2840                 return (NULL);
2841
2842         InitializeListHead(&iw->iw_listentry);
2843         iw->iw_dobj = dobj;
2844
2845         mtx_lock(&ntoskrnl_dispatchlock);
2846         iw->iw_idx = wq_idx;
2847         WORKIDX_INC(wq_idx);
2848         mtx_unlock(&ntoskrnl_dispatchlock);
2849
2850         return (iw);
2851 }
2852
2853 void
2854 IoFreeWorkItem(iw)
2855         io_workitem             *iw;
2856 {
2857         uma_zfree(iw_zone, iw);
2858 }
2859
2860 void
2861 IoQueueWorkItem(iw, iw_func, qtype, ctx)
2862         io_workitem             *iw;
2863         io_workitem_func        iw_func;
2864         uint32_t                qtype;
2865         void                    *ctx;
2866 {
2867         kdpc_queue              *kq;
2868         list_entry              *l;
2869         io_workitem             *cur;
2870         uint8_t                 irql;
2871
2872         kq = wq_queues + iw->iw_idx;
2873
2874         KeAcquireSpinLock(&kq->kq_lock, &irql);
2875
2876         /*
2877          * Traverse the list and make sure this workitem hasn't
2878          * already been inserted. Queuing the same workitem
2879          * twice will hose the list but good.
2880          */
2881
2882         l = kq->kq_disp.nle_flink;
2883         while (l != &kq->kq_disp) {
2884                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2885                 if (cur == iw) {
2886                         /* Already queued -- do nothing. */
2887                         KeReleaseSpinLock(&kq->kq_lock, irql);
2888                         return;
2889                 }
2890                 l = l->nle_flink;
2891         }
2892
2893         iw->iw_func = iw_func;
2894         iw->iw_ctx = ctx;
2895
2896         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2897         KeReleaseSpinLock(&kq->kq_lock, irql);
2898
2899         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2900 }
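
/*
 * A sketch of the allocate/queue/free workitem life cycle
 * (hypothetical driver code, compiled out). The callback is invoked
 * from a workitem thread via MSCALL2(), so a real driver routine
 * must use the Windows calling convention; the qtype argument is
 * accepted but not used by this implementation.
 */
#if 0
static void
example_workitem_func(device_object *dobj, void *ctx)
{
        /* Runs later, in workitem thread context. */
}

static void
example_queue_work(device_object *dobj)
{
        io_workitem             *iw;

        iw = IoAllocateWorkItem(dobj);
        if (iw == NULL)
                return;
        IoQueueWorkItem(iw, (io_workitem_func)example_workitem_func,
            0, NULL);
        /* Call IoFreeWorkItem(iw) once the item is known to have run. */
}
#endif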
2901
2902 static void
2903 ntoskrnl_workitem(dobj, arg)
2904         device_object           *dobj;
2905         void                    *arg;
2906 {
2907         io_workitem             *iw;
2908         work_queue_item         *w;
2909         work_item_func          f;
2910
2911         iw = arg;
2912         w = (work_queue_item *)dobj;
2913         f = (work_item_func)w->wqi_func;
2914         uma_zfree(iw_zone, iw);
2915         MSCALL2(f, w, w->wqi_ctx);
2916 }
2917
2918 /*
2919  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2920  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2921  * problem with ExQueueWorkItem() is that it can't guard against
2922  * the condition where a driver submits a job to the work queue and
2923  * is then unloaded before the job is able to run. IoQueueWorkItem()
2924  * acquires a reference to the device's device_object via the
2925  * object manager and retains it until after the job has completed,
2926  * which prevents the driver from being unloaded before the job
2927  * runs. (We don't currently support this behavior, though hopefully
2928  * that will change once the object manager API is fleshed out a bit.)
2929  *
2930  * Having said all that, the ExQueueWorkItem() API remains, because
2931  * there are still other parts of Windows that use it, including
2932  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2933  * We fake up the ExQueueWorkItem() API on top of our implementation
2934  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2935  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2936  * queue item (provided by the caller) in to IoAllocateWorkItem()
2937  * instead of the device_object. We need to save this pointer so
2938  * we can apply a sanity check: as with the DPC queue and other
2939  * workitem queues, we can't allow the same work queue item to
2940  * be queued twice. If it's already pending, we silently return.
2941  */
2942
2943 void
2944 ExQueueWorkItem(w, qtype)
2945         work_queue_item         *w;
2946         uint32_t                qtype;
2947 {
2948         io_workitem             *iw;
2949         io_workitem_func        iwf;
2950         kdpc_queue              *kq;
2951         list_entry              *l;
2952         io_workitem             *cur;
2953         uint8_t                 irql;
2954
2955
2956         /*
2957          * We need to do a special sanity test to make sure
2958          * the ExQueueWorkItem() API isn't used to queue
2959          * the same workitem twice. Rather than checking the
2960          * io_workitem pointer itself, we test the attached
2961          * device object, which is really a pointer to the
2962          * legacy work queue item structure.
2963          */
2964
2965         kq = wq_queues + WORKITEM_LEGACY_THREAD;
2966         KeAcquireSpinLock(&kq->kq_lock, &irql);
2967         l = kq->kq_disp.nle_flink;
2968         while (l != &kq->kq_disp) {
2969                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2970                 if (cur->iw_dobj == (device_object *)w) {
2971                         /* Already queued -- do nothing. */
2972                         KeReleaseSpinLock(&kq->kq_lock, irql);
2973                         return;
2974                 }
2975                 l = l->nle_flink;
2976         }
2977         KeReleaseSpinLock(&kq->kq_lock, irql);
2978
2979         iw = IoAllocateWorkItem((device_object *)w);
2980         if (iw == NULL)
2981                 return;
2982
2983         iw->iw_idx = WORKITEM_LEGACY_THREAD;
2984         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2985         IoQueueWorkItem(iw, iwf, qtype, iw);
2986 }
2987
2988 static void
2989 RtlZeroMemory(dst, len)
2990         void                    *dst;
2991         size_t                  len;
2992 {
2993         bzero(dst, len);
2994 }
2995
2996 static void
2997 RtlSecureZeroMemory(dst, len)
2998         void                    *dst;
2999         size_t                  len;
3000 {
3001         memset(dst, 0, len);
3002 }
3003
3004 static void
3005 RtlFillMemory(dst, len, c)
3006         void                    *dst;
3007         size_t                  len;
3008         uint8_t                 c;
3009 {
3010         memset(dst, c, len);
3011 }
3012
3013 static void
3014 RtlMoveMemory(dst, src, len)
3015         void                    *dst;
3016         const void              *src;
3017         size_t                  len;
3018 {
3019         memmove(dst, src, len);
3020 }
3021
3022 static void
3023 RtlCopyMemory(dst, src, len)
3024         void                    *dst;
3025         const void              *src;
3026         size_t                  len;
3027 {
3028         bcopy(src, dst, len);
3029 }
3030
3031 static size_t
3032 RtlCompareMemory(s1, s2, len)
3033         const void              *s1;
3034         const void              *s2;
3035         size_t                  len;
3036 {
3037         size_t                  i;
3038         uint8_t                 *m1, *m2;
3039
3040         m1 = __DECONST(uint8_t *, s1);
3041         m2 = __DECONST(uint8_t *, s2);
3042
3043         for (i = 0; i < len && m1[i] == m2[i]; i++);
3044         return (i);
3045 }
3046
3047 void
3048 RtlInitAnsiString(dst, src)
3049         ansi_string             *dst;
3050         char                    *src;
3051 {
3052         ansi_string             *a;
3053
3054         a = dst;
3055         if (a == NULL)
3056                 return;
3057         if (src == NULL) {
3058                 a->as_len = a->as_maxlen = 0;
3059                 a->as_buf = NULL;
3060         } else {
3061                 a->as_buf = src;
3062                 a->as_len = a->as_maxlen = strlen(src);
3063         }
3064 }
3065
3066 void
3067 RtlInitUnicodeString(dst, src)
3068         unicode_string          *dst;
3069         uint16_t                *src;
3070 {
3071         unicode_string          *u;
3072         int                     i;
3073
3074         u = dst;
3075         if (u == NULL)
3076                 return;
3077         if (src == NULL) {
3078                 u->us_len = u->us_maxlen = 0;
3079                 u->us_buf = NULL;
3080         } else {
3081                 i = 0;
3082                 while(src[i] != 0)
3083                         i++;
3084                 u->us_buf = src;
3085                 u->us_len = u->us_maxlen = i * 2;
3086         }
3087 }
3088
3089 ndis_status
3090 RtlUnicodeStringToInteger(ustr, base, val)
3091         unicode_string          *ustr;
3092         uint32_t                base;
3093         uint32_t                *val;
3094 {
3095         uint16_t                *uchr;
3096         int                     len, neg = 0;
3097         char                    abuf[64];
3098         char                    *astr;
3099
3100         uchr = ustr->us_buf;
3101         len = ustr->us_len;
3102         bzero(abuf, sizeof(abuf));
3103
3104         if ((char)((*uchr) & 0xFF) == '-') {
3105                 neg = 1;
3106                 uchr++;
3107                 len -= 2;
3108         } else if ((char)((*uchr) & 0xFF) == '+') {
3109                 neg = 0;
3110                 uchr++;
3111                 len -= 2;
3112         }
3113
3114         if (base == 0) {
3115                 if ((char)((*uchr) & 0xFF) == 'b') {
3116                         base = 2;
3117                         uchr++;
3118                         len -= 2;
3119                 } else if ((char)((*uchr) & 0xFF) == 'o') {
3120                         base = 8;
3121                         uchr++;
3122                         len -= 2;
3123                 } else if ((char)((*uchr) & 0xFF) == 'x') {
3124                         base = 16;
3125                         uchr++;
3126                         len -= 2;
3127                 } else
3128                         base = 10;
3129         }
3130
3131         astr = abuf;
3132         if (neg) {
3133                 strcpy(astr, "-");
3134                 astr++;
3135         }
3136
3137         ntoskrnl_unicode_to_ascii(uchr, astr, len);
3138         *val = strtoul(abuf, NULL, base);
3139
3140         return (STATUS_SUCCESS);
3141 }
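
/*
 * A companion sketch for the unicode string routines (hypothetical,
 * compiled out). RtlInitUnicodeString() only points the descriptor
 * at the caller's buffer; lengths are in bytes, two per UTF-16 code
 * unit.
 */
#if 0
static void
example_unicode_to_integer(void)
{
        static uint16_t         digits[] = { '1', '2', '3', 0 };
        unicode_string          us;
        uint32_t                val;

        RtlInitUnicodeString(&us, digits);              /* us_len = 6 */
        RtlUnicodeStringToInteger(&us, 10, &val);       /* val = 123 */
}
#endif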
3142
3143 void
3144 RtlFreeUnicodeString(ustr)
3145         unicode_string          *ustr;
3146 {
3147         if (ustr->us_buf == NULL)
3148                 return;
3149         ExFreePool(ustr->us_buf);
3150         ustr->us_buf = NULL;
3151 }
3152
3153 void
3154 RtlFreeAnsiString(astr)
3155         ansi_string             *astr;
3156 {
3157         if (astr->as_buf == NULL)
3158                 return;
3159         ExFreePool(astr->as_buf);
3160         astr->as_buf = NULL;
3161 }
3162
3163 static int
3164 atoi(str)
3165         const char              *str;
3166 {
3167         return (int)strtol(str, (char **)NULL, 10);
3168 }
3169
3170 static long
3171 atol(str)
3172         const char              *str;
3173 {
3174         return strtol(str, (char **)NULL, 10);
3175 }
3176
3177 static int
3178 rand(void)
3179 {
3180         struct timeval          tv;
3181
3182         microtime(&tv);
3183         srandom(tv.tv_usec);
3184         return ((int)random());
3185 }
3186
3187 static void
3188 srand(seed)
3189         unsigned int            seed;
3190 {
3191         srandom(seed);
3192 }
3193
3194 static uint8_t
3195 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
3196 {
3197         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3198                 return (TRUE);
3199         return (FALSE);
3200 }
3201
3202 static ndis_status
3203 IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
3204         unicode_string          *name;
3205         uint32_t                reqaccess;
3206         void                    *fileobj;
3207         device_object           *devobj;
3208 {
3209         return (STATUS_SUCCESS);
3210 }
3211
3212 static ndis_status
3213 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3214         device_object           *devobj;
3215         uint32_t                regprop;
3216         uint32_t                buflen;
3217         void                    *prop;
3218         uint32_t                *reslen;
3219 {
3220         driver_object           *drv;
3221         uint16_t                **name;
3222
3223         drv = devobj->do_drvobj;
3224
3225         switch (regprop) {
3226         case DEVPROP_DRIVER_KEYNAME:
3227                 name = prop;
3228                 *name = drv->dro_drivername.us_buf;
3229                 *reslen = drv->dro_drivername.us_len;
3230                 break;
3231         default:
3232                 return (STATUS_INVALID_PARAMETER_2);
3234         }
3235
3236         return (STATUS_SUCCESS);
3237 }
3238
3239 static void
3240 KeInitializeMutex(kmutex, level)
3241         kmutant                 *kmutex;
3242         uint32_t                level;
3243 {
3244         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3245         kmutex->km_abandoned = FALSE;
3246         kmutex->km_apcdisable = 1;
3247         kmutex->km_header.dh_sigstate = 1;
3248         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3249         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3250         kmutex->km_ownerthread = NULL;
3251 }
3252
3253 static uint32_t
3254 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
3255 {
3256         uint32_t                prevstate;
3257
3258         mtx_lock(&ntoskrnl_dispatchlock);
3259         prevstate = kmutex->km_header.dh_sigstate;
3260         if (kmutex->km_ownerthread != curthread) {
3261                 mtx_unlock(&ntoskrnl_dispatchlock);
3262                 return (STATUS_MUTANT_NOT_OWNED);
3263         }
3264
3265         kmutex->km_header.dh_sigstate++;
3266         kmutex->km_abandoned = FALSE;
3267
3268         if (kmutex->km_header.dh_sigstate == 1) {
3269                 kmutex->km_ownerthread = NULL;
3270                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3271         }
3272
3273         mtx_unlock(&ntoskrnl_dispatchlock);
3274
3275         return (prevstate);
3276 }
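/*
 * A minimal usage sketch for the mutant routines above (hypothetical
 * driver code, not part of this module). Ownership is acquired by
 * waiting on the mutex as a dispatcher object and dropped with
 * KeReleaseMutex():
 *
 *      kmutant                 mtx;
 *
 *      KeInitializeMutex(&mtx, 0);
 *      KeWaitForSingleObject(&mtx, 0, 0, FALSE, NULL);
 *      ... critical section ...
 *      KeReleaseMutex(&mtx, FALSE);
 */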
3277
3278 static uint32_t
3279 KeReadStateMutex(kmutex)
3280         kmutant                 *kmutex;
3281 {
3282         return (kmutex->km_header.dh_sigstate);
3283 }
3284
3285 void
3286 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
3287 {
3288         InitializeListHead((&kevent->k_header.dh_waitlisthead));
3289         kevent->k_header.dh_sigstate = state;
3290         if (type == EVENT_TYPE_NOTIFY)
3291                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3292         else
3293                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3294         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3295 }
3296
3297 uint32_t
3298 KeResetEvent(kevent)
3299         nt_kevent               *kevent;
3300 {
3301         uint32_t                prevstate;
3302
3303         mtx_lock(&ntoskrnl_dispatchlock);
3304         prevstate = kevent->k_header.dh_sigstate;
3305         kevent->k_header.dh_sigstate = FALSE;
3306         mtx_unlock(&ntoskrnl_dispatchlock);
3307
3308         return (prevstate);
3309 }
3310
3311 uint32_t
3312 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
3313 {
3314         uint32_t                prevstate;
3315         wait_block              *w;
3316         nt_dispatch_header      *dh;
3317         struct thread           *td;
3318         wb_ext                  *we;
3319
3320         mtx_lock(&ntoskrnl_dispatchlock);
3321         prevstate = kevent->k_header.dh_sigstate;
3322         dh = &kevent->k_header;
3323
3324         if (IsListEmpty(&dh->dh_waitlisthead))
3325                 /*
3326                  * If there's nobody in the waitlist, just set
3327                  * the state to signalled.
3328                  */
3329                 dh->dh_sigstate = 1;
3330         else {
3331                 /*
3332                  * Get the first waiter. If this is a synchronization
3333                  * event, just wake up that one thread (don't bother
3334                  * setting the state to signalled since we're supposed
3335                  * to automatically clear synchronization events anyway).
3336                  *
3337                  * If it's a notification event, or the first
3338                  * waiter is doing a WAITTYPE_ALL wait, go through
3339                  * the full wait satisfaction process.
3340                  */
3341                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3342                     wait_block, wb_waitlist);
3343                 we = w->wb_ext;
3344                 td = we->we_td;
3345                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3346                     w->wb_waittype == WAITTYPE_ALL) {
3347                         if (prevstate == 0) {
3348                                 dh->dh_sigstate = 1;
3349                                 ntoskrnl_waittest(dh, increment);
3350                         }
3351                 } else {
3352                         w->wb_awakened |= TRUE;
3353                         cv_broadcastpri(&we->we_cv,
3354                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3355                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3356                 }
3357         }
3358
3359         mtx_unlock(&ntoskrnl_dispatchlock);
3360
3361         return (prevstate);
3362 }
3363
3364 void
3365 KeClearEvent(kevent)
3366         nt_kevent               *kevent;
3367 {
3368         kevent->k_header.dh_sigstate = FALSE;
3369 }
3370
3371 uint32_t
3372 KeReadStateEvent(kevent)
3373         nt_kevent               *kevent;
3374 {
3375         return (kevent->k_header.dh_sigstate);
3376 }
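/*
 * A minimal usage sketch for the event routines above (hypothetical
 * driver code). A notification event stays signalled until it is
 * reset, waking all waiters; a synchronization event auto-clears and
 * wakes a single waiter:
 *
 *      nt_kevent               ev;
 *
 *      KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *      (from some other thread: KeSetEvent(&ev, IO_NO_INCREMENT, FALSE);)
 *      KeWaitForSingleObject(&ev, 0, 0, TRUE, NULL);
 *      KeResetEvent(&ev);
 */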
3377
3378 /*
3379  * The object manager in Windows is responsible for managing
3380  * references and access to various types of objects, including
3381  * device_objects, events, threads, timers and so on. However,
3382  * there's a difference in the way objects are handled in user
3383  * mode versus kernel mode.
3384  *
3385  * In user mode (i.e. Win32 applications), all objects are
3386  * managed by the object manager. For example, when you create
3387  * a timer or event object, you actually end up with an 
3388  * object_header (for the object manager's bookkeeping
3389  * purposes) and an object body (which contains the actual object
3390  * structure, e.g. ktimer, kevent, etc...). This allows Windows
3391  * to manage resource quotas and to enforce access restrictions
3392  * on basically every kind of system object handled by the kernel.
3393  *
3394  * However, in kernel mode, you only end up using the object
3395  * manager some of the time. For example, in a driver, you create
3396  * a timer object by simply allocating the memory for a ktimer
3397  * structure and initializing it with KeInitializeTimer(). Hence,
3398  * the timer has no object_header and no reference counting or
3399  * security/resource checks are done on it. The assumption in
3400  * this case is that if you're running in kernel mode, you know
3401  * what you're doing, and you're already at an elevated privilege
3402  * anyway.
3403  *
3404  * There are some exceptions to this. The two most important ones
3405  * for our purposes are device_objects and threads. We need to use
3406  * the object manager to do reference counting on device_objects,
3407  * and for threads, you can only get a pointer to a thread's
3408  * dispatch header by using ObReferenceObjectByHandle() on the
3409  * handle returned by PsCreateSystemThread().
3410  */
3411
3412 static ndis_status
3413 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
3414         uint8_t accessmode, void **object, void **handleinfo)
3415 {
3416         nt_objref               *nr;
3417
3418         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3419         if (nr == NULL)
3420                 return (STATUS_INSUFFICIENT_RESOURCES);
3421
3422         InitializeListHead((&nr->no_dh.dh_waitlisthead));
3423         nr->no_obj = handle;
3424         nr->no_dh.dh_type = DISP_TYPE_THREAD;
3425         nr->no_dh.dh_sigstate = 0;
3426         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3427             sizeof(uint32_t));
3428         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3429         *object = nr;
3430
3431         return (STATUS_SUCCESS);
3432 }
3433
3434 static void
3435 ObfDereferenceObject(object)
3436         void                    *object;
3437 {
3438         nt_objref               *nr;
3439
3440         nr = object;
3441         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3442         free(nr, M_DEVBUF);
3443 }
3444
3445 static uint32_t
3446 ZwClose(handle)
3447         ndis_handle             handle;
3448 {
3449         return (STATUS_SUCCESS);
3450 }
3451
3452 static uint32_t
3453 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3454         uint32_t                traceclass;
3455         void                    *traceinfo;
3456         uint32_t                infolen;
3457         uint32_t                reqlen;
3458         void                    *buf;
3459 {
3460         return (STATUS_NOT_FOUND);
3461 }
3462
3463 static uint32_t
3464 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3465         void *guid, uint16_t messagenum, ...)
3466 {
3467         return (STATUS_SUCCESS);
3468 }
3469
3470 static uint32_t
3471 IoWMIRegistrationControl(dobj, action)
3472         device_object           *dobj;
3473         uint32_t                action;
3474 {
3475         return (STATUS_SUCCESS);
3476 }
3477
3478 /*
3479  * This is here just in case the thread returns without calling
3480  * PsTerminateSystemThread().
3481  */
3482 static void
3483 ntoskrnl_thrfunc(arg)
3484         void                    *arg;
3485 {
3486         thread_context          *thrctx;
3487         uint32_t (*tfunc)(void *);
3488         void                    *tctx;
3489         uint32_t                rval;
3490
3491         thrctx = arg;
3492         tfunc = thrctx->tc_thrfunc;
3493         tctx = thrctx->tc_thrctx;
3494         free(thrctx, M_TEMP);
3495
3496         rval = MSCALL1(tfunc, tctx);
3497
3498         PsTerminateSystemThread(rval);
3499         return; /* notreached */
3500 }
3501
3502 static ndis_status
3503 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3504         clientid, thrfunc, thrctx)
3505         ndis_handle             *handle;
3506         uint32_t                reqaccess;
3507         void                    *objattrs;
3508         ndis_handle             phandle;
3509         void                    *clientid;
3510         void                    *thrfunc;
3511         void                    *thrctx;
3512 {
3513         int                     error;
3514         thread_context          *tc;
3515         struct proc             *p;
3516
3517         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3518         if (tc == NULL)
3519                 return (STATUS_INSUFFICIENT_RESOURCES);
3520
3521         tc->tc_thrctx = thrctx;
3522         tc->tc_thrfunc = thrfunc;
3523
3524         error = kproc_create(ntoskrnl_thrfunc, tc, &p,
3525             RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Kthread %d", ntoskrnl_kth);
3526
3527         if (error) {
3528                 free(tc, M_TEMP);
3529                 return (STATUS_INSUFFICIENT_RESOURCES);
3530         }
3531
3532         *handle = p;
3533         ntoskrnl_kth++;
3534
3535         return (STATUS_SUCCESS);
3536 }
3537
3538 /*
3539  * In Windows, the exit of a thread is an event that you're allowed
3540  * to wait on, assuming you've obtained a reference to the thread using
3541  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3542  * simulate this behavior is to register each thread we create in a
3543  * reference list, and if someone holds a reference to us, we poke
3544  * them.
3545  */
3546 static ndis_status
3547 PsTerminateSystemThread(status)
3548         ndis_status             status;
3549 {
3550         struct nt_objref        *nr;
3551
3552         mtx_lock(&ntoskrnl_dispatchlock);
3553         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3554                 if (nr->no_obj != curthread->td_proc)
3555                         continue;
3556                 nr->no_dh.dh_sigstate = 1;
3557                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3558                 break;
3559         }
3560         mtx_unlock(&ntoskrnl_dispatchlock);
3561
3562         ntoskrnl_kth--;
3563
3564         kproc_exit(0);
3565         return (0);     /* notreached */
3566 }
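/*
 * A minimal sketch of the thread lifecycle this emulation supports
 * (hypothetical driver code; func and ctx stand for the driver's
 * Windows-calling-convention thread routine and its argument, and the
 * access/type arguments are ignored by our ObReferenceObjectByHandle()).
 * The reference is a waitable object signalled by
 * PsTerminateSystemThread():
 *
 *      ndis_handle             handle;
 *      void                    *thr;
 *
 *      PsCreateSystemThread(&handle, 0, NULL, NULL, NULL, func, ctx);
 *      ObReferenceObjectByHandle(handle, 0, NULL, 0, &thr, NULL);
 *      KeWaitForSingleObject(thr, 0, 0, FALSE, NULL);
 *      ObfDereferenceObject(thr);
 */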
3567
3568 static uint32_t
3569 DbgPrint(char *fmt, ...)
3570 {
3571         va_list                 ap;
3572
3573         if (bootverbose) {
3574                 va_start(ap, fmt);
3575                 vprintf(fmt, ap);
                     va_end(ap);
3576         }
3577
3578         return (STATUS_SUCCESS);
3579 }
3580
3581 static void
3582 DbgBreakPoint(void)
3583 {
3584
3585         kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
3586 }
3587
3588 static void
3589 KeBugCheckEx(code, param1, param2, param3, param4)
3590     uint32_t                    code;
3591     u_long                      param1;
3592     u_long                      param2;
3593     u_long                      param3;
3594     u_long                      param4;
3595 {
3596         panic("KeBugCheckEx: STOP 0x%X", code);
3597 }
3598
3599 static void
3600 ntoskrnl_timercall(arg)
3601         void                    *arg;
3602 {
3603         ktimer                  *timer;
3604         struct timeval          tv;
3605         kdpc                    *dpc;
3606
3607         mtx_lock(&ntoskrnl_dispatchlock);
3608
3609         timer = arg;
3610
3611 #ifdef NTOSKRNL_DEBUG_TIMERS
3612         ntoskrnl_timer_fires++;
3613 #endif
3614         ntoskrnl_remove_timer(timer);
3615
3616         /*
3617          * This should never happen, but complain
3618          * if it does.
3619          */
3620
3621         if (timer->k_header.dh_inserted == FALSE) {
3622                 mtx_unlock(&ntoskrnl_dispatchlock);
3623                 printf("NTOS: timer %p fired even though "
3624                     "it was canceled\n", timer);
3625                 return;
3626         }
3627
3628         /* Mark the timer as no longer being on the timer queue. */
3629
3630         timer->k_header.dh_inserted = FALSE;
3631
3632         /* Now signal the object and satisfy any waits on it. */
3633
3634         timer->k_header.dh_sigstate = 1;
3635         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3636
3637         /*
3638          * If this is a periodic timer, re-arm it
3639          * so it will fire again. We do this before
3640          * calling any deferred procedure calls because
3641          * it's possible the DPC might cancel the timer,
3642          * in which case it would be wrong for us to
3643          * re-arm it again afterwards.
3644          */
3645
3646         if (timer->k_period) {
3647                 tv.tv_sec = 0;
3648                 tv.tv_usec = timer->k_period * 1000;
3649                 timer->k_header.dh_inserted = TRUE;
3650                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3651 #ifdef NTOSKRNL_DEBUG_TIMERS
3652                 ntoskrnl_timer_reloads++;
3653 #endif
3654         }
3655
3656         dpc = timer->k_dpc;
3657
3658         mtx_unlock(&ntoskrnl_dispatchlock);
3659
3660         /* If there's a DPC associated with the timer, queue it up. */
3661
3662         if (dpc != NULL)
3663                 KeInsertQueueDpc(dpc, NULL, NULL);
3664 }
3665
3666 #ifdef NTOSKRNL_DEBUG_TIMERS
3667 static int
3668 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3669 {
3670         int                     ret;
3671
3672         ret = 0;
3673         ntoskrnl_show_timers();
3674         return (sysctl_handle_int(oidp, &ret, 0, req));
3675 }
3676
3677 static void
3678 ntoskrnl_show_timers()
3679 {
3680         int                     i = 0;
3681         list_entry              *l;
3682
3683         mtx_lock_spin(&ntoskrnl_calllock);
3684         l = ntoskrnl_calllist.nle_flink;
3685         while (l != &ntoskrnl_calllist) {
3686                 i++;
3687                 l = l->nle_flink;
3688         }
3689         mtx_unlock_spin(&ntoskrnl_calllock);
3690
3691         printf("\n");
3692         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3693         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3694         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3695         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3696         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3697         printf("\n");
3698 }
3699 #endif
3700
3701 /*
3702  * Must be called with dispatcher lock held.
3703  */
3704
3705 static void
3706 ntoskrnl_insert_timer(timer, ticks)
3707         ktimer                  *timer;
3708         int                     ticks;
3709 {
3710         callout_entry           *e;
3711         list_entry              *l;
3712         struct callout          *c;
3713
3714         /*
3715          * Try to allocate a timer.
3716          */
3717         mtx_lock_spin(&ntoskrnl_calllock);
3718         if (IsListEmpty(&ntoskrnl_calllist)) {
3719                 mtx_unlock_spin(&ntoskrnl_calllock);
3720 #ifdef NTOSKRNL_DEBUG_TIMERS
3721                 ntoskrnl_show_timers();
3722 #endif
3723                 panic("out of timers!");
3724         }
3725         l = RemoveHeadList(&ntoskrnl_calllist);
3726         mtx_unlock_spin(&ntoskrnl_calllock);
3727
3728         e = CONTAINING_RECORD(l, callout_entry, ce_list);
3729         c = &e->ce_callout;
3730
3731         timer->k_callout = c;
3732
3733         callout_init(c, CALLOUT_MPSAFE);
3734         callout_reset(c, ticks, ntoskrnl_timercall, timer);
3735 }
3736
3737 static void
3738 ntoskrnl_remove_timer(timer)
3739         ktimer                  *timer;
3740 {
3741         callout_entry           *e;
3742
3743         e = (callout_entry *)timer->k_callout;
3744         callout_stop(timer->k_callout);
3745
3746         mtx_lock_spin(&ntoskrnl_calllock);
3747         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3748         mtx_unlock_spin(&ntoskrnl_calllock);
3749 }
3750
3751 void
3752 KeInitializeTimer(timer)
3753         ktimer                  *timer;
3754 {
3755         if (timer == NULL)
3756                 return;
3757
3758         KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3759 }
3760
3761 void
3762 KeInitializeTimerEx(timer, type)
3763         ktimer                  *timer;
3764         uint32_t                type;
3765 {
3766         if (timer == NULL)
3767                 return;
3768
3769         bzero((char *)timer, sizeof(ktimer));
3770         InitializeListHead((&timer->k_header.dh_waitlisthead));
3771         timer->k_header.dh_sigstate = FALSE;
3772         timer->k_header.dh_inserted = FALSE;
3773         if (type == EVENT_TYPE_NOTIFY)
3774                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3775         else
3776                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3777         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3778 }
3779
3780 /*
3781  * DPC subsystem. A Windows Deferred Procedure Call has the following
3782  * properties:
3783  * - It runs at DISPATCH_LEVEL.
3784  * - It can have one of 3 importance values that control when it
3785  *   runs relative to other DPCs in the queue.
3786  * - On SMP systems, it can be set to run on a specific processor.
3787  * In order to satisfy the last property, we create a DPC thread for
3788  * each CPU in the system and bind it to that CPU. Each thread
3789  * maintains a single dispatch queue; a DPC's importance controls
3790  * whether it is inserted at the head or the tail of that queue.
3791  *
3792  * In Windows, interrupt handlers run as DPCs. (Not to be confused
3793  * with ISRs, which run in interrupt context and can preempt DPCs.)
3794  * Interrupt DPCs are given the highest importance so that they'll
3795  * take precedence over timers and other things.
3796  */
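/*
 * A minimal sketch of how a driver exercises those knobs
 * (hypothetical code; my_dpc_func, my_ctx and the target CPU are
 * placeholders):
 *
 *      kdpc                    dpc;
 *
 *      KeInitializeDpc(&dpc, my_dpc_func, my_ctx);
 *      KeSetImportanceDpc(&dpc, KDPC_IMPORTANCE_HIGH);
 *      KeSetTargetProcessorDpc(&dpc, 0);
 *      KeInsertQueueDpc(&dpc, NULL, NULL);
 */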
3797
3798 static void
3799 ntoskrnl_dpc_thread(arg)
3800         void                    *arg;
3801 {
3802         kdpc_queue              *kq;
3803         kdpc                    *d;
3804         list_entry              *l;
3805         uint8_t                 irql;
3806
3807         kq = arg;
3808
3809         InitializeListHead(&kq->kq_disp);
3810         kq->kq_td = curthread;
3811         kq->kq_exit = 0;
3812         kq->kq_running = FALSE;
3813         KeInitializeSpinLock(&kq->kq_lock);
3814         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3815         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3816
3817         /*
3818          * Elevate our priority. DPCs are used to run interrupt
3819          * handlers, and they should trigger as soon as possible
3820          * once scheduled by an ISR.
3821          */
3822
3823         thread_lock(curthread);
3824 #ifdef NTOSKRNL_MULTIPLE_DPCS
3825         sched_bind(curthread, kq->kq_cpu);
3826 #endif
3827         sched_prio(curthread, PRI_MIN_KERN);
3828         thread_unlock(curthread);
3829
3830         while (1) {
3831                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3832
3833                 KeAcquireSpinLock(&kq->kq_lock, &irql);
3834
3835                 if (kq->kq_exit) {
3836                         kq->kq_exit = 0;
3837                         KeReleaseSpinLock(&kq->kq_lock, irql);
3838                         break;
3839                 }
3840
3841                 kq->kq_running = TRUE;
3842
3843                 while (!IsListEmpty(&kq->kq_disp)) {
3844                         l = RemoveHeadList((&kq->kq_disp));
3845                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3846                         InitializeListHead((&d->k_dpclistentry));
3847                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3848                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3849                             d->k_sysarg1, d->k_sysarg2);
3850                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3851                 }
3852
3853                 kq->kq_running = FALSE;
3854
3855                 KeReleaseSpinLock(&kq->kq_lock, irql);
3856
3857                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3858         }
3859
3860         kproc_exit(0);
3861         return; /* notreached */
3862 }
3863
3864 static void
3865 ntoskrnl_destroy_dpc_threads(void)
3866 {
3867         kdpc_queue              *kq;
3868         kdpc                    dpc;
3869         int                     i;
3870
3872 #ifdef NTOSKRNL_MULTIPLE_DPCS
3873         for (i = 0; i < mp_ncpus; i++) {
3874 #else
3875         for (i = 0; i < 1; i++) {
3876 #endif
3877                 kq = kq_queues + i;
3878
3879                 kq->kq_exit = 1;
3880                 KeInitializeDpc(&dpc, NULL, NULL);
3881                 KeSetTargetProcessorDpc(&dpc, i);
3882                 KeInsertQueueDpc(&dpc, NULL, NULL);
3883                 while (kq->kq_exit)
3884                         tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
3885         }
3886 }
3887
3888 static uint8_t
3889 ntoskrnl_insert_dpc(head, dpc)
3890         list_entry              *head;
3891         kdpc                    *dpc;
3892 {
3893         list_entry              *l;
3894         kdpc                    *d;
3895
3896         l = head->nle_flink;
3897         while (l != head) {
3898                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3899                 if (d == dpc)
3900                         return (FALSE);
3901                 l = l->nle_flink;
3902         }
3903
3904         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3905                 InsertTailList((head), (&dpc->k_dpclistentry));
3906         else
3907                 InsertHeadList((head), (&dpc->k_dpclistentry));
3908
3909         return (TRUE);
3910 }
3911
3912 void
3913 KeInitializeDpc(dpc, dpcfunc, dpcctx)
3914         kdpc                    *dpc;
3915         void                    *dpcfunc;
3916         void                    *dpcctx;
3917 {
3918
3919         if (dpc == NULL)
3920                 return;
3921
3922         dpc->k_deferedfunc = dpcfunc;
3923         dpc->k_deferredctx = dpcctx;
3924         dpc->k_num = KDPC_CPU_DEFAULT;
3925         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3926         InitializeListHead((&dpc->k_dpclistentry));
3927 }
3928
3929 uint8_t
3930 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
3931         kdpc                    *dpc;
3932         void                    *sysarg1;
3933         void                    *sysarg2;
3934 {
3935         kdpc_queue              *kq;
3936         uint8_t                 r;
3937         uint8_t                 irql;
3938
3939         if (dpc == NULL)
3940                 return (FALSE);
3941
3942         kq = kq_queues;
3943
3944 #ifdef NTOSKRNL_MULTIPLE_DPCS
3945         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3946
3947         /*
3948          * By default, the DPC is queued to run on the same CPU
3949          * that scheduled it.
3950          */
3951
3952         if (dpc->k_num == KDPC_CPU_DEFAULT)
3953                 kq += curthread->td_oncpu;
3954         else
3955                 kq += dpc->k_num;
3956         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3957 #else
3958         KeAcquireSpinLock(&kq->kq_lock, &irql);
3959 #endif
3960
3961         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3962         if (r == TRUE) {
3963                 dpc->k_sysarg1 = sysarg1;
3964                 dpc->k_sysarg2 = sysarg2;
3965         }
3966         KeReleaseSpinLock(&kq->kq_lock, irql);
3967
3968         if (r == FALSE)
3969                 return (r);
3970
3971         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3972
3973         return (r);
3974 }
3975
3976 uint8_t
3977 KeRemoveQueueDpc(dpc)
3978         kdpc                    *dpc;
3979 {
3980         kdpc_queue              *kq;
3981         uint8_t                 irql;
3982
3983         if (dpc == NULL)
3984                 return (FALSE);
3985
3986 #ifdef NTOSKRNL_MULTIPLE_DPCS
3987         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3988
3989         kq = kq_queues + dpc->k_num;
3990
3991         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3992 #else
3993         kq = kq_queues;
3994         KeAcquireSpinLock(&kq->kq_lock, &irql);
3995 #endif
3996
3997         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
3998                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3999                 KeLowerIrql(irql);
4000                 return (FALSE);
4001         }
4002
4003         RemoveEntryList((&dpc->k_dpclistentry));
4004         InitializeListHead((&dpc->k_dpclistentry));
4005
4006         KeReleaseSpinLock(&kq->kq_lock, irql);
4007
4008         return (TRUE);
4009 }
4010
4011 void
4012 KeSetImportanceDpc(dpc, imp)
4013         kdpc                    *dpc;
4014         uint32_t                imp;
4015 {
4016         if (imp != KDPC_IMPORTANCE_LOW &&
4017             imp != KDPC_IMPORTANCE_MEDIUM &&
4018             imp != KDPC_IMPORTANCE_HIGH)
4019                 return;
4020
4021         dpc->k_importance = (uint8_t)imp;
4022 }
4023
4024 void
4025 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
4026 {
4027         if (cpu >= mp_ncpus)
4028                 return;
4029
4030         dpc->k_num = cpu;
4031 }
4032
4033 void
4034 KeFlushQueuedDpcs(void)
4035 {
4036         kdpc_queue              *kq;
4037         int                     i;
4038
4039         /*
4040          * Poke each DPC queue and wait
4041          * for them to drain.
4042          */
4043
4044 #ifdef NTOSKRNL_MULTIPLE_DPCS
4045         for (i = 0; i < mp_ncpus; i++) {
4046 #else
4047         for (i = 0; i < 1; i++) {
4048 #endif
4049                 kq = kq_queues + i;
4050                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
4051                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
4052         }
4053 }
4054
4055 uint32_t
4056 KeGetCurrentProcessorNumber(void)
4057 {
4058         return ((uint32_t)curthread->td_oncpu);
4059 }
4060
4061 uint8_t
4062 KeSetTimerEx(timer, duetime, period, dpc)
4063         ktimer                  *timer;
4064         int64_t                 duetime;
4065         uint32_t                period;
4066         kdpc                    *dpc;
4067 {
4068         struct timeval          tv;
4069         uint64_t                curtime;
4070         uint8_t                 pending;
4071
4072         if (timer == NULL)
4073                 return (FALSE);
4074
4075         mtx_lock(&ntoskrnl_dispatchlock);
4076
4077         if (timer->k_header.dh_inserted == TRUE) {
4078                 ntoskrnl_remove_timer(timer);
4079 #ifdef NTOSKRNL_DEBUG_TIMERS
4080                 ntoskrnl_timer_cancels++;
4081 #endif
4082                 timer->k_header.dh_inserted = FALSE;
4083                 pending = TRUE;
4084         } else
4085                 pending = FALSE;
4086
4087         timer->k_duetime = duetime;
4088         timer->k_period = period;
4089         timer->k_header.dh_sigstate = FALSE;
4090         timer->k_dpc = dpc;
4091
4092         if (duetime < 0) {
4093                 tv.tv_sec = - (duetime) / 10000000;
4094                 tv.tv_usec = (- (duetime) / 10) -
4095                     (tv.tv_sec * 1000000);
4096         } else {
4097                 ntoskrnl_time(&curtime);
4098                 if (duetime < curtime)
4099                         tv.tv_sec = tv.tv_usec = 0;
4100                 else {
4101                         tv.tv_sec = ((duetime) - curtime) / 10000000;
4102                         tv.tv_usec = ((duetime) - curtime) / 10 -
4103                             (tv.tv_sec * 1000000);
4104                 }
4105         }
4106
4107         timer->k_header.dh_inserted = TRUE;
4108         ntoskrnl_insert_timer(timer, tvtohz(&tv));
4109 #ifdef NTOSKRNL_DEBUG_TIMERS
4110         ntoskrnl_timer_sets++;
4111 #endif
4112
4113         mtx_unlock(&ntoskrnl_dispatchlock);
4114
4115         return (pending);
4116 }
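/*
 * A minimal sketch of a periodic timer with a DPC (hypothetical
 * driver code; my_timer_func and my_ctx are placeholders). Due times
 * are in 100ns units and negative values are relative, so this fires
 * 500ms from now and then every 100ms:
 *
 *      ktimer                  timer;
 *      kdpc                    dpc;
 *
 *      KeInitializeTimerEx(&timer, EVENT_TYPE_NOTIFY);
 *      KeInitializeDpc(&dpc, my_timer_func, my_ctx);
 *      KeSetTimerEx(&timer, -5000000LL, 100, &dpc);
 */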
4117
4118 uint8_t
4119 KeSetTimer(timer, duetime, dpc)
4120         ktimer                  *timer;
4121         int64_t                 duetime;
4122         kdpc                    *dpc;
4123 {
4124         return (KeSetTimerEx(timer, duetime, 0, dpc));
4125 }
4126
4127 /*
4128  * The Windows DDK documentation seems to say that cancelling
4129  * a timer that has a DPC will result in the DPC also being
4130  * cancelled, but this isn't really the case.
4131  */
4132
4133 uint8_t
4134 KeCancelTimer(timer)
4135         ktimer                  *timer;
4136 {
4137         uint8_t                 pending;
4138
4139         if (timer == NULL)
4140                 return (FALSE);
4141
4142         mtx_lock(&ntoskrnl_dispatchlock);
4143
4144         pending = timer->k_header.dh_inserted;
4145
4146         if (timer->k_header.dh_inserted == TRUE) {
4147                 timer->k_header.dh_inserted = FALSE;
4148                 ntoskrnl_remove_timer(timer);
4149 #ifdef NTOSKRNL_DEBUG_TIMERS
4150                 ntoskrnl_timer_cancels++;
4151 #endif
4152         }
4153
4154         mtx_unlock(&ntoskrnl_dispatchlock);
4155
4156         return (pending);
4157 }
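/*
 * Given the behavior noted above, a driver that also wants the DPC
 * gone has to dequeue and drain it by hand; a teardown sketch
 * (hypothetical code, continuing the timer example above):
 *
 *      KeCancelTimer(&timer);
 *      KeRemoveQueueDpc(&dpc);
 *      KeFlushQueuedDpcs();
 */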
4158
4159 uint8_t
4160 KeReadStateTimer(timer)
4161         ktimer                  *timer;
4162 {
4163         return (timer->k_header.dh_sigstate);
4164 }
4165
4166 static int32_t
4167 KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
4168 {
4169         ktimer                  timer;
4170
4171         if (wait_mode != 0)
4172                 panic("invalid wait_mode %d", wait_mode);
4173
4174         KeInitializeTimer(&timer);
4175         KeSetTimer(&timer, *interval, NULL);
4176         KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);
4177
4178         return (STATUS_SUCCESS);
4179 }
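/*
 * A minimal usage sketch (hypothetical caller). The interval uses the
 * same 100ns/negative-is-relative convention as KeSetTimer(), so a
 * 50ms sleep in kernel mode looks like this:
 *
 *      int64_t                 interval = -50 * 10000;
 *
 *      KeDelayExecutionThread(0, FALSE, &interval);
 */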
4180
4181 static uint64_t
4182 KeQueryInterruptTime(void)
4183 {
4184         int ticks;
4185         struct timeval tv;
4186
4187         getmicrouptime(&tv);
4188
4189         ticks = tvtohz(&tv);
4190
4191         return (ticks * ((10000000 + hz - 1) / hz));
4192 }
4193
4194 static struct thread *
4195 KeGetCurrentThread(void)
4196 {
4197
4198         return (curthread);
4199 }
4200
4201 static int32_t
4202 KeSetPriorityThread(td, pri)
4203         struct thread   *td;
4204         int32_t         pri;
4205 {
4206         int32_t old;
4207
4208         if (td == NULL)
4209                 return (LOW_REALTIME_PRIORITY);
4210
4211         if (td->td_priority <= PRI_MIN_KERN)
4212                 old = HIGH_PRIORITY;
4213         else if (td->td_priority >= PRI_MAX_KERN)
4214                 old = LOW_PRIORITY;
4215         else
4216                 old = LOW_REALTIME_PRIORITY;
4217
4218         thread_lock(td);
4219         if (pri == HIGH_PRIORITY)
4220                 sched_prio(td, PRI_MIN_KERN);
4221         if (pri == LOW_REALTIME_PRIORITY)
4222                 sched_prio(td, PRI_MIN_KERN + (PRI_MAX_KERN - PRI_MIN_KERN) / 2);
4223         if (pri == LOW_PRIORITY)
4224                 sched_prio(td, PRI_MAX_KERN);
4225         thread_unlock(td);
4226
4227         return (old);
4228 }
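/*
 * A usage sketch (hypothetical): raise the current thread to the
 * highest class and restore the previous one afterwards, which works
 * because the old class is returned using the same constants the
 * routine accepts:
 *
 *      int32_t                 old;
 *
 *      old = KeSetPriorityThread(KeGetCurrentThread(), HIGH_PRIORITY);
 *      ...
 *      KeSetPriorityThread(KeGetCurrentThread(), old);
 */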
4229
4230 static void
4231 dummy()
4232 {
4233         printf("ntoskrnl dummy called...\n");
4234 }
4235
4236
4237 image_patch_table ntoskrnl_functbl[] = {
4238         IMPORT_SFUNC(RtlZeroMemory, 2),
4239         IMPORT_SFUNC(RtlSecureZeroMemory, 2),
4240         IMPORT_SFUNC(RtlFillMemory, 3),
4241         IMPORT_SFUNC(RtlMoveMemory, 3),
4242         IMPORT_SFUNC(RtlCharToInteger, 3),
4243         IMPORT_SFUNC(RtlCopyMemory, 3),
4244         IMPORT_SFUNC(RtlCopyString, 2),
4245         IMPORT_SFUNC(RtlCompareMemory, 3),
4246         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4247         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4248         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4249         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4250         IMPORT_SFUNC(RtlInitAnsiString, 2),
4251         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4252         IMPORT_SFUNC(RtlInitUnicodeString, 2),
4253         IMPORT_SFUNC(RtlFreeAnsiString, 1),
4254         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4255         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4256         IMPORT_CFUNC(sprintf, 0),
4257         IMPORT_CFUNC(vsprintf, 0),
4258         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4259         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4260         IMPORT_CFUNC(DbgPrint, 0),
4261         IMPORT_SFUNC(DbgBreakPoint, 0),
4262         IMPORT_SFUNC(KeBugCheckEx, 5),
4263         IMPORT_CFUNC(strncmp, 0),
4264         IMPORT_CFUNC(strcmp, 0),
4265         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4266         IMPORT_CFUNC(strncpy, 0),
4267         IMPORT_CFUNC(strcpy, 0),
4268         IMPORT_CFUNC(strlen, 0),
4269         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4270         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4271         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4272         IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
4273         IMPORT_CFUNC_MAP(strchr, index, 0),
4274         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4275         IMPORT_CFUNC(memcpy, 0),
4276         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4277         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4278         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4279         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4280         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4281         IMPORT_FFUNC(IofCallDriver, 2),
4282         IMPORT_FFUNC(IofCompleteRequest, 2),
4283         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4284         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4285         IMPORT_SFUNC(IoCancelIrp, 1),
4286         IMPORT_SFUNC(IoConnectInterrupt, 11),
4287         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4288         IMPORT_SFUNC(IoCreateDevice, 7),
4289         IMPORT_SFUNC(IoDeleteDevice, 1),
4290         IMPORT_SFUNC(IoGetAttachedDevice, 1),
4291         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4292         IMPORT_SFUNC(IoDetachDevice, 1),
4293         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4294         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4295         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4296         IMPORT_SFUNC(IoAllocateIrp, 2),
4297         IMPORT_SFUNC(IoReuseIrp, 2),
4298         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4299         IMPORT_SFUNC(IoFreeIrp, 1),
4300         IMPORT_SFUNC(IoInitializeIrp, 3),
4301         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4302         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4303         IMPORT_SFUNC(KeSynchronizeExecution, 3),
4304         IMPORT_SFUNC(KeWaitForSingleObject, 5),
4305         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4306         IMPORT_SFUNC(_allmul, 4),
4307         IMPORT_SFUNC(_alldiv, 4),
4308         IMPORT_SFUNC(_allrem, 4),
4309         IMPORT_RFUNC(_allshr, 0),
4310         IMPORT_RFUNC(_allshl, 0),
4311         IMPORT_SFUNC(_aullmul, 4),
4312         IMPORT_SFUNC(_aulldiv, 4),
4313         IMPORT_SFUNC(_aullrem, 4),
4314         IMPORT_RFUNC(_aullshr, 0),
4315         IMPORT_RFUNC(_aullshl, 0),
4316         IMPORT_CFUNC(atoi, 0),
4317         IMPORT_CFUNC(atol, 0),
4318         IMPORT_CFUNC(rand, 0),
4319         IMPORT_CFUNC(srand, 0),
4320         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4321         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4322         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4323         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4324         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4325         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4326         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4327         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4328         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4329         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4330         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4331         IMPORT_FFUNC(InitializeSListHead, 1),
4332         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4333         IMPORT_SFUNC(ExQueryDepthSList, 1),
4334         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4335                 InterlockedPopEntrySList, 1),
4336         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4337                 InterlockedPushEntrySList, 2),
4338         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4339         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4340         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4341         IMPORT_SFUNC(ExFreePoolWithTag, 2),
4342         IMPORT_SFUNC(ExFreePool, 1),
4343 #ifdef __i386__
4344         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4345         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
4346         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4347 #else
4348         /*
4349          * For AMD64, we can get away with just mapping
4350          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4351          * because the calling conventions end up being the same.
4352          * On i386, we have to be careful because KfAcquireSpinLock()
4353          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4354          */
4355         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4356         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4357         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4358 #endif
4359         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4360         IMPORT_FFUNC(InterlockedIncrement, 1),
4361         IMPORT_FFUNC(InterlockedDecrement, 1),
4362         IMPORT_FFUNC(InterlockedExchange, 2),
4363         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4364         IMPORT_SFUNC(IoAllocateMdl, 5),
4365         IMPORT_SFUNC(IoFreeMdl, 1),
4366         IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
4367         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
4368         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4369         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4370         IMPORT_SFUNC(MmSizeOfMdl, 1),
4371         IMPORT_SFUNC(MmMapLockedPages, 2),
4372         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4373         IMPORT_SFUNC(MmUnmapLockedPages, 2),
4374         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4375         IMPORT_SFUNC(MmGetPhysicalAddress, 1),
4376         IMPORT_SFUNC(MmIsAddressValid, 1),
4377         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4378         IMPORT_SFUNC(MmUnmapIoSpace, 2),
4379         IMPORT_SFUNC(KeInitializeSpinLock, 1),
4380         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4381         IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
4382         IMPORT_SFUNC(IoGetDeviceProperty, 5),
4383         IMPORT_SFUNC(IoAllocateWorkItem, 1),
4384         IMPORT_SFUNC(IoFreeWorkItem, 1),
4385         IMPORT_SFUNC(IoQueueWorkItem, 4),
4386         IMPORT_SFUNC(ExQueueWorkItem, 2),
4387         IMPORT_SFUNC(ntoskrnl_workitem, 2),
4388         IMPORT_SFUNC(KeInitializeMutex, 2),
4389         IMPORT_SFUNC(KeReleaseMutex, 2),
4390         IMPORT_SFUNC(KeReadStateMutex, 1),
4391         IMPORT_SFUNC(KeInitializeEvent, 3),
4392         IMPORT_SFUNC(KeSetEvent, 3),
4393         IMPORT_SFUNC(KeResetEvent, 1),
4394         IMPORT_SFUNC(KeClearEvent, 1),
4395         IMPORT_SFUNC(KeReadStateEvent, 1),
4396         IMPORT_SFUNC(KeInitializeTimer, 1),
4397         IMPORT_SFUNC(KeInitializeTimerEx, 2),
4398         IMPORT_SFUNC(KeSetTimer, 3),
4399         IMPORT_SFUNC(KeSetTimerEx, 4),
4400         IMPORT_SFUNC(KeCancelTimer, 1),
4401         IMPORT_SFUNC(KeReadStateTimer, 1),
4402         IMPORT_SFUNC(KeInitializeDpc, 3),
4403         IMPORT_SFUNC(KeInsertQueueDpc, 3),
4404         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4405         IMPORT_SFUNC(KeSetImportanceDpc, 2),
4406         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4407         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4408         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4409         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4410         IMPORT_FFUNC(ObfDereferenceObject, 1),
4411         IMPORT_SFUNC(ZwClose, 1),
4412         IMPORT_SFUNC(PsCreateSystemThread, 7),
4413         IMPORT_SFUNC(PsTerminateSystemThread, 1),
4414         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4415         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4416         IMPORT_CFUNC(WmiTraceMessage, 0),
4417         IMPORT_SFUNC(KeQuerySystemTime, 1),
4418         IMPORT_CFUNC(KeTickCount, 0),
4419         IMPORT_SFUNC(KeDelayExecutionThread, 3),
4420         IMPORT_SFUNC(KeQueryInterruptTime, 0),
4421         IMPORT_SFUNC(KeGetCurrentThread, 0),
4422         IMPORT_SFUNC(KeSetPriorityThread, 2),
4423
4424         /*
4425          * This last entry is a catch-all for any function we haven't
4426          * implemented yet. The PE import list patching routine will
4427          * use it for any function that doesn't have an explicit match
4428          * in this table.
4429          */
4430
4431         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4432
4433         /* End of list. */
4434
4435         { NULL, NULL, NULL }
4436 };