/*-
 * Copyright (c) 2003
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>

#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
        sysctl_show_timers, "I", "Show ntoskrnl timer stats");
#endif

struct kdpc_queue {
        list_entry              kq_disp;
        struct thread           *kq_td;
        int                     kq_cpu;
        int                     kq_exit;
        int                     kq_running;
        kspin_lock              kq_lock;
        nt_kevent               kq_proc;
        nt_kevent               kq_done;
};

typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
        struct cv               we_cv;
        struct thread           *we_td;
};

typedef struct wb_ext wb_ext;

#define NTOSKRNL_TIMEOUTS       256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
        struct callout          ce_callout;
        list_entry              ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;

static uint8_t RtlEqualUnicodeString(unicode_string *,
        unicode_string *, uint8_t);
static void RtlCopyUnicodeString(unicode_string *,
        unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
         void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
        device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
        device_object *, void *, uint32_t, void *, uint32_t,
        uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
        nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
        int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
        lookaside_alloc_func *, lookaside_free_func *,
        uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
        lookaside_alloc_func *, lookaside_free_func *,
        uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
        *ExInterlockedPushEntrySList(slist_header *,
        slist_entry *, kspin_lock *);
static slist_entry
        *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
        uint64_t, uint64_t, uint64_t, uint32_t);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t, uint32_t);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
        uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static uint8_t MmIsAddressValid(void *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
        uint32_t, uint32_t *);
static int atoi (const char *);
static long atol (const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
        uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
        uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
        uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
        uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void dummy(void);

static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;

int
ntoskrnl_libinit()
{
        image_patch_table       *patch;
        int                     error;
        struct proc             *p;
        kdpc_queue              *kq;
        callout_entry           *e;
        int                     i;
        char                    name[64];

        mtx_init(&ntoskrnl_dispatchlock,
            "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
        mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
        KeInitializeSpinLock(&ntoskrnl_cancellock);
        KeInitializeSpinLock(&ntoskrnl_intlock);
        TAILQ_INIT(&ntoskrnl_reflist);

        InitializeListHead(&ntoskrnl_calllist);
        InitializeListHead(&ntoskrnl_intlist);
        mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

        kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
            sizeof(kdpc_queue) * mp_ncpus, 0);
#else
            sizeof(kdpc_queue), 0);
#endif

        if (kq_queues == NULL)
                return(ENOMEM);

        wq_queues = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

        if (wq_queues == NULL)
                return(ENOMEM);

#ifdef NTOSKRNL_MULTIPLE_DPCS
        bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
        bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
        bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

        /*
         * Launch the DPC threads.
         */

#ifdef NTOSKRNL_MULTIPLE_DPCS
        for (i = 0; i < mp_ncpus; i++) {
#else
        for (i = 0; i < 1; i++) {
#endif
                kq = kq_queues + i;
                kq->kq_cpu = i;
                sprintf(name, "Windows DPC %d", i);
                error = kthread_create(ntoskrnl_dpc_thread, kq, &p,
                    RFHIGHPID, NDIS_KSTACK_PAGES, name);
                if (error)
                        panic("failed to launch DPC thread");
        }

        /*
         * Launch the workitem threads.
         */

        for (i = 0; i < WORKITEM_THREADS; i++) {
                kq = wq_queues + i;
                sprintf(name, "Windows Workitem %d", i);
                error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
                    RFHIGHPID, NDIS_KSTACK_PAGES, name);
                if (error)
                        panic("failed to launch workitem thread");
        }

        patch = ntoskrnl_functbl;
        while (patch->ipt_func != NULL) {
                windrv_wrap((funcptr)patch->ipt_func,
                    (funcptr *)&patch->ipt_wrap,
                    patch->ipt_argcnt, patch->ipt_ftype);
                patch++;
        }

        for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
                e = ExAllocatePoolWithTag(NonPagedPool,
                    sizeof(callout_entry), 0);
                if (e == NULL)
                        panic("failed to allocate timeouts");
                mtx_lock_spin(&ntoskrnl_calllock);
                InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
                mtx_unlock_spin(&ntoskrnl_calllock);
        }
        /*
         * MDLs are supposed to be variable size (they describe
         * buffers containing some number of pages, but we don't
         * know ahead of time how many pages that will be). But
         * always allocating them off the heap is very slow. As
         * a compromise, we create an MDL UMA zone big enough to
         * handle any buffer requiring up to 16 pages, and we
         * use those for any MDLs for buffers of 16 pages or less
         * in size. For buffers larger than that (which we assume
         * will be few and far between), we allocate the MDLs off
         * the heap. A sketch of this sizing decision follows this
         * function.
         */

        mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        return(0);
}
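
/*
 * To illustrate the sizing compromise described above: the choice
 * between the fixed-size zone and the heap boils down to a test like
 * the one below. (A minimal sketch; example_alloc_mdl() is a
 * hypothetical helper, not part of this file -- the real decision is
 * made in IoAllocateMdl().)
 *
 *	mdl *
 *	example_alloc_mdl(void *vaddr, uint32_t len)
 *	{
 *		int	zsize;
 *
 *		zsize = MmSizeOfMdl(vaddr, len);
 *		if (zsize <= MDL_ZONE_SIZE)
 *			return (uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO));
 *		return (ExAllocatePoolWithTag(NonPagedPool, zsize, 0));
 *	}
 */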

int
ntoskrnl_libfini()
{
        image_patch_table       *patch;
        callout_entry           *e;
        list_entry              *l;

        patch = ntoskrnl_functbl;
        while (patch->ipt_func != NULL) {
                windrv_unwrap(patch->ipt_wrap);
                patch++;
        }

        /* Stop the workitem queues. */
        ntoskrnl_destroy_workitem_threads();
        /* Stop the DPC queues. */
        ntoskrnl_destroy_dpc_threads();

        ExFreePool(kq_queues);
        ExFreePool(wq_queues);

        uma_zdestroy(mdl_zone);
        uma_zdestroy(iw_zone);

        mtx_lock_spin(&ntoskrnl_calllock);
        while(!IsListEmpty(&ntoskrnl_calllist)) {
                l = RemoveHeadList(&ntoskrnl_calllist);
                e = CONTAINING_RECORD(l, callout_entry, ce_list);
                mtx_unlock_spin(&ntoskrnl_calllock);
                ExFreePool(e);
                mtx_lock_spin(&ntoskrnl_calllock);
        }
        mtx_unlock_spin(&ntoskrnl_calllock);

        mtx_destroy(&ntoskrnl_dispatchlock);
        mtx_destroy(&ntoskrnl_interlock);
        mtx_destroy(&ntoskrnl_calllock);

        return(0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
        void                    *buf;
        int                     ch;
        size_t                  size;
{
        return(memset(buf, ch, size));
}

static void *
ntoskrnl_memmove(dst, src, size)
        void                    *src;
        void                    *dst;
        size_t                  size;
{
        bcopy(src, dst, size);
        return(dst);
}

static void *
ntoskrnl_memchr(buf, ch, len)
        void                    *buf;
        unsigned char           ch;
        size_t                  len;
{
        if (len != 0) {
                unsigned char *p = buf;

                do {
                        if (*p++ == ch)
                                return (p - 1);
                } while (--len != 0);
        }
        return (NULL);
}

static char *
ntoskrnl_strstr(s, find)
        char *s, *find;
{
        char c, sc;
        size_t len;

        if ((c = *find++) != 0) {
                len = strlen(find);
                do {
                        do {
                                if ((sc = *s++) == 0)
                                        return (NULL);
                        } while (sc != c);
                } while (strncmp(s, find, len) != 0);
                s--;
        }
        return ((char *)s);
}

static int
ntoskrnl_toupper(c)
        int                     c;
{
        return(toupper(c));
}

static int
ntoskrnl_tolower(c)
        int                     c;
{
        return(tolower(c));
}

static uint8_t 
RtlEqualUnicodeString(str1, str2, caseinsensitive)
        unicode_string          *str1;
        unicode_string          *str2;
        uint8_t                 caseinsensitive;
{
        int                     i;

        if (str1->us_len != str2->us_len)
                return(FALSE);

        for (i = 0; i < str1->us_len; i++) {
                if (caseinsensitive == TRUE) {
                        if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
                            toupper((char)(str2->us_buf[i] & 0xFF)))
                                return(FALSE);
                } else {
                        if (str1->us_buf[i] != str2->us_buf[i])
                                return(FALSE);
                }
        }

        return(TRUE);
}

static void
RtlCopyUnicodeString(dest, src)
        unicode_string          *dest;
        unicode_string          *src;
{

        if (dest->us_maxlen >= src->us_len)
                dest->us_len = src->us_len;
        else
                dest->us_len = dest->us_maxlen;
        memcpy(dest->us_buf, src->us_buf, dest->us_len);
        return;
}

static void
ntoskrnl_ascii_to_unicode(ascii, unicode, len)
        char                    *ascii;
        uint16_t                *unicode;
        int                     len;
{
        int                     i;
        uint16_t                *ustr;

        ustr = unicode;
        for (i = 0; i < len; i++) {
                *ustr = (uint16_t)ascii[i];
                ustr++;
        }

        return;
}

static void
ntoskrnl_unicode_to_ascii(unicode, ascii, len)
        uint16_t                *unicode;
        char                    *ascii;
        int                     len;
{
        int                     i;
        uint8_t                 *astr;

        astr = ascii;
        for (i = 0; i < len / 2; i++) {
                *astr = (uint8_t)unicode[i];
                astr++;
        }

        return;
}

uint32_t
RtlUnicodeStringToAnsiString(dest, src, allocate)
        ansi_string             *dest;
        unicode_string          *src;
        uint8_t                 allocate;
{
        if (dest == NULL || src == NULL)
                return(STATUS_INVALID_PARAMETER);

        dest->as_len = src->us_len / 2;
        if (dest->as_maxlen < dest->as_len)
                dest->as_len = dest->as_maxlen;

        if (allocate == TRUE) {
                dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
                    (src->us_len / 2) + 1, 0);
                if (dest->as_buf == NULL)
                        return(STATUS_INSUFFICIENT_RESOURCES);
                dest->as_len = dest->as_maxlen = src->us_len / 2;
        } else {
                dest->as_len = src->us_len / 2; /* XXX */
                if (dest->as_maxlen < dest->as_len)
                        dest->as_len = dest->as_maxlen;
        }

        ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
            dest->as_len * 2);

        return (STATUS_SUCCESS);
}

uint32_t
RtlAnsiStringToUnicodeString(dest, src, allocate)
        unicode_string          *dest;
        ansi_string             *src;
        uint8_t                 allocate;
{
        if (dest == NULL || src == NULL)
                return(STATUS_INVALID_PARAMETER);

        if (allocate == TRUE) {
                dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
                    src->as_len * 2, 0);
                if (dest->us_buf == NULL)
                        return(STATUS_INSUFFICIENT_RESOURCES);
                dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
        } else {
                dest->us_len = src->as_len * 2; /* XXX */
                if (dest->us_maxlen < dest->us_len)
                        dest->us_len = dest->us_maxlen;
        }

        ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
            dest->us_len / 2);

        return (STATUS_SUCCESS);
}

void *
ExAllocatePoolWithTag(pooltype, len, tag)
        uint32_t                pooltype;
        size_t                  len;
        uint32_t                tag;
{
        void                    *buf;

        buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (buf == NULL)
                return(NULL);

        return(buf);
}

void
ExFreePool(buf)
        void                    *buf;
{
        free(buf, M_DEVBUF);
        return;
}

uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
        driver_object           *drv;
        void                    *clid;
        uint32_t                extlen;
        void                    **ext;
{
        custom_extension        *ce;

        ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
            + extlen, 0);

        if (ce == NULL)
                return(STATUS_INSUFFICIENT_RESOURCES);

        ce->ce_clid = clid;
        InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

        *ext = (void *)(ce + 1);

        return(STATUS_SUCCESS);
}

void *
IoGetDriverObjectExtension(drv, clid)
        driver_object           *drv;
        void                    *clid;
{
        list_entry              *e;
        custom_extension        *ce;

        /*
         * Sanity check. Our dummy bus drivers don't have
         * any driver extensions.
         */

        if (drv->dro_driverext == NULL)
                return(NULL);

        e = drv->dro_driverext->dre_usrext.nle_flink;
        while (e != &drv->dro_driverext->dre_usrext) {
                ce = (custom_extension *)e;
                if (ce->ce_clid == clid)
                        return((void *)(ce + 1));
                e = e->nle_flink;
        }

        return(NULL);
}


uint32_t
IoCreateDevice(drv, devextlen, devname, devtype, devchars, exclusive, newdev)
        driver_object           *drv;
        uint32_t                devextlen;
        unicode_string          *devname;
        uint32_t                devtype;
        uint32_t                devchars;
        uint8_t                 exclusive;
        device_object           **newdev;
{
        device_object           *dev;

        dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
        if (dev == NULL)
                return(STATUS_INSUFFICIENT_RESOURCES);

        dev->do_type = devtype;
        dev->do_drvobj = drv;
        dev->do_currirp = NULL;
        dev->do_flags = 0;

        if (devextlen) {
                dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
                    devextlen, 0);

                if (dev->do_devext == NULL) {
                        ExFreePool(dev);
                        return(STATUS_INSUFFICIENT_RESOURCES);
                }

                bzero(dev->do_devext, devextlen);
        } else
                dev->do_devext = NULL;

        dev->do_size = sizeof(device_object) + devextlen;
        dev->do_refcnt = 1;
        dev->do_attacheddev = NULL;
        dev->do_nextdev = NULL;
        dev->do_devtype = devtype;
        dev->do_stacksize = 1;
        dev->do_alignreq = 1;
        dev->do_characteristics = devchars;
        dev->do_iotimer = NULL;
        KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

        /*
         * Vpd is used for disk/tape devices,
         * but we don't support those. (Yet.)
         */
        dev->do_vpb = NULL;

        dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(devobj_extension), 0);

        if (dev->do_devobj_ext == NULL) {
                if (dev->do_devext != NULL)
                        ExFreePool(dev->do_devext);
                ExFreePool(dev);
                return(STATUS_INSUFFICIENT_RESOURCES);
        }

        dev->do_devobj_ext->dve_type = 0;
        dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
        dev->do_devobj_ext->dve_devobj = dev;

        /*
         * Attach this device to the driver object's list
         * of devices. Note: this is not the same as attaching
         * the device to the device stack. The driver's AddDevice
         * routine must explicitly call IoAttachDeviceToDeviceStack()
         * to do that; a sketch of the protocol follows this function.
         */

        if (drv->dro_devobj == NULL) {
                drv->dro_devobj = dev;
                dev->do_nextdev = NULL;
        } else {
                dev->do_nextdev = drv->dro_devobj;
                drv->dro_devobj = dev;
        }

        *newdev = dev;

        return(STATUS_SUCCESS);
}
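
/*
 * A minimal sketch of how a driver's AddDevice routine is expected
 * to use the two-step protocol noted above (hypothetical fragment;
 * MY_DEV_EXT and FILE_DEVICE_UNKNOWN stand in for driver-specific
 * values):
 *
 *	device_object	*fdo;
 *	uint32_t	status;
 *
 *	status = IoCreateDevice(drv, sizeof(MY_DEV_EXT), NULL,
 *	    FILE_DEVICE_UNKNOWN, 0, FALSE, &fdo);
 *	if (status != STATUS_SUCCESS)
 *		return (status);
 *
 * Creating the device only links fdo into drv's device list;
 * attaching it to the stack is a separate, explicit step:
 *
 *	IoAttachDeviceToDeviceStack(fdo, pdo);
 */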

void
IoDeleteDevice(dev)
        device_object           *dev;
{
        device_object           *prev;

        if (dev == NULL)
                return;

        if (dev->do_devobj_ext != NULL)
                ExFreePool(dev->do_devobj_ext);

        if (dev->do_devext != NULL)
                ExFreePool(dev->do_devext);

        /* Unlink the device from the driver's device list. */

        prev = dev->do_drvobj->dro_devobj;
        if (prev == dev)
                dev->do_drvobj->dro_devobj = dev->do_nextdev;
        else {
                while (prev->do_nextdev != dev)
                        prev = prev->do_nextdev;
                prev->do_nextdev = dev->do_nextdev;
        }

        ExFreePool(dev);

        return;
}

device_object *
IoGetAttachedDevice(dev)
        device_object           *dev;
{
        device_object           *d;

        if (dev == NULL)
                return (NULL);

        d = dev;

        while (d->do_attacheddev != NULL)
                d = d->do_attacheddev;

        return (d);
}

static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
        uint32_t                func;
        device_object           *dobj;
        void                    *buf;
        uint32_t                len;
        uint64_t                *off;
        nt_kevent               *event;
        io_status_block         *status;
{
        irp                     *ip;

        ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
        if (ip == NULL)
                return(NULL);
        ip->irp_usrevent = event;

        return(ip);
}

static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
        uint32_t                func;
        device_object           *dobj;
        void                    *buf;
        uint32_t                len;
        uint64_t                *off;
        io_status_block         *status;
{
        irp                     *ip;
        io_stack_location       *sl;

        ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
        if (ip == NULL)
                return(NULL);

        ip->irp_usriostat = status;
        ip->irp_tail.irp_overlay.irp_thread = NULL;

        sl = IoGetNextIrpStackLocation(ip);
        sl->isl_major = func;
        sl->isl_minor = 0;
        sl->isl_flags = 0;
        sl->isl_ctl = 0;
        sl->isl_devobj = dobj;
        sl->isl_fileobj = NULL;
        sl->isl_completionfunc = NULL;

        ip->irp_userbuf = buf;

        if (dobj->do_flags & DO_BUFFERED_IO) {
                ip->irp_assoc.irp_sysbuf =
                    ExAllocatePoolWithTag(NonPagedPool, len, 0);
                if (ip->irp_assoc.irp_sysbuf == NULL) {
                        IoFreeIrp(ip);
                        return(NULL);
                }
                bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
        }

        if (dobj->do_flags & DO_DIRECT_IO) {
                ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
                if (ip->irp_mdl == NULL) {
                        if (ip->irp_assoc.irp_sysbuf != NULL)
                                ExFreePool(ip->irp_assoc.irp_sysbuf);
                        IoFreeIrp(ip);
                        return(NULL);
                }
                ip->irp_userbuf = NULL;
                ip->irp_assoc.irp_sysbuf = NULL;
        }

        if (func == IRP_MJ_READ) {
                sl->isl_parameters.isl_read.isl_len = len;
                if (off != NULL)
                        sl->isl_parameters.isl_read.isl_byteoff = *off;
                else
                        sl->isl_parameters.isl_read.isl_byteoff = 0;
        }

        if (func == IRP_MJ_WRITE) {
                sl->isl_parameters.isl_write.isl_len = len;
                if (off != NULL)
                        sl->isl_parameters.isl_write.isl_byteoff = *off;
                else
                        sl->isl_parameters.isl_write.isl_byteoff = 0;
        }

        return(ip);
}

static irp *
IoBuildDeviceIoControlRequest(iocode, dobj, ibuf, ilen, obuf, olen,
    isinternal, event, status)
        uint32_t                iocode;
        device_object           *dobj;
        void                    *ibuf;
        uint32_t                ilen;
        void                    *obuf;
        uint32_t                olen;
        uint8_t                 isinternal;
        nt_kevent               *event;
        io_status_block         *status;
{
        irp                     *ip;
        io_stack_location       *sl;
        uint32_t                buflen;

        ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
        if (ip == NULL)
                return(NULL);
        ip->irp_usrevent = event;
        ip->irp_usriostat = status;
        ip->irp_tail.irp_overlay.irp_thread = NULL;

        sl = IoGetNextIrpStackLocation(ip);
        sl->isl_major = isinternal == TRUE ?
            IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
        sl->isl_minor = 0;
        sl->isl_flags = 0;
        sl->isl_ctl = 0;
        sl->isl_devobj = dobj;
        sl->isl_fileobj = NULL;
        sl->isl_completionfunc = NULL;
        sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
        sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
        sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

        switch(IO_METHOD(iocode)) {
        case METHOD_BUFFERED:
                if (ilen > olen)
                        buflen = ilen;
                else
                        buflen = olen;
                if (buflen) {
                        ip->irp_assoc.irp_sysbuf =
                            ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
                        if (ip->irp_assoc.irp_sysbuf == NULL) {
                                IoFreeIrp(ip);
                                return(NULL);
                        }
                }
                if (ilen && ibuf != NULL) {
                        bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
                        bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
                            buflen - ilen);
                } else
                        bzero(ip->irp_assoc.irp_sysbuf, ilen);
                ip->irp_userbuf = obuf;
                break;
        case METHOD_IN_DIRECT:
        case METHOD_OUT_DIRECT:
                if (ilen && ibuf != NULL) {
                        ip->irp_assoc.irp_sysbuf =
                            ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
                        if (ip->irp_assoc.irp_sysbuf == NULL) {
                                IoFreeIrp(ip);
                                return(NULL);
                        }
                        bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
                }
                if (olen && obuf != NULL) {
                        ip->irp_mdl = IoAllocateMdl(obuf, olen,
                            FALSE, FALSE, ip);
                        /*
                         * Normally we would MmProbeAndLockPages()
                         * here, but we don't have to in our
                         * implementation.
                         */
                }
                break;
        case METHOD_NEITHER:
                ip->irp_userbuf = obuf;
                sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
                break;
        default:
                break;
        }

        /*
         * Ideally, we should associate this IRP with the calling
         * thread here.
         */

        return (ip);
}

static irp *
IoAllocateIrp(stsize, chargequota)
        uint8_t                 stsize;
        uint8_t                 chargequota;
{
        irp                     *i;

        i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
        if (i == NULL)
                return (NULL);

        IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

        return (i);
}

static irp *
IoMakeAssociatedIrp(ip, stsize)
        irp                     *ip;
        uint8_t                 stsize;
{
        irp                     *associrp;

        associrp = IoAllocateIrp(stsize, FALSE);
        if (associrp == NULL)
                return(NULL);

        mtx_lock(&ntoskrnl_dispatchlock);
        associrp->irp_flags |= IRP_ASSOCIATED_IRP;
        associrp->irp_tail.irp_overlay.irp_thread =
            ip->irp_tail.irp_overlay.irp_thread;
        associrp->irp_assoc.irp_master = ip;
        mtx_unlock(&ntoskrnl_dispatchlock);

        return(associrp);
}

static void
IoFreeIrp(ip)
        irp                     *ip;
{
        ExFreePool(ip);
        return;
}

static void
IoInitializeIrp(io, psize, ssize)
        irp                     *io;
        uint16_t                psize;
        uint8_t                 ssize;
{
        bzero((char *)io, IoSizeOfIrp(ssize));
        io->irp_size = psize;
        io->irp_stackcnt = ssize;
        io->irp_currentstackloc = ssize;
        InitializeListHead(&io->irp_thlist);
        io->irp_tail.irp_overlay.irp_csl =
            (io_stack_location *)(io + 1) + ssize;

        return;
}

static void
IoReuseIrp(ip, status)
        irp                     *ip;
        uint32_t                status;
{
        uint8_t                 allocflags;

        allocflags = ip->irp_allocflags;
        IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
        ip->irp_iostat.isb_status = status;
        ip->irp_allocflags = allocflags;

        return;
}

void
IoAcquireCancelSpinLock(irql)
        uint8_t                 *irql;
{
        KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
        return;
}

void
IoReleaseCancelSpinLock(irql)
        uint8_t                 irql;
{
        KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
        return;
}

uint8_t
IoCancelIrp(irp *ip)
{
        cancel_func             cfunc;

        IoAcquireCancelSpinLock(&ip->irp_cancelirql);
        cfunc = IoSetCancelRoutine(ip, NULL);
        ip->irp_cancel = TRUE;
        /* Test the routine we atomically swapped out, not the field. */
        if (cfunc == NULL) {
                IoReleaseCancelSpinLock(ip->irp_cancelirql);
                return(FALSE);
        }
        MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
        return(TRUE);
}

uint32_t
IofCallDriver(dobj, ip)
        device_object           *dobj;
        irp                     *ip;
{
        driver_object           *drvobj;
        io_stack_location       *sl;
        uint32_t                status;
        driver_dispatch         disp;

        drvobj = dobj->do_drvobj;

        if (ip->irp_currentstackloc <= 0)
                panic("IoCallDriver(): out of stack locations");

        IoSetNextIrpStackLocation(ip);
        sl = IoGetCurrentIrpStackLocation(ip);

        sl->isl_devobj = dobj;

        disp = drvobj->dro_dispatch[sl->isl_major];
        status = MSCALL2(disp, dobj, ip);

        return(status);
}

void
IofCompleteRequest(ip, prioboost)
        irp                     *ip;
        uint8_t                 prioboost;
{
        uint32_t                i;
        uint32_t                status;
        device_object           *dobj;
        io_stack_location       *sl;
        completion_func         cf;

        ip->irp_pendingreturned =
            IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
        sl = (io_stack_location *)(ip + 1);

        for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
                if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
                        IoSkipCurrentIrpStackLocation(ip);
                        dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
                } else
                        dobj = NULL;

                if (sl[i].isl_completionfunc != NULL &&
                    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
                    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
                    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
                    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
                    (ip->irp_cancel == TRUE &&
                    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
                        cf = sl->isl_completionfunc;
                        status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
                        if (status == STATUS_MORE_PROCESSING_REQUIRED)
                                return;
                }

                if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
                    SL_PENDING_RETURNED)
                        ip->irp_pendingreturned = TRUE;
        }

        /* Handle any associated IRPs. */

        if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
                uint32_t                masterirpcnt;
                irp                     *masterirp;
                mdl                     *m;

                masterirp = ip->irp_assoc.irp_master;
                masterirpcnt =
                    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

                while ((m = ip->irp_mdl) != NULL) {
                        ip->irp_mdl = m->mdl_next;
                        IoFreeMdl(m);
                }
                IoFreeIrp(ip);
                if (masterirpcnt == 0)
                        IoCompleteRequest(masterirp, IO_NO_INCREMENT);
                return;
        }

        /* With any luck, these conditions will never arise. */

        if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
                if (ip->irp_usriostat != NULL)
                        *ip->irp_usriostat = ip->irp_iostat;
                if (ip->irp_usrevent != NULL)
                        KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
                if (ip->irp_flags & IRP_PAGING_IO) {
                        if (ip->irp_mdl != NULL)
                                IoFreeMdl(ip->irp_mdl);
                        IoFreeIrp(ip);
                }
        }

        return;
}

void
ntoskrnl_intr(arg)
        void                    *arg;
{
        kinterrupt              *iobj;
        uint8_t                 irql;
        uint8_t                 claimed;
        list_entry              *l;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        l = ntoskrnl_intlist.nle_flink;
        while (l != &ntoskrnl_intlist) {
                iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
                claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
                if (claimed == TRUE)
                        break;
                l = l->nle_flink;
        }
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        return;
}

uint8_t
KeAcquireInterruptSpinLock(iobj)
        kinterrupt              *iobj;
{
        uint8_t                 irql;
        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        return(irql);
}

void
KeReleaseInterruptSpinLock(iobj, irql)
        kinterrupt              *iobj;
        uint8_t                 irql;
{
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);
        return;
}

uint8_t
KeSynchronizeExecution(iobj, syncfunc, syncctx)
        kinterrupt              *iobj;
        void                    *syncfunc;
        void                    *syncctx;
{
        uint8_t                 irql;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        MSCALL1(syncfunc, syncctx);
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        return(TRUE);
}

/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke all the registered ISRs. This effectively makes all
 * interrupts shared, but it's the only way to duplicate the
 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */

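/*
 * For reference, a sketch of the FreeBSD side of this scheme: the
 * NDIS attach code (which lives in the if_ndis glue, not in this
 * file) registers ntoskrnl_intr() as the device's one ISR, roughly:
 *
 *	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET,
 *	    ntoskrnl_intr, NULL, &sc->irq_handle);
 *
 * (Hypothetical fragment; the 'sc' field names are illustrative.
 * The per-driver ISRs registered via IoConnectInterrupt() below are
 * then invoked from ntoskrnl_intr()'s dispatch list.)
 */
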
uint32_t
IoConnectInterrupt(iobj, svcfunc, svcctx, lock, vector, irql,
        syncirql, imode, shared, affinity, savefloat)
        kinterrupt              **iobj;
        void                    *svcfunc;
        void                    *svcctx;
        uint32_t                vector;
        kspin_lock              *lock;
        uint8_t                 irql;
        uint8_t                 syncirql;
        uint8_t                 imode;
        uint8_t                 shared;
        uint32_t                affinity;
        uint8_t                 savefloat;
{
        uint8_t                 curirql;

        *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
        if (*iobj == NULL)
                return(STATUS_INSUFFICIENT_RESOURCES);

        (*iobj)->ki_svcfunc = svcfunc;
        (*iobj)->ki_svcctx = svcctx;

        if (lock == NULL) {
                KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
                (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
        } else
                (*iobj)->ki_lock = lock;

        KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
        InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
        KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

        return(STATUS_SUCCESS);
}

void
IoDisconnectInterrupt(iobj)
        kinterrupt              *iobj;
{
        uint8_t                 irql;

        if (iobj == NULL)
                return;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        RemoveEntryList((&iobj->ki_list));
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        ExFreePool(iobj);

        return;
}

device_object *
IoAttachDeviceToDeviceStack(src, dst)
        device_object           *src;
        device_object           *dst;
{
        device_object           *attached;

        mtx_lock(&ntoskrnl_dispatchlock);
        attached = IoGetAttachedDevice(dst);
        attached->do_attacheddev = src;
        src->do_attacheddev = NULL;
        src->do_stacksize = attached->do_stacksize + 1;
        mtx_unlock(&ntoskrnl_dispatchlock);

        return(attached);
}

void
IoDetachDevice(topdev)
        device_object           *topdev;
{
        device_object           *tail;

        mtx_lock(&ntoskrnl_dispatchlock);

        /* First, break the chain. */
        tail = topdev->do_attacheddev;
        if (tail == NULL) {
                mtx_unlock(&ntoskrnl_dispatchlock);
                return;
        }
        topdev->do_attacheddev = tail->do_attacheddev;
        topdev->do_refcnt--;

        /* Now reduce the stacksize count for the tail objects. */

        tail = topdev->do_attacheddev;
        while (tail != NULL) {
                tail->do_stacksize--;
                tail = tail->do_attacheddev;
        }

        mtx_unlock(&ntoskrnl_dispatchlock);

        return;
}

/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */

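/*
 * A few concrete mutant states, for illustration (derived from the
 * rules above):
 *
 *	dh_sigstate == 1, no owner              -> signalled (free mutex)
 *	dh_sigstate <= 0, km_ownerthread == td  -> signalled for td only
 *	                                           (recursive acquisition)
 *	dh_sigstate <= 0, km_ownerthread != td  -> not signalled
 */
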
static int
ntoskrnl_is_signalled(obj, td)
        nt_dispatch_header      *obj;
        struct thread           *td;
{
        kmutant                 *km;

        if (obj->dh_type == DISP_TYPE_MUTANT) {
                km = (kmutant *)obj;
                if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
                    obj->dh_sigstate == 1)
                        return(TRUE);
                return(FALSE);
        }

        if (obj->dh_sigstate > 0)
                return(TRUE);
        return(FALSE);
}

static void
ntoskrnl_satisfy_wait(obj, td)
        nt_dispatch_header      *obj;
        struct thread           *td;
{
        kmutant                 *km;

        switch (obj->dh_type) {
        case DISP_TYPE_MUTANT:
                km = (struct kmutant *)obj;
                obj->dh_sigstate--;
                /*
                 * If sigstate reaches 0, the mutex is now
                 * non-signalled (the new thread owns it).
                 */
                if (obj->dh_sigstate == 0) {
                        km->km_ownerthread = td;
                        if (km->km_abandoned == TRUE)
                                km->km_abandoned = FALSE;
                }
                break;
        /* Synchronization objects get reset to unsignalled. */
        case DISP_TYPE_SYNCHRONIZATION_EVENT:
        case DISP_TYPE_SYNCHRONIZATION_TIMER:
                obj->dh_sigstate = 0;
                break;
        case DISP_TYPE_SEMAPHORE:
                obj->dh_sigstate--;
                break;
        default:
                break;
        }

        return;
}

static void
ntoskrnl_satisfy_multiple_waits(wb)
        wait_block              *wb;
{
        wait_block              *cur;
        struct thread           *td;

        cur = wb;
        td = wb->wb_kthread;

        do {
                /* Satisfy each object on the ring, not just the first. */
                ntoskrnl_satisfy_wait(cur->wb_object, td);
                cur->wb_awakened = TRUE;
                cur = cur->wb_next;
        } while (cur != wb);

        return;
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(obj, increment)
        nt_dispatch_header      *obj;
        uint32_t                increment;
{
        wait_block              *w, *next;
        list_entry              *e;
        struct thread           *td;
        wb_ext                  *we;
        int                     satisfied;

        /*
         * Once an object has been signalled, we walk its list of
         * wait blocks. If a wait block can be awakened, then satisfy
         * waits as necessary and wake the thread.
         *
         * The rules work like this:
         *
         * If a wait block is marked as WAITTYPE_ANY, then
         * we can satisfy the wait conditions on the current
         * object and wake the thread right away. Satisfying
         * the wait also has the effect of breaking us out
         * of the search loop.
         *
         * If the object is marked as WAITTYPE_ALL, then the
         * wait block will be part of a circularly linked
         * list of wait blocks belonging to a waiting thread
         * that's sleeping in KeWaitForMultipleObjects(). In
         * order to wake the thread, all the objects in the
         * wait list must be in the signalled state. If they
         * are, we then satisfy all of them and wake the
         * thread.
         */
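
        /*
         * To picture the WAITTYPE_ALL case: a thread sleeping in
         * KeWaitForMultipleObjects() on, say, three objects has its
         * wait blocks linked in a ring:
         *
         *      wb[0] -> wb[1] -> wb[2]
         *        ^                 |
         *        +-----------------+
         *
         * The code below starts at the block for the object that was
         * just signalled and walks wb_next around the ring; only if
         * every object on the ring is signalled is the whole wait
         * satisfied.
         */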

        e = obj->dh_waitlisthead.nle_flink;

        while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
                w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
                we = w->wb_ext;
                td = we->we_td;
                satisfied = FALSE;
                if (w->wb_waittype == WAITTYPE_ANY) {
                        /*
                         * Thread can be awakened if
                         * any wait is satisfied.
                         */
                        ntoskrnl_satisfy_wait(obj, td);
                        satisfied = TRUE;
                        w->wb_awakened = TRUE;
                } else {
                        /*
                         * Thread can only be woken up
                         * if all waits are satisfied.
                         * If the thread is waiting on multiple
                         * objects, they should all be linked
                         * through the wb_next pointers in the
                         * wait blocks.
                         */
                        satisfied = TRUE;
                        next = w->wb_next;
                        while (next != w) {
                                /* Check each linked object in turn. */
                                if (ntoskrnl_is_signalled(next->wb_object,
                                    td) == FALSE) {
                                        satisfied = FALSE;
                                        break;
                                }
                                next = next->wb_next;
                        }
                        if (satisfied == TRUE)
                                ntoskrnl_satisfy_multiple_waits(w);
1565                 }
1566
1567                 if (satisfied == TRUE)
1568                         cv_broadcastpri(&we->we_cv, w->wb_oldpri -
1569                             (increment * 4));
1570
1571                 e = e->nle_flink;
1572         }
1573
1574         return;
1575 }
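/*
 * Illustration (editorial): a thread sleeping in
 * KeWaitForMultipleObjects() on three objects owns three wait blocks,
 * chained into a ring through wb_next and each queued on its own
 * object's dh_waitlisthead:
 *
 *	wb[0] -> wb[1] -> wb[2]
 *	   ^_________________|
 *
 * The WAITTYPE_ALL scan above walks this ring to check that every
 * member object is signalled before waking the thread.
 */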
1576
1577 static void 
1578 ntoskrnl_time(tval)
1579         uint64_t                *tval;
1580 {
1581         struct timespec         ts;
1582
1583         nanotime(&ts);
1584         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1585             (uint64_t)11644473600 * 10000000;
1586
1587         return;
1588 }
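/*
 * Editorial note: this is the Windows timestamp format, 100ns units
 * counted from January 1, 1601. Worked example: ts = { 0, 0 } (the
 * Unix epoch) yields 11644473600 * 10000000 = 116444736000000000.
 */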
1589
1590 /*
1591  * KeWaitForSingleObject() is a tricky beast, because it can be used
1592  * with several different object types: semaphores, timers, events,
1593  * mutexes and threads. Semaphores don't appear very often, but the
1594  * other object types are quite common. KeWaitForSingleObject() is
1595  * what's normally used to acquire a mutex, and it can be used to
1596  * wait for a thread termination.
1597  *
1598  * The Windows NDIS API is implemented in terms of Windows kernel
1599  * primitives, and some of the object manipulation is duplicated in
1600  * NDIS. For example, NDIS has timers and events, which are actually
1601  * Windows kevents and ktimers. Now, you're supposed to only use the
1602  * NDIS variants of these objects within the confines of the NDIS API,
1603  * but there are some naughty developers out there who will use
1604  * KeWaitForSingleObject() on NDIS timer and event objects, so we
1605  * have to support that as well. Consequently, our NDIS timer and event
1606  * code has to be closely tied into our ntoskrnl timer and event code,
1607  * just as it is in Windows.
1608  *
1609  * KeWaitForSingleObject() may do different things for different kinds
1610  * of objects:
1611  *
1612  * - For events, we check if the event has been signalled. If the
1613  *   event is already in the signalled state, we just return immediately,
1614  *   otherwise we wait for it to be set to the signalled state by someone
1615  *   else calling KeSetEvent(). Events can be either synchronization or
1616  *   notification events.
1617  *
1618  * - For timers, if the timer has already fired and the timer is in
1619  *   the signalled state, we just return, otherwise we wait on the
1620  *   timer. Unlike an event, timers get signalled automatically when
1621  *   they expire rather than someone having to trip them manually.
1622  *   Timers initialized with KeInitializeTimer() are always notification
1623  *   events: KeInitializeTimerEx() lets you initialize a timer as
1624  *   either a notification or synchronization event.
1625  *
1626  * - For mutexes, we try to acquire the mutex and if we can't, we wait
1627  *   on the mutex until it's available and then grab it. When a mutex is
1628  *   released, it enters the signalled state, which wakes up one of the
1629  *   threads waiting to acquire it. Mutexes are always synchronization
1630  *   events.
1631  *
1632  * - For threads, the only thing we do is wait until the thread object
1633  *   enters a signalled state, which occurs when the thread terminates.
1634  *   Threads are always notification events.
1635  *
1636  * A notification event wakes up all threads waiting on an object. A
1637  * synchronization event wakes up just one. Also, a synchronization event
1638  * is auto-clearing, which means we automatically set the event back to
1639  * the non-signalled state once the wakeup is done.
1640  */
1641
1642 uint32_t
1643 KeWaitForSingleObject(arg, reason, mode, alertable, duetime)
1644         void                    *arg;
1645         uint32_t                reason;
1646         uint32_t                mode;
1647         uint8_t                 alertable;
1648         int64_t                 *duetime;
1649 {
1650         wait_block              w;
1651         struct thread           *td = curthread;
1652         struct timeval          tv;
1653         int                     error = 0;
1654         uint64_t                curtime;
1655         wb_ext                  we;
1656         nt_dispatch_header      *obj;
1657
1658         obj = arg;
1659
1660         if (obj == NULL)
1661                 return(STATUS_INVALID_PARAMETER);
1662
1663         mtx_lock(&ntoskrnl_dispatchlock);
1664
1665         cv_init(&we.we_cv, "KeWFS");
1666         we.we_td = td;
1667
1668         /*
1669          * Check to see if this object is already signalled,
1670          * and just return without waiting if it is.
1671          */
1672         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1673                 /* Sanity check the signal state value. */
1674                 if (obj->dh_sigstate != INT32_MIN) {
1675                         ntoskrnl_satisfy_wait(obj, curthread);
1676                         mtx_unlock(&ntoskrnl_dispatchlock);
1677                         return (STATUS_SUCCESS);
1678                 } else {
1679                         /*
1680                          * There's a limit to how many times we can
1681                          * recursively acquire a mutant. If we hit
1682                          * the limit, something is very wrong.
1683                          */
1684                         if (obj->dh_type == DISP_TYPE_MUTANT) {
1685                                 mtx_unlock(&ntoskrnl_dispatchlock);
1686                                 panic("mutant limit exceeded");
1687                         }
1688                 }
1689         }
1690
1691         bzero((char *)&w, sizeof(wait_block));
1692         w.wb_object = obj;
1693         w.wb_ext = &we;
1694         w.wb_waittype = WAITTYPE_ANY;
1695         w.wb_next = &w;
1696         w.wb_waitkey = 0;
1697         w.wb_awakened = FALSE;
1698         w.wb_oldpri = td->td_priority;
1699
1700         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1701
1702         /*
1703          * The timeout value is specified in 100 nanosecond units
1704          * and can be a positive or negative number. If it's positive,
1705          * then the duetime is absolute, and we need to convert it
1706          * to an offset relative to now in order to use it.
1707          * If it's negative, then the duetime is relative and we
1708          * just have to convert the units.
1709          */
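        /*
         * Worked example (editorial): *duetime == -10000000 means
         * "one second from now" and converts to tv = { 1, 0 } below.
         */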
1710
1711         if (duetime != NULL) {
1712                 if (*duetime < 0) {
1713                         tv.tv_sec = - (*duetime) / 10000000;
1714                         tv.tv_usec = (- (*duetime) / 10) -
1715                             (tv.tv_sec * 1000000);
1716                 } else {
1717                         ntoskrnl_time(&curtime);
1718                         if (*duetime < curtime)
1719                                 tv.tv_sec = tv.tv_usec = 0;
1720                         else {
1721                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1722                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1723                                     (tv.tv_sec * 1000000);
1724                         }
1725                 }
1726         }
1727
1728         if (duetime == NULL)
1729                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1730         else
1731                 error = cv_timedwait(&we.we_cv,
1732                     &ntoskrnl_dispatchlock, tvtohz(&tv));
1733
1734         RemoveEntryList(&w.wb_waitlist);
1735
1736         cv_destroy(&we.we_cv);
1737
1738         /* We timed out. Leave the object alone and return status. */
1739
1740         if (error == EWOULDBLOCK) {
1741                 mtx_unlock(&ntoskrnl_dispatchlock);
1742                 return(STATUS_TIMEOUT);
1743         }
1744
1745         mtx_unlock(&ntoskrnl_dispatchlock);
1746
1747         return(STATUS_SUCCESS);
1748 /*
1749         return(KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1750             mode, alertable, duetime, &w));
1751 */
1752 }
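/*
 * Example (editor's sketch of a hypothetical caller, not code from
 * this file): wait at most one second for an event that some other
 * thread signals. A negative duetime is relative, in 100ns units.
 *
 *	nt_kevent		ev;
 *	int64_t			timeout = -10000000;	(one second)
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_SYNC, FALSE);
 *	...
 *	if (KeWaitForSingleObject(&ev, 0, 0, TRUE,
 *	    &timeout) == STATUS_TIMEOUT)
 *		printf("wait timed out\n");
 */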
1753
1754 static uint32_t
1755 KeWaitForMultipleObjects(cnt, obj, wtype, reason, mode,
1756         alertable, duetime, wb_array)
1757         uint32_t                cnt;
1758         nt_dispatch_header      *obj[];
1759         uint32_t                wtype;
1760         uint32_t                reason;
1761         uint32_t                mode;
1762         uint8_t                 alertable;
1763         int64_t                 *duetime;
1764         wait_block              *wb_array;
1765 {
1766         struct thread           *td = curthread;
1767         wait_block              *whead, *w;
1768         wait_block              _wb_array[MAX_WAIT_OBJECTS];
1769         nt_dispatch_header      *cur;
1770         struct timeval          tv;
1771         int                     i, wcnt = 0, error = 0;
1772         uint64_t                curtime;
1773         struct timespec         t1, t2;
1774         uint32_t                status = STATUS_SUCCESS;
1775         wb_ext                  we;
1776
1777         if (cnt > MAX_WAIT_OBJECTS)
1778                 return(STATUS_INVALID_PARAMETER);
1779         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1780                 return(STATUS_INVALID_PARAMETER);
1781
1782         mtx_lock(&ntoskrnl_dispatchlock);
1783
1784         cv_init(&we.we_cv, "KeWFM");
1785         we.we_td = td;
1786
1787         if (wb_array == NULL)
1788                 whead = _wb_array;
1789         else
1790                 whead = wb_array;
1791
1792         bzero((char *)whead, sizeof(wait_block) * cnt);
1793
1794         /* First pass: see if we can satisfy any waits immediately. */
1795
1796         wcnt = 0;
1797         w = whead;
1798
1799         for (i = 0; i < cnt; i++) {
1800                 InsertTailList((&obj[i]->dh_waitlisthead),
1801                     (&w->wb_waitlist));
1802                 w->wb_ext = &we;
1803                 w->wb_object = obj[i];
1804                 w->wb_waittype = wtype;
1805                 w->wb_waitkey = i;
1806                 w->wb_awakened = FALSE;
1807                 w->wb_oldpri = td->td_priority;
1808                 w->wb_next = w + 1;
1809                 w++;
1810                 wcnt++;
1811                 if (ntoskrnl_is_signalled(obj[i], td)) {
1812                         /*
1813                          * There's a limit to how many times
1814                          * we can recursively acquire a mutant.
1815                          * If we hit the limit, something
1816                          * is very wrong.
1817                          */
1818                         if (obj[i]->dh_sigstate == INT32_MIN &&
1819                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
1820                                 mtx_unlock(&ntoskrnl_dispatchlock);
1821                                 panic("mutant limit exceeded");
1822                         }
1823
1824                         /*
1825                          * If this is a WAITTYPE_ANY wait, then
1826                          * satisfy the waited object and exit
1827                          * right now.
1828                          */
1829
1830                         if (wtype == WAITTYPE_ANY) {
1831                                 ntoskrnl_satisfy_wait(obj[i], td);
1832                                 status = STATUS_WAIT_0 + i;
1833                                 goto wait_done;
1834                         } else {
1835                                 w--;
1836                                 wcnt--;
1837                                 w->wb_object = NULL;
1838                                 RemoveEntryList(&w->wb_waitlist);
1839                         }
1840                 }
1841         }
1842
1843         /*
1844          * If this is a WAITTYPE_ALL wait and all objects are
1845          * already signalled, satisfy the waits and exit now.
1846          */
1847
1848         if (wtype == WAITTYPE_ALL && wcnt == 0) {
1849                 for (i = 0; i < cnt; i++)
1850                         ntoskrnl_satisfy_wait(obj[i], td);
1851                 status = STATUS_SUCCESS;
1852                 goto wait_done;
1853         }
1854
1855         /*
1856          * Create a circular waitblock list. The waitcount
1857          * must always be non-zero when we get here.
1858          */
1859
1860         (w - 1)->wb_next = whead;
1861
1862         /* Wait on any objects that aren't yet signalled. */
1863
1864         /* Calculate timeout, if any. */
1865
1866         if (duetime != NULL) {
1867                 if (*duetime < 0) {
1868                         tv.tv_sec = - (*duetime) / 10000000;
1869                         tv.tv_usec = (- (*duetime) / 10) -
1870                             (tv.tv_sec * 1000000);
1871                 } else {
1872                         ntoskrnl_time(&curtime);
1873                         if (*duetime < curtime)
1874                                 tv.tv_sec = tv.tv_usec = 0;
1875                         else {
1876                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1877                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1878                                     (tv.tv_sec * 1000000);
1879                         }
1880                 }
1881         }
1882
1883         while (wcnt) {
1884                 nanotime(&t1);
1885
1886                 if (duetime == NULL)
1887                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1888                 else
1889                         error = cv_timedwait(&we.we_cv,
1890                             &ntoskrnl_dispatchlock, tvtohz(&tv));
1891
1892                 /* Wait with timeout expired. */
1893
1894                 if (error) {
1895                         status = STATUS_TIMEOUT;
1896                         goto wait_done;
1897                 }
1898
1899                 nanotime(&t2);
1900
1901                 /* See what's been signalled. */
1902
1903                 w = whead;
1904                 do {
1905                         cur = w->wb_object;
1906                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1907                             w->wb_awakened == TRUE) {
1908                                 /* Sanity check the signal state value. */
1909                                 if (cur->dh_sigstate == INT32_MIN &&
1910                                     cur->dh_type == DISP_TYPE_MUTANT) {
1911                                         mtx_unlock(&ntoskrnl_dispatchlock);
1912                                         panic("mutant limit exceeded");
1913                                 }
1914                                 wcnt--;
1915                                 if (wtype == WAITTYPE_ANY) {
1916                                         status = STATUS_WAIT_0 +
1917                                             w->wb_waitkey;
1918                                         goto wait_done;
1919                                 }
1920                         }
1921                         w = w->wb_next;
1922                 } while (w != whead);
1923
1924                 /*
1925                  * If all objects have been signalled, or if this
1926          * is a WAITTYPE_ANY wait and we were woken up by
1927                  * someone, we can bail.
1928                  */
1929
1930                 if (wcnt == 0) {
1931                         status = STATUS_SUCCESS;
1932                         goto wait_done;
1933                 }
1934
1935                 /*
1936                  * If this is WAITTYPE_ALL wait, and there's still
1937                  * objects that haven't been signalled, deduct the
1938                  * time that's elapsed so far from the timeout and
1939                  * wait again (or continue waiting indefinitely if
1940                  * there's no timeout).
1941                  */
1942
1943                 if (duetime != NULL) {
1944                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1945                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1946                 }
1947         }
1948
1949
1950 wait_done:
1951
1952         cv_destroy(&we.we_cv);
1953
1954         for (i = 0; i < cnt; i++) {
1955                 if (whead[i].wb_object != NULL)
1956                         RemoveEntryList(&whead[i].wb_waitlist);
1957
1958         }
1959         mtx_unlock(&ntoskrnl_dispatchlock);
1960
1961         return(status);
1962 }
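/*
 * Example (editor's sketch, hypothetical Windows-side caller): wait
 * until both of two events are signalled. With cnt no larger than
 * THREAD_WAIT_OBJECTS, wb_array may be NULL and the on-stack
 * _wb_array above is used instead.
 *
 *	nt_dispatch_header	*objs[2] =
 *	    { &ev1.k_header, &ev2.k_header };
 *
 *	status = KeWaitForMultipleObjects(2, objs, WAITTYPE_ALL,
 *	    0, 0, TRUE, NULL, NULL);
 */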
1963
1964 static void
1965 WRITE_REGISTER_USHORT(reg, val)
1966         uint16_t                *reg;
1967         uint16_t                val;
1968 {
1969         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1970         return;
1971 }
1972
1973 static uint16_t
1974 READ_REGISTER_USHORT(reg)
1975         uint16_t                *reg;
1976 {
1977         return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1978 }
1979
1980 static void
1981 WRITE_REGISTER_ULONG(reg, val)
1982         uint32_t                *reg;
1983         uint32_t                val;
1984 {
1985         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1986         return;
1987 }
1988
1989 static uint32_t
1990 READ_REGISTER_ULONG(reg)
1991         uint32_t                *reg;
1992 {
1993         return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1994 }
1995
1996 static uint8_t
1997 READ_REGISTER_UCHAR(reg)
1998         uint8_t                 *reg;
1999 {
2000         return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
2001 }
2002
2003 static void
2004 WRITE_REGISTER_UCHAR(reg, val)
2005         uint8_t                 *reg;
2006         uint8_t                 val;
2007 {
2008         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
2009         return;
2010 }
2011
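/*
 * Editorial note: the routines below (_allmul() through _aullshr())
 * are 64-bit integer math helpers. 32-bit Windows compilers emit
 * calls to these runtime routines rather than inline code for 64-bit
 * multiply, divide, remainder and shift, and drivers import them
 * from ntoskrnl, so we have to provide them.
 */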
2012 static int64_t
2013 _allmul(a, b)
2014         int64_t                 a;
2015         int64_t                 b;
2016 {
2017         return (a * b);
2018 }
2019
2020 static int64_t
2021 _alldiv(a, b)
2022         int64_t                 a;
2023         int64_t                 b;
2024 {
2025         return (a / b);
2026 }
2027
2028 static int64_t
2029 _allrem(a, b)
2030         int64_t                 a;
2031         int64_t                 b;
2032 {
2033         return (a % b);
2034 }
2035
2036 static uint64_t
2037 _aullmul(a, b)
2038         uint64_t                a;
2039         uint64_t                b;
2040 {
2041         return (a * b);
2042 }
2043
2044 static uint64_t
2045 _aulldiv(a, b)
2046         uint64_t                a;
2047         uint64_t                b;
2048 {
2049         return (a / b);
2050 }
2051
2052 static uint64_t
2053 _aullrem(a, b)
2054         uint64_t                a;
2055         uint64_t                b;
2056 {
2057         return (a % b);
2058 }
2059
2060 static int64_t
2061 _allshl(a, b)
2062         int64_t                 a;
2063         uint8_t                 b;
2064 {
2065         return (a << b);
2066 }
2067
2068 static uint64_t
2069 _aullshl(a, b)
2070         uint64_t                a;
2071         uint8_t                 b;
2072 {
2073         return (a << b);
2074 }
2075
2076 static int64_t
2077 _allshr(a, b)
2078         int64_t                 a;
2079         uint8_t                 b;
2080 {
2081         return (a >> b);
2082 }
2083
2084 static uint64_t
2085 _aullshr(a, b)
2086         uint64_t                a;
2087         uint8_t                 b;
2088 {
2089         return (a >> b);
2090 }
2091
2092 static slist_entry *
2093 ntoskrnl_pushsl(head, entry)
2094         slist_header            *head;
2095         slist_entry             *entry;
2096 {
2097         slist_entry             *oldhead;
2098
2099         oldhead = head->slh_list.slh_next;
2100         entry->sl_next = head->slh_list.slh_next;
2101         head->slh_list.slh_next = entry;
2102         head->slh_list.slh_depth++;
2103         head->slh_list.slh_seq++;
2104
2105         return(oldhead);
2106 }
2107
2108 static slist_entry *
2109 ntoskrnl_popsl(head)
2110         slist_header            *head;
2111 {
2112         slist_entry             *first;
2113
2114         first = head->slh_list.slh_next;
2115         if (first != NULL) {
2116                 head->slh_list.slh_next = first->sl_next;
2117                 head->slh_list.slh_depth--;
2118                 head->slh_list.slh_seq++;
2119         }
2120
2121         return(first);
2122 }
2123
2124 /*
2125  * We need this to make lookaside lists work for amd64.
2126  * We store a pointer to ExAllocatePoolWithTag() in the lookaside
2127  * list structure. For amd64 to work right, this has to be a
2128  * pointer to the wrapped version of the routine, not the
2129  * original. Letting the Windows driver invoke the original
2130  * function directly will result in a calling convention
2131  * mismatch and a nasty crash. On x86, this effectively
2132  * becomes a no-op since ipt_func and ipt_wrap are the same.
2133  */
2134
2135 static funcptr
2136 ntoskrnl_findwrap(func)
2137         funcptr                 func;
2138 {
2139         image_patch_table       *patch;
2140
2141         patch = ntoskrnl_functbl;
2142         while (patch->ipt_func != NULL) {
2143                 if ((funcptr)patch->ipt_func == func)
2144                         return((funcptr)patch->ipt_wrap);
2145                 patch++;
2146         }
2147
2148         return(NULL);
2149 }
2150
2151 static void
2152 ExInitializePagedLookasideList(lookaside, allocfunc, freefunc,
2153     flags, size, tag, depth)
2154         paged_lookaside_list    *lookaside;
2155         lookaside_alloc_func    *allocfunc;
2156         lookaside_free_func     *freefunc;
2157         uint32_t                flags;
2158         size_t                  size;
2159         uint32_t                tag;
2160         uint16_t                depth;
2161 {
2162         bzero((char *)lookaside, sizeof(paged_lookaside_list));
2163
2164         if (size < sizeof(slist_entry))
2165                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2166         else
2167                 lookaside->nll_l.gl_size = size;
2168         lookaside->nll_l.gl_tag = tag;
2169         if (allocfunc == NULL)
2170                 lookaside->nll_l.gl_allocfunc =
2171                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2172         else
2173                 lookaside->nll_l.gl_allocfunc = allocfunc;
2174
2175         if (freefunc == NULL)
2176                 lookaside->nll_l.gl_freefunc =
2177                     ntoskrnl_findwrap((funcptr)ExFreePool);
2178         else
2179                 lookaside->nll_l.gl_freefunc = freefunc;
2180
2181 #ifdef __i386__
2182         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2183 #endif
2184
2185         lookaside->nll_l.gl_type = NonPagedPool;
2186         lookaside->nll_l.gl_depth = depth;
2187         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2188
2189         return;
2190 }
2191
2192 static void
2193 ExDeletePagedLookasideList(lookaside)
2194         paged_lookaside_list   *lookaside;
2195 {
2196         void                    *buf;
2197         void            (*freefunc)(void *);
2198
2199         freefunc = lookaside->nll_l.gl_freefunc;
2200         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2201                 MSCALL1(freefunc, buf);
2202
2203         return;
2204 }
2205
2206 static void
2207 ExInitializeNPagedLookasideList(lookaside, allocfunc, freefunc,
2208     flags, size, tag, depth)
2209         npaged_lookaside_list   *lookaside;
2210         lookaside_alloc_func    *allocfunc;
2211         lookaside_free_func     *freefunc;
2212         uint32_t                flags;
2213         size_t                  size;
2214         uint32_t                tag;
2215         uint16_t                depth;
2216 {
2217         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2218
2219         if (size < sizeof(slist_entry))
2220                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2221         else
2222                 lookaside->nll_l.gl_size = size;
2223         lookaside->nll_l.gl_tag = tag;
2224         if (allocfunc == NULL)
2225                 lookaside->nll_l.gl_allocfunc =
2226                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2227         else
2228                 lookaside->nll_l.gl_allocfunc = allocfunc;
2229
2230         if (freefunc == NULL)
2231                 lookaside->nll_l.gl_freefunc =
2232                     ntoskrnl_findwrap((funcptr)ExFreePool);
2233         else
2234                 lookaside->nll_l.gl_freefunc = freefunc;
2235
2236 #ifdef __i386__
2237         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2238 #endif
2239
2240         lookaside->nll_l.gl_type = NonPagedPool;
2241         lookaside->nll_l.gl_depth = depth;
2242         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2243
2244         return;
2245 }
2246
2247 static void
2248 ExDeleteNPagedLookasideList(lookaside)
2249         npaged_lookaside_list   *lookaside;
2250 {
2251         void                    *buf;
2252         void            (*freefunc)(void *);
2253
2254         freefunc = lookaside->nll_l.gl_freefunc;
2255         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2256                 MSCALL1(freefunc, buf);
2257
2258         return;
2259 }
2260
2261 slist_entry *
2262 InterlockedPushEntrySList(head, entry)
2263         slist_header            *head;
2264         slist_entry             *entry;
2265 {
2266         slist_entry             *oldhead;
2267
2268         mtx_lock_spin(&ntoskrnl_interlock);
2269         oldhead = ntoskrnl_pushsl(head, entry);
2270         mtx_unlock_spin(&ntoskrnl_interlock);
2271
2272         return(oldhead);
2273 }
2274
2275 slist_entry *
2276 InterlockedPopEntrySList(head)
2277         slist_header            *head;
2278 {
2279         slist_entry             *first;
2280
2281         mtx_lock_spin(&ntoskrnl_interlock);
2282         first = ntoskrnl_popsl(head);
2283         mtx_unlock_spin(&ntoskrnl_interlock);
2284
2285         return(first);
2286 }
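/*
 * Example (editor's sketch, hypothetical caller): an interlocked LIFO
 * of driver-defined records that embed an slist_entry link.
 *
 *	struct rec {
 *		slist_entry	r_link;
 *		int		r_data;
 *	};
 *	slist_header		h = { 0 };
 *	struct rec		r;
 *
 *	InterlockedPushEntrySList(&h, &r.r_link);
 *	e = InterlockedPopEntrySList(&h);	(returns &r.r_link)
 *	rp = CONTAINING_RECORD(e, struct rec, r_link);
 */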
2287
2288 static slist_entry *
2289 ExInterlockedPushEntrySList(head, entry, lock)
2290         slist_header            *head;
2291         slist_entry             *entry;
2292         kspin_lock              *lock;
2293 {
2294         return(InterlockedPushEntrySList(head, entry));
2295 }
2296
2297 static slist_entry *
2298 ExInterlockedPopEntrySList(head, lock)
2299         slist_header            *head;
2300         kspin_lock              *lock;
2301 {
2302         return(InterlockedPopEntrySList(head));
2303 }
2304
2305 uint16_t
2306 ExQueryDepthSList(head)
2307         slist_header            *head;
2308 {
2309         uint16_t                depth;
2310
2311         mtx_lock_spin(&ntoskrnl_interlock);
2312         depth = head->slh_list.slh_depth;
2313         mtx_unlock_spin(&ntoskrnl_interlock);
2314
2315         return(depth);
2316 }
2317
2318 void
2319 KeInitializeSpinLock(lock)
2320         kspin_lock              *lock;
2321 {
2322         *lock = 0;
2323
2324         return;
2325 }
2326
2327 #ifdef __i386__
2328 void
2329 KefAcquireSpinLockAtDpcLevel(lock)
2330         kspin_lock              *lock;
2331 {
2332 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2333         int                     i = 0;
2334 #endif
2335
2336         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2337                 /* sit and spin */;
2338 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2339                 i++;
2340                 if (i > 200000000)
2341                         panic("DEADLOCK!");
2342 #endif
2343         }
2344
2345         return;
2346 }
2347
2348 void
2349 KefReleaseSpinLockFromDpcLevel(lock)
2350         kspin_lock              *lock;
2351 {
2352         atomic_store_rel_int((volatile u_int *)lock, 0);
2353
2354         return;
2355 }
2356
2357 uint8_t
2358 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2359 {
2360         uint8_t                 oldirql;
2361
2362         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2363                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2364
2365         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2366         KeAcquireSpinLockAtDpcLevel(lock);
2367
2368         return(oldirql);
2369 }
2370 #else
2371 void
2372 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2373 {
2374         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2375                 /* sit and spin */;
2376
2377         return;
2378 }
2379
2380 void
2381 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2382 {
2383         atomic_store_rel_int((volatile u_int *)lock, 0);
2384
2385         return;
2386 }
2387 #endif /* __i386__ */
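/*
 * Example (editor's sketch, hypothetical caller): the usual pairing.
 * The KeAcquireSpinLock()/KeReleaseSpinLock() wrappers raise and
 * restore the IRQL around the routines above.
 *
 *	kspin_lock		lck;
 *	uint8_t			irql;
 *
 *	KeInitializeSpinLock(&lck);
 *	KeAcquireSpinLock(&lck, &irql);
 *	... critical section ...
 *	KeReleaseSpinLock(&lck, irql);
 */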
2388
2389 uintptr_t
2390 InterlockedExchange(dst, val)
2391         volatile uint32_t       *dst;
2392         uintptr_t               val;
2393 {
2394         uintptr_t               r;
2395
2396         mtx_lock_spin(&ntoskrnl_interlock);
2397         r = *dst;
2398         *dst = val;
2399         mtx_unlock_spin(&ntoskrnl_interlock);
2400
2401         return(r);
2402 }
2403
2404 static uint32_t
2405 InterlockedIncrement(addend)
2406         volatile uint32_t       *addend;
2407 {
2408         atomic_add_int((volatile u_int *)addend, 1);
2409         return(*addend);
2410 }
2411
2412 static uint32_t
2413 InterlockedDecrement(addend)
2414         volatile uint32_t       *addend;
2415 {
2416         atomic_subtract_int((volatile u_int *)addend, 1);
2417         return(*addend);
2418 }
2419
2420 static void
2421 ExInterlockedAddLargeStatistic(addend, inc)
2422         uint64_t                *addend;
2423         uint32_t                inc;
2424 {
2425         mtx_lock_spin(&ntoskrnl_interlock);
2426         *addend += inc;
2427         mtx_unlock_spin(&ntoskrnl_interlock);
2428
2429         return;
2430 }
2431
2432 mdl *
2433 IoAllocateMdl(vaddr, len, secondarybuf, chargequota, iopkt)
2434         void                    *vaddr;
2435         uint32_t                len;
2436         uint8_t                 secondarybuf;
2437         uint8_t                 chargequota;
2438         irp                     *iopkt;
2439 {
2440         mdl                     *m;
2441         int                     zone = 0;
2442
2443         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2444                 m = ExAllocatePoolWithTag(NonPagedPool,
2445                     MmSizeOfMdl(vaddr, len), 0);
2446         else {
2447                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
2448                 zone++;
2449         }
2450
2451         if (m == NULL)
2452                 return (NULL);
2453
2454         MmInitializeMdl(m, vaddr, len);
2455
2456         /*
2457          * MmInitializeMdl() clears the flags field, so we
2458          * have to set this here. If the MDL came from the
2459          * MDL UMA zone, tag it so we can release it to
2460          * the right place later.
2461          */
2462         if (zone)
2463                 m->mdl_flags = MDL_ZONE_ALLOCED;
2464
2465         if (iopkt != NULL) {
2466                 if (secondarybuf == TRUE) {
2467                         mdl                     *last;
2468                         last = iopkt->irp_mdl;
2469                         while (last->mdl_next != NULL)
2470                                 last = last->mdl_next;
2471                         last->mdl_next = m;
2472                 } else {
2473                         if (iopkt->irp_mdl != NULL)
2474                                 panic("leaking an MDL in IoAllocateMdl()");
2475                         iopkt->irp_mdl = m;
2476                 }
2477         }
2478
2479         return (m);
2480 }
2481
2482 void
2483 IoFreeMdl(m)
2484         mdl                     *m;
2485 {
2486         if (m == NULL)
2487                 return;
2488
2489         if (m->mdl_flags & MDL_ZONE_ALLOCED)
2490                 uma_zfree(mdl_zone, m);
2491         else
2492                 ExFreePool(m);
2493
2494         return;
2495 }
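/*
 * Example (editor's sketch, hypothetical caller; buf/buflen assumed):
 * describe a nonpaged kernel buffer with an MDL, then tear it down.
 *
 *	mdl		*m;
 *
 *	m = IoAllocateMdl(buf, buflen, FALSE, FALSE, NULL);
 *	if (m != NULL) {
 *		MmBuildMdlForNonPagedPool(m);
 *		... hand m to code that expects an MDL ...
 *		IoFreeMdl(m);
 *	}
 */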
2496
2497 static void *
2498 MmAllocateContiguousMemory(size, highest)
2499         uint32_t                size;
2500         uint64_t                highest;
2501 {
2502         void *addr;
2503         size_t pagelength = roundup(size, PAGE_SIZE);
2504
2505         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2506
2507         return(addr);
2508 }
2509
2510 static void *
2511 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
2512     boundary, cachetype)
2513         uint32_t                size;
2514         uint64_t                lowest;
2515         uint64_t                highest;
2516         uint64_t                boundary;
2517         uint32_t                cachetype;
2518 {
2519         void *addr;
2520         size_t pagelength = roundup(size, PAGE_SIZE);
2521
2522         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2523
2524         return(addr);
2525 }
2526
2527 static void
2528 MmFreeContiguousMemory(base)
2529         void                    *base;
2530 {
2531         ExFreePool(base);
2532 }
2533
2534 static void
2535 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
2536         void                    *base;
2537         uint32_t                size;
2538         uint32_t                cachetype;
2539 {
2540         ExFreePool(base);
2541 }
2542
2543 static uint32_t
2544 MmSizeOfMdl(vaddr, len)
2545         void                    *vaddr;
2546         size_t                  len;
2547 {
2548         uint32_t                l;
2549
2550         l = sizeof(struct mdl) +
2551             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2552
2553         return(l);
2554 }
2555
2556 /*
2557  * The Microsoft documentation says this routine fills in the
2558  * page array of an MDL with the _physical_ page addresses that
2559  * comprise the buffer, but we don't really want to do that here.
2560  * Instead, we just fill in the page array with the kernel virtual
2561  * addresses of the buffers.
2562  */
2563 void
2564 MmBuildMdlForNonPagedPool(m)
2565         mdl                     *m;
2566 {
2567         vm_offset_t             *mdl_pages;
2568         int                     pagecnt, i;
2569
2570         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2571
2572         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2573                 panic("not enough pages in MDL to describe buffer");
2574
2575         mdl_pages = MmGetMdlPfnArray(m);
2576
2577         for (i = 0; i < pagecnt; i++)
2578                 mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2579
2580         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2581         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2582
2583         return;
2584 }
2585
2586 static void *
2587 MmMapLockedPages(buf, accessmode)
2588         mdl                     *buf;
2589         uint8_t                 accessmode;
2590 {
2591         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2592         return(MmGetMdlVirtualAddress(buf));
2593 }
2594
2595 static void *
2596 MmMapLockedPagesSpecifyCache(buf, accessmode, cachetype, vaddr,
2597     bugcheck, prio)
2598         mdl                     *buf;
2599         uint8_t                 accessmode;
2600         uint32_t                cachetype;
2601         void                    *vaddr;
2602         uint32_t                bugcheck;
2603         uint32_t                prio;
2604 {
2605         return(MmMapLockedPages(buf, accessmode));
2606 }
2607
2608 static void
2609 MmUnmapLockedPages(vaddr, buf)
2610         void                    *vaddr;
2611         mdl                     *buf;
2612 {
2613         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2614         return;
2615 }
2616
2617 /*
2618  * This function has a problem in that it will break if you
2619  * compile this module without PAE and try to use it on a PAE
2620  * kernel. Unfortunately, there's no way around this at the
2621  * moment. It's slightly less broken than using pmap_kextract().
2622  * You'd think the virtual memory subsystem would help us out
2623  * here, but it doesn't.
2624  */
2625
2626 static uint8_t
2627 MmIsAddressValid(vaddr)
2628         void                    *vaddr;
2629 {
2630         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
2631                 return(TRUE);
2632
2633         return(FALSE);
2634 }
2635
2636 void *
2637 MmMapIoSpace(paddr, len, cachetype)
2638         uint64_t                paddr;
2639         uint32_t                len;
2640         uint32_t                cachetype;
2641 {
2642         devclass_t              nexus_class;
2643         device_t                *nexus_devs, devp;
2644         int                     nexus_count = 0;
2645         device_t                matching_dev = NULL;
2646         struct resource         *res;
2647         int                     i;
2648         vm_offset_t             v;
2649
2650         /* There will always be at least one nexus. */
2651
2652         nexus_class = devclass_find("nexus");
2653         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2654
2655         for (i = 0; i < nexus_count; i++) {
2656                 devp = nexus_devs[i];
2657                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2658                 if (matching_dev)
2659                         break;
2660         }
2661
2662         free(nexus_devs, M_TEMP);
2663
2664         if (matching_dev == NULL)
2665                 return(NULL);
2666
2667         v = (vm_offset_t)rman_get_virtual(res);
2668         if (paddr > rman_get_start(res))
2669                 v += paddr - rman_get_start(res);
2670
2671         return((void *)v);
2672 }
2673
2674 void
2675 MmUnmapIoSpace(vaddr, len)
2676         void                    *vaddr;
2677         size_t                  len;
2678 {
2679         return;
2680 }
2681
2682
2683 static device_t
2684 ntoskrnl_finddev(dev, paddr, res)
2685         device_t                dev;
2686         uint64_t                paddr;
2687         struct resource         **res;
2688 {
2689         device_t                *children = NULL;
2690         device_t                matching_dev;
2691         int                     childcnt;
2692         struct resource         *r;
2693         struct resource_list    *rl;
2694         struct resource_list_entry      *rle;
2695         uint32_t                flags;
2696         int                     i;
2697
2698         /* We only want devices that have been successfully probed. */
2699
2700         if (device_is_alive(dev) == FALSE)
2701                 return(NULL);
2702
2703         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2704         if (rl != NULL) {
2705 #if __FreeBSD_version < 600022
2706                 SLIST_FOREACH(rle, rl, link) {
2707 #else
2708                 STAILQ_FOREACH(rle, rl, link) {
2709 #endif
2710                         r = rle->res;
2711
2712                         if (r == NULL)
2713                                 continue;
2714
2715                         flags = rman_get_flags(r);
2716
2717                         if (rle->type == SYS_RES_MEMORY &&
2718                             paddr >= rman_get_start(r) &&
2719                             paddr <= rman_get_end(r)) {
2720                                 if (!(flags & RF_ACTIVE))
2721                                         bus_activate_resource(dev,
2722                                             SYS_RES_MEMORY, 0, r);
2723                                 *res = r;
2724                                 return(dev);
2725                         }
2726                 }
2727         }
2728
2729         /*
2730          * If this device has children, do another
2731          * level of recursion to inspect them.
2732          */
2733
2734         device_get_children(dev, &children, &childcnt);
2735
2736         for (i = 0; i < childcnt; i++) {
2737                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2738                 if (matching_dev != NULL) {
2739                         free(children, M_TEMP);
2740                         return(matching_dev);
2741                 }
2742         }
2743
2744         
2745         /* Won't somebody please think of the children! */
2746
2747         if (children != NULL)
2748                 free(children, M_TEMP);
2749
2750         return(NULL);
2751 }
2752
2753 /*
2754  * Workitems are unlike DPCs, in that they run in a user-mode thread
2755  * context rather than at DISPATCH_LEVEL in kernel context. In our
2756  * case we run them in kernel context anyway.
2757  */
2758 static void
2759 ntoskrnl_workitem_thread(arg)
2760         void                    *arg;
2761 {
2762         kdpc_queue              *kq;
2763         list_entry              *l;
2764         io_workitem             *iw;
2765         uint8_t                 irql;
2766
2767         kq = arg;
2768
2769         InitializeListHead(&kq->kq_disp);
2770         kq->kq_td = curthread;
2771         kq->kq_exit = 0;
2772         KeInitializeSpinLock(&kq->kq_lock);
2773         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2774
2775         while (1) {
2776                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2777
2778                 KeAcquireSpinLock(&kq->kq_lock, &irql);
2779
2780                 if (kq->kq_exit) {
2781                         kq->kq_exit = 0;
2782                         KeReleaseSpinLock(&kq->kq_lock, irql);
2783                         break;
2784                 }
2785
2786                 while (!IsListEmpty(&kq->kq_disp)) {
2787                         l = RemoveHeadList(&kq->kq_disp);
2788                         iw = CONTAINING_RECORD(l,
2789                             io_workitem, iw_listentry);
2790                         InitializeListHead((&iw->iw_listentry));
2791                         if (iw->iw_func == NULL)
2792                                 continue;
2793                         KeReleaseSpinLock(&kq->kq_lock, irql);
2794                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2795                         KeAcquireSpinLock(&kq->kq_lock, &irql);
2796                 }
2797
2798                 KeReleaseSpinLock(&kq->kq_lock, irql);
2799         }
2800
2801 #if __FreeBSD_version < 502113
2802         mtx_lock(&Giant);
2803 #endif
2804         kthread_exit(0);
2805         return; /* notreached */
2806 }
2807
2808 static void
2809 ntoskrnl_destroy_workitem_threads(void)
2810 {
2811         kdpc_queue              *kq;
2812         int                     i;
2813
2814         for (i = 0; i < WORKITEM_THREADS; i++) {
2815                 kq = wq_queues + i;
2816                 kq->kq_exit = 1;
2817                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);       
2818                 while (kq->kq_exit)
2819                         tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
2820         }
2821
2822         return;
2823 }
2824
2825 io_workitem *
2826 IoAllocateWorkItem(dobj)
2827         device_object           *dobj;
2828 {
2829         io_workitem             *iw;
2830
2831         iw = uma_zalloc(iw_zone, M_NOWAIT);
2832         if (iw == NULL)
2833                 return(NULL);
2834
2835         InitializeListHead(&iw->iw_listentry);
2836         iw->iw_dobj = dobj;
2837
2838         mtx_lock(&ntoskrnl_dispatchlock);
2839         iw->iw_idx = wq_idx;
2840         WORKIDX_INC(wq_idx);
2841         mtx_unlock(&ntoskrnl_dispatchlock);
2842
2843         return(iw);
2844 }
2845
2846 void
2847 IoFreeWorkItem(iw)
2848         io_workitem             *iw;
2849 {
2850         uma_zfree(iw_zone, iw);
2851         return;
2852 }
2853
2854 void
2855 IoQueueWorkItem(iw, iw_func, qtype, ctx)
2856         io_workitem             *iw;
2857         io_workitem_func        iw_func;
2858         uint32_t                qtype;
2859         void                    *ctx;
2860 {
2861         kdpc_queue              *kq;
2862         list_entry              *l;
2863         io_workitem             *cur;
2864         uint8_t                 irql;
2865
2866         kq = wq_queues + iw->iw_idx;
2867
2868         KeAcquireSpinLock(&kq->kq_lock, &irql);
2869
2870         /*
2871          * Traverse the list and make sure this workitem hasn't
2872          * already been inserted. Queuing the same workitem
2873          * twice will hose the list but good.
2874          */
2875
2876         l = kq->kq_disp.nle_flink;
2877         while (l != &kq->kq_disp) {
2878                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2879                 if (cur == iw) {
2880                         /* Already queued -- do nothing. */
2881                         KeReleaseSpinLock(&kq->kq_lock, irql);
2882                         return;
2883                 }
2884                 l = l->nle_flink;
2885         }
2886
2887         iw->iw_func = iw_func;
2888         iw->iw_ctx = ctx;
2889
2890         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2891         KeReleaseSpinLock(&kq->kq_lock, irql);
2892
2893         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2894
2895         return;
2896 }
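/*
 * Example (editor's sketch, hypothetical driver; my_work and ctx
 * assumed): defer a callback to workitem (thread) context. Note that
 * qtype is accepted for API compatibility but unused here, and the
 * workitem should be released with IoFreeWorkItem() when done.
 *
 *	iw = IoAllocateWorkItem(dobj);
 *	if (iw != NULL)
 *		IoQueueWorkItem(iw, (io_workitem_func)my_work, 0, ctx);
 */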
2897
2898 static void
2899 ntoskrnl_workitem(dobj, arg)
2900         device_object           *dobj;
2901         void                    *arg;
2902 {
2903         io_workitem             *iw;
2904         work_queue_item         *w;
2905         work_item_func          f;
2906
2907         iw = arg;
2908         w = (work_queue_item *)dobj;
2909         f = (work_item_func)w->wqi_func;
2910         uma_zfree(iw_zone, iw);
2911         MSCALL2(f, w, w->wqi_ctx);
2912
2913         return;
2914 }
2915
2916 /*
2917  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2918  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2919  * problem with ExQueueWorkItem() is that it can't guard against
2920  * the condition where a driver submits a job to the work queue and
2921  * is then unloaded before the job is able to run. IoQueueWorkItem()
2922  * acquires a reference to the device's device_object via the
2923  * object manager and retains it until after the job has completed,
2924  * which prevents the driver from being unloaded before the job
2925  * runs. (We don't currently support this behavior, though hopefully
2926  * that will change once the object manager API is fleshed out a bit.)
2927  *
2928  * Having said all that, the ExQueueWorkItem() API remains, because
2929  * there are still other parts of Windows that use it, including
2930  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2931  * We fake up the ExQueueWorkItem() API on top of our implementation
2932  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2933  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2934  * queue item (provided by the caller) in to IoAllocateWorkItem()
2935  * instead of the device_object. We need to save this pointer so
2936  * we can apply a sanity check: as with the DPC queue and other
2937  * workitem queues, we can't allow the same work queue item to
2938  * be queued twice. If it's already pending, we silently return.
2939  */
2940
2941 void
2942 ExQueueWorkItem(w, qtype)
2943         work_queue_item         *w;
2944         uint32_t                qtype;
2945 {
2946         io_workitem             *iw;
2947         io_workitem_func        iwf;
2948         kdpc_queue              *kq;
2949         list_entry              *l;
2950         io_workitem             *cur;
2951         uint8_t                 irql;
2952
2953
2954         /*
2955          * We need to do a special sanity test to make sure
2956          * the ExQueueWorkItem() API isn't used to queue
2957          * the same workitem twice. Rather than checking the
2958          * io_workitem pointer itself, we test the attached
2959          * device object, which is really a pointer to the
2960          * legacy work queue item structure.
2961          */
2962
2963         kq = wq_queues + WORKITEM_LEGACY_THREAD;
2964         KeAcquireSpinLock(&kq->kq_lock, &irql);
2965         l = kq->kq_disp.nle_flink;
2966         while (l != &kq->kq_disp) {
2967                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2968                 if (cur->iw_dobj == (device_object *)w) {
2969                         /* Already queued -- do nothing. */
2970                         KeReleaseSpinLock(&kq->kq_lock, irql);
2971                         return;
2972                 }
2973                 l = l->nle_flink;
2974         }
2975         KeReleaseSpinLock(&kq->kq_lock, irql);
2976
2977         iw = IoAllocateWorkItem((device_object *)w);
2978         if (iw == NULL)
2979                 return;
2980
2981         iw->iw_idx = WORKITEM_LEGACY_THREAD;
2982         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2983         IoQueueWorkItem(iw, iwf, qtype, iw);
2984
2985         return;
2986 }
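/*
 * Example (editor's sketch, hypothetical legacy caller; my_legacy_func
 * and ctx assumed): the only fields consumed by ntoskrnl_workitem()
 * above are wqi_func and wqi_ctx.
 *
 *	work_queue_item		wqi;
 *
 *	wqi.wqi_func = (work_item_func)my_legacy_func;
 *	wqi.wqi_ctx = ctx;
 *	ExQueueWorkItem(&wqi, 0);	(qtype, unused here)
 */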
2987
2988 static void
2989 RtlZeroMemory(dst, len)
2990         void                    *dst;
2991         size_t                  len;
2992 {
2993         bzero(dst, len);
2994         return;
2995 }
2996
2997 static void
2998 RtlCopyMemory(dst, src, len)
2999         void                    *dst;
3000         const void              *src;
3001         size_t                  len;
3002 {
3003         bcopy(src, dst, len);
3004         return;
3005 }
3006
3007 static size_t
3008 RtlCompareMemory(s1, s2, len)
3009         const void              *s1;
3010         const void              *s2;
3011         size_t                  len;
3012 {
3013         size_t                  i, total = 0;
3014         uint8_t                 *m1, *m2;
3015
3016         m1 = __DECONST(uint8_t *, s1);
3017         m2 = __DECONST(uint8_t *, s2);
3018
3019         for (i = 0; i < len; i++) {
3020                 if (m1[i] == m2[i])
3021                         total++;
3022         }
3023         return(total);
3024 }
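/*
 * Worked example (editorial): RtlCompareMemory("abcd", "abed", 4)
 * returns 3, since positions 0, 1 and 3 match. This counts every
 * matching byte position, not just the leading run of matches.
 */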
3025
3026 void
3027 RtlInitAnsiString(dst, src)
3028         ansi_string             *dst;
3029         char                    *src;
3030 {
3031         ansi_string             *a;
3032
3033         a = dst;
3034         if (a == NULL)
3035                 return;
3036         if (src == NULL) {
3037                 a->as_len = a->as_maxlen = 0;
3038                 a->as_buf = NULL;
3039         } else {
3040                 a->as_buf = src;
3041                 a->as_len = a->as_maxlen = strlen(src);
3042         }
3043
3044         return;
3045 }
3046
3047 void
3048 RtlInitUnicodeString(dst, src)
3049         unicode_string          *dst;
3050         uint16_t                *src;
3051 {
3052         unicode_string          *u;
3053         int                     i;
3054
3055         u = dst;
3056         if (u == NULL)
3057                 return;
3058         if (src == NULL) {
3059                 u->us_len = u->us_maxlen = 0;
3060                 u->us_buf = NULL;
3061         } else {
3062                 i = 0;
3063                 while(src[i] != 0)
3064                         i++;
3065                 u->us_buf = src;
3066                 u->us_len = u->us_maxlen = i * 2;
3067         }
3068
3069         return;
3070 }
3071
3072 ndis_status
3073 RtlUnicodeStringToInteger(ustr, base, val)
3074         unicode_string          *ustr;
3075         uint32_t                base;
3076         uint32_t                *val;
3077 {
3078         uint16_t                *uchr;
3079         int                     len, neg = 0;
3080         char                    abuf[64];
3081         char                    *astr;
3082
3083         uchr = ustr->us_buf;
3084         len = ustr->us_len;
3085         bzero(abuf, sizeof(abuf));
3086
3087         if ((char)((*uchr) & 0xFF) == '-') {
3088                 neg = 1;
3089                 uchr++;
3090                 len -= 2;
3091         } else if ((char)((*uchr) & 0xFF) == '+') {
3092                 neg = 0;
3093                 uchr++;
3094                 len -= 2;
3095         }
3096
3097         if (base == 0) {
3098                 if ((char)((*uchr) & 0xFF) == 'b') {
3099                         base = 2;
3100                         uchr++;
3101                         len -= 2;
3102                 } else if ((char)((*uchr) & 0xFF) == 'o') {
3103                         base = 8;
3104                         uchr++;
3105                         len -= 2;
3106                 } else if ((char)((*uchr) & 0xFF) == 'x') {
3107                         base = 16;
3108                         uchr++;
3109                         len -= 2;
3110                 } else
3111                         base = 10;
3112         }
3113
3114         astr = abuf;
3115         if (neg) {
3116                 strcpy(astr, "-");
3117                 astr++;
3118         }
3119
3120         ntoskrnl_unicode_to_ascii(uchr, astr, len);
3121         *val = strtoul(abuf, NULL, base);
3122
3123         return(STATUS_SUCCESS);
3124 }
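/*
 * Worked example (editorial): a unicode string "x1F" with base == 0
 * selects base 16 via the 'x' prefix above, and the strtoul() of
 * "1F" in base 16 leaves *val == 31.
 */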
3125
3126 void
3127 RtlFreeUnicodeString(ustr)
3128         unicode_string          *ustr;
3129 {
3130         if (ustr->us_buf == NULL)
3131                 return;
3132         ExFreePool(ustr->us_buf);
3133         ustr->us_buf = NULL;
3134         return;
3135 }
3136
3137 void
3138 RtlFreeAnsiString(astr)
3139         ansi_string             *astr;
3140 {
3141         if (astr->as_buf == NULL)
3142                 return;
3143         ExFreePool(astr->as_buf);
3144         astr->as_buf = NULL;
3145         return;
3146 }
3147
3148 static int
3149 atoi(str)
3150         const char              *str;
3151 {
3152         return (int)strtol(str, (char **)NULL, 10);
3153 }
3154
3155 static long
3156 atol(str)
3157         const char              *str;
3158 {
3159         return strtol(str, (char **)NULL, 10);
3160 }
3161
3162 static int
3163 rand(void)
3164 {
3165         struct timeval          tv;
3166
3167         microtime(&tv);
3168         srandom(tv.tv_usec);
3169         return((int)random());
3170 }
3171
3172 static void
3173 srand(seed)
3174         unsigned int            seed;
3175 {
3176         srandom(seed);
3177         return;
3178 }
3179
3180 static uint8_t
3181 IoIsWdmVersionAvailable(major, minor)
3182         uint8_t                 major;
3183         uint8_t                 minor;
3184 {
3185         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3186                 return(TRUE);
3187         return(FALSE);
3188 }
3189
3190 static ndis_status
3191 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3192         device_object           *devobj;
3193         uint32_t                regprop;
3194         uint32_t                buflen;
3195         void                    *prop;
3196         uint32_t                *reslen;
3197 {
3198         driver_object           *drv;
3199         uint16_t                **name;
3200
3201         drv = devobj->do_drvobj;
3202
3203         switch (regprop) {
3204         case DEVPROP_DRIVER_KEYNAME:
3205                 name = prop;
3206                 *name = drv->dro_drivername.us_buf;
3207                 *reslen = drv->dro_drivername.us_len;
3208                 break;
3209         default:
3210                 return(STATUS_INVALID_PARAMETER_2);
3211                 break;
3212         }
3213
3214         return(STATUS_SUCCESS);
3215 }
3216
3217 static void
3218 KeInitializeMutex(kmutex, level)
3219         kmutant                 *kmutex;
3220         uint32_t                level;
3221 {
3222         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3223         kmutex->km_abandoned = FALSE;
3224         kmutex->km_apcdisable = 1;
3225         kmutex->km_header.dh_sigstate = 1;
3226         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3227         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3228         kmutex->km_ownerthread = NULL;
3229         return;
3230 }
3231
3232 static uint32_t
3233 KeReleaseMutex(kmutex, kwait)
3234         kmutant                 *kmutex;
3235         uint8_t                 kwait;
3236 {
3237         uint32_t                prevstate;
3238
3239         mtx_lock(&ntoskrnl_dispatchlock);
3240         prevstate = kmutex->km_header.dh_sigstate;
3241         if (kmutex->km_ownerthread != curthread) {
3242                 mtx_unlock(&ntoskrnl_dispatchlock);
3243                 return(STATUS_MUTANT_NOT_OWNED);
3244         }
3245
3246         kmutex->km_header.dh_sigstate++;
3247         kmutex->km_abandoned = FALSE;
3248
3249         if (kmutex->km_header.dh_sigstate == 1) {
3250                 kmutex->km_ownerthread = NULL;
3251                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3252         }
3253
3254         mtx_unlock(&ntoskrnl_dispatchlock);
3255
3256         return(prevstate);
3257 }
3258
3259 static uint32_t
3260 KeReadStateMutex(kmutex)
3261         kmutant                 *kmutex;
3262 {
3263         return(kmutex->km_header.dh_sigstate);
3264 }
3265
3266 void
3267 KeInitializeEvent(kevent, type, state)
3268         nt_kevent               *kevent;
3269         uint32_t                type;
3270         uint8_t                 state;
3271 {
3272         InitializeListHead((&kevent->k_header.dh_waitlisthead));
3273         kevent->k_header.dh_sigstate = state;
3274         if (type == EVENT_TYPE_NOTIFY)
3275                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3276         else
3277                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3278         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3279         return;
3280 }
3281
3282 uint32_t
3283 KeResetEvent(kevent)
3284         nt_kevent               *kevent;
3285 {
3286         uint32_t                prevstate;
3287
3288         mtx_lock(&ntoskrnl_dispatchlock);
3289         prevstate = kevent->k_header.dh_sigstate;
3290         kevent->k_header.dh_sigstate = FALSE;
3291         mtx_unlock(&ntoskrnl_dispatchlock);
3292
3293         return(prevstate);
3294 }
3295
3296 uint32_t
3297 KeSetEvent(kevent, increment, kwait)
3298         nt_kevent               *kevent;
3299         uint32_t                increment;
3300         uint8_t                 kwait;
3301 {
3302         uint32_t                prevstate;
3303         wait_block              *w;
3304         nt_dispatch_header      *dh;
3305         struct thread           *td;
3306         wb_ext                  *we;
3307
3308         mtx_lock(&ntoskrnl_dispatchlock);
3309         prevstate = kevent->k_header.dh_sigstate;
3310         dh = &kevent->k_header;
3311
3312         if (IsListEmpty(&dh->dh_waitlisthead))
3313                 /*
3314                  * If there's nobody in the waitlist, just set
3315                  * the state to signalled.
3316                  */
3317                 dh->dh_sigstate = 1;
3318         else {
3319                 /*
3320                  * Get the first waiter. If this is a synchronization
3321                  * event, just wake up that one thread (don't bother
3322                  * setting the state to signalled since we're supposed
3323                  * to automatically clear synchronization events anyway).
3324                  *
3325                  * If it's a notification event, or the first
3326                  * waiter is doing a WAITTYPE_ALL wait, go through
3327                  * the full wait satisfaction process.
3328                  */
3329                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3330                     wait_block, wb_waitlist);
3331                 we = w->wb_ext;
3332                 td = we->we_td;
3333                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3334                     w->wb_waittype == WAITTYPE_ALL) {
3335                         if (prevstate == 0) {
3336                                 dh->dh_sigstate = 1;
3337                                 ntoskrnl_waittest(dh, increment);
3338                         }
3339                 } else {
3340                         w->wb_awakened = TRUE;
3341                         cv_broadcastpri(&we->we_cv, w->wb_oldpri -
3342                             (increment * 4));
3343                 }
3344         }
3345
3346         mtx_unlock(&ntoskrnl_dispatchlock);
3347
3348         return(prevstate);
3349 }
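
     /*
      * Illustrative sketch (not from the original source; "ev" and
      * the waiter/signaller roles are hypothetical). A synchronization
      * event auto-clears and wakes one waiter, while a notification
      * event stays signalled until explicitly cleared:
      *
      *      nt_kevent       ev;
      *
      *      KeInitializeEvent(&ev, EVENT_TYPE_SYNC, FALSE);
      *
      *      waiter:         KeWaitForSingleObject(&ev, 0, 0, TRUE, NULL);
      *      signaller:      KeSetEvent(&ev, IO_NO_INCREMENT, FALSE);
      *
      * With EVENT_TYPE_NOTIFY, the event would remain signalled after
      * KeSetEvent() until KeClearEvent() or KeResetEvent() is called.
      */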
3350
3351 void
3352 KeClearEvent(kevent)
3353         nt_kevent               *kevent;
3354 {
3355         kevent->k_header.dh_sigstate = FALSE;
3356         return;
3357 }
3358
3359 uint32_t
3360 KeReadStateEvent(kevent)
3361         nt_kevent               *kevent;
3362 {
3363         return(kevent->k_header.dh_sigstate);
3364 }
3365
3366 /*
3367  * The object manager in Windows is responsible for managing
3368  * references and access to various types of objects, including
3369  * device_objects, events, threads, timers and so on. However,
3370  * there's a difference in the way objects are handled in user
3371  * mode versus kernel mode.
3372  *
3373  * In user mode (i.e. Win32 applications), all objects are
3374  * managed by the object manager. For example, when you create
3375  * a timer or event object, you actually end up with an 
3376  * object_header (for the object manager's bookkeeping
3377  * purposes) and an object body (which contains the actual object
3378  * structure, e.g. ktimer, kevent, etc...). This allows Windows
3379  * to manage resource quotas and to enforce access restrictions
3380  * on basically every kind of system object handled by the kernel.
3381  *
3382  * However, in kernel mode, you only end up using the object
3383  * manager some of the time. For example, in a driver, you create
3384  * a timer object by simply allocating the memory for a ktimer
3385  * structure and initializing it with KeInitializeTimer(). Hence,
3386  * the timer has no object_header and no reference counting or
3387  * security/resource checks are done on it. The assumption in
3388  * this case is that if you're running in kernel mode, you know
3389  * what you're doing, and you're already at an elevated privilege
3390  * anyway.
3391  *
3392  * There are some exceptions to this. The two most important ones
3393  * for our purposes are device_objects and threads. We need to use
3394  * the object manager to do reference counting on device_objects,
3395  * and for threads, you can only get a pointer to a thread's
3396  * dispatch header by using ObReferenceObjectByHandle() on the
3397  * handle returned by PsCreateSystemThread().
3398  */
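
     /*
      * A hedged sketch of the pattern described above (not part of
      * the original code; "th" stands for a thread handle returned
      * by PsCreateSystemThread()):
      *
      *      void            *tobj;
      *
      *      ObReferenceObjectByHandle(th, 0, NULL, 0, &tobj, NULL);
      *      (tobj now points at a waitable dispatch header)
      *      ObfDereferenceObject(tobj);
      */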
3399
3400 static ndis_status
3401 ObReferenceObjectByHandle(handle, reqaccess, otype,
3402     accessmode, object, handleinfo)
3403         ndis_handle             handle;
3404         uint32_t                reqaccess;
3405         void                    *otype;
3406         uint8_t                 accessmode;
3407         void                    **object;
3408         void                    **handleinfo;
3409 {
3410         nt_objref               *nr;
3411
3412         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3413         if (nr == NULL)
3414                 return(STATUS_INSUFFICIENT_RESOURCES);
3415
3416         InitializeListHead((&nr->no_dh.dh_waitlisthead));
3417         nr->no_obj = handle;
3418         nr->no_dh.dh_type = DISP_TYPE_THREAD;
3419         nr->no_dh.dh_sigstate = 0;
3420         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3421             sizeof(uint32_t));
3422         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3423         *object = nr;
3424
3425         return(STATUS_SUCCESS);
3426 }
3427
3428 static void
3429 ObfDereferenceObject(object)
3430         void                    *object;
3431 {
3432         nt_objref               *nr;
3433
3434         nr = object;
3435         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3436         free(nr, M_DEVBUF);
3437
3438         return;
3439 }
3440
3441 static uint32_t
3442 ZwClose(handle)
3443         ndis_handle             handle;
3444 {
3445         return(STATUS_SUCCESS);
3446 }
3447
3448 static uint32_t
3449 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3450         uint32_t                traceclass;
3451         void                    *traceinfo;
3452         uint32_t                infolen;
3453         uint32_t                reqlen;
3454         void                    *buf;
3455 {
3456         return(STATUS_NOT_FOUND);
3457 }
3458
3459 static uint32_t
3460 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3461         void *guid, uint16_t messagenum, ...)
3462 {
3463         return(STATUS_SUCCESS);
3464 }
3465
3466 static uint32_t
3467 IoWMIRegistrationControl(dobj, action)
3468         device_object           *dobj;
3469         uint32_t                action;
3470 {
3471         return(STATUS_SUCCESS);
3472 }
3473
3474 /*
3475  * This is here just in case the thread returns without calling
3476  * PsTerminateSystemThread().
3477  */
3478 static void
3479 ntoskrnl_thrfunc(arg)
3480         void                    *arg;
3481 {
3482         thread_context          *thrctx;
3483         uint32_t (*tfunc)(void *);
3484         void                    *tctx;
3485         uint32_t                rval;
3486
3487         thrctx = arg;
3488         tfunc = thrctx->tc_thrfunc;
3489         tctx = thrctx->tc_thrctx;
3490         free(thrctx, M_TEMP);
3491
3492         rval = MSCALL1(tfunc, tctx);
3493
3494         PsTerminateSystemThread(rval);
3495         return; /* notreached */
3496 }
3497
3498 static ndis_status
3499 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3500         clientid, thrfunc, thrctx)
3501         ndis_handle             *handle;
3502         uint32_t                reqaccess;
3503         void                    *objattrs;
3504         ndis_handle             phandle;
3505         void                    *clientid;
3506         void                    *thrfunc;
3507         void                    *thrctx;
3508 {
3509         int                     error;
3510         char                    tname[128];
3511         thread_context          *tc;
3512         struct proc             *p;
3513
3514         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3515         if (tc == NULL)
3516                 return(STATUS_INSUFFICIENT_RESOURCES);
3517
3518         tc->tc_thrctx = thrctx;
3519         tc->tc_thrfunc = thrfunc;
3520
3521         sprintf(tname, "windows kthread %d", ntoskrnl_kth);
3522         error = kthread_create(ntoskrnl_thrfunc, tc, &p,
3523             RFHIGHPID, NDIS_KSTACK_PAGES, tname);
3524
3525         if (error) {
3526                 free(tc, M_TEMP);
3527                 return(STATUS_INSUFFICIENT_RESOURCES);
3528         }
3529
3530         *handle = p;
3531         ntoskrnl_kth++;
3532
3533         return(STATUS_SUCCESS);
3534 }
3535
3536 /*
3537  * In Windows, the exit of a thread is an event that you're allowed
3538  * to wait on, assuming you've obtained a reference to the thread using
3539  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3540  * simulate this behavior is to register each thread we create in a
3541  * reference list, and if someone holds a reference to us, we poke
3542  * them.
3543  */
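
     /*
      * Putting the pieces together, a minimal sketch of how a driver
      * would wait for one of its worker threads to exit (illustrative
      * only; my_thread_func and my_ctx are hypothetical):
      *
      *      ndis_handle     th;
      *      void            *tobj;
      *
      *      PsCreateSystemThread(&th, 0, NULL, NULL, NULL,
      *          my_thread_func, my_ctx);
      *      ObReferenceObjectByHandle(th, 0, NULL, 0, &tobj, NULL);
      *      KeWaitForSingleObject(tobj, 0, 0, TRUE, NULL);
      *      ObfDereferenceObject(tobj);
      *      ZwClose(th);
      */
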
3544 static ndis_status
3545 PsTerminateSystemThread(status)
3546         ndis_status             status;
3547 {
3548         struct nt_objref        *nr;
3549
3550         mtx_lock(&ntoskrnl_dispatchlock);
3551         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3552                 if (nr->no_obj != curthread->td_proc)
3553                         continue;
3554                 nr->no_dh.dh_sigstate = 1;
3555                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3556                 break;
3557         }
3558         mtx_unlock(&ntoskrnl_dispatchlock);
3559
3560         ntoskrnl_kth--;
3561
3562 #if __FreeBSD_version < 502113
3563         mtx_lock(&Giant);
3564 #endif
3565         kthread_exit(0);
3566         return(0);      /* notreached */
3567 }
3568
3569 static uint32_t
3570 DbgPrint(char *fmt, ...)
3571 {
3572         va_list                 ap;
3573
3574         if (bootverbose) {
3575                 va_start(ap, fmt);
3576                 vprintf(fmt, ap);
                     va_end(ap);
3577         }
3578
3579         return(STATUS_SUCCESS);
3580 }
3581
3582 static void
3583 DbgBreakPoint(void)
3584 {
3585
3586 #if __FreeBSD_version < 502113
3587         Debugger("DbgBreakPoint(): breakpoint");
3588 #else
3589         kdb_enter("DbgBreakPoint(): breakpoint");
3590 #endif
3591 }
3592
3593 static void
3594 ntoskrnl_timercall(arg)
3595         void                    *arg;
3596 {
3597         ktimer                  *timer;
3598         struct timeval          tv;
3599         kdpc                    *dpc;
3600
3601         mtx_lock(&ntoskrnl_dispatchlock);
3602
3603         timer = arg;
3604
3605 #ifdef NTOSKRNL_DEBUG_TIMERS
3606         ntoskrnl_timer_fires++;
3607 #endif
3608         ntoskrnl_remove_timer(timer);
3609
3610         /*
3611          * This should never happen, but complain
3612          * if it does.
3613          */
3614
3615         if (timer->k_header.dh_inserted == FALSE) {
3616                 mtx_unlock(&ntoskrnl_dispatchlock);
3617                 printf("NTOS: timer %p fired even though "
3618                     "it was canceled\n", timer);
3619                 return;
3620         }
3621
3622         /* Mark the timer as no longer being on the timer queue. */
3623
3624         timer->k_header.dh_inserted = FALSE;
3625
3626         /* Now signal the object and satisfy any waits on it. */
3627
3628         timer->k_header.dh_sigstate = 1;
3629         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3630
3631         /*
3632          * If this is a periodic timer, re-arm it
3633          * so it will fire again. We do this before
3634          * calling any deferred procedure calls because
3635          * it's possible the DPC might cancel the timer,
3636          * in which case it would be wrong for us to
3637          * re-arm it again afterwards.
3638          */
3639
3640         if (timer->k_period) {
3641                 tv.tv_sec = 0;
3642                 tv.tv_usec = timer->k_period * 1000;
3643                 timer->k_header.dh_inserted = TRUE;
3644                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3645 #ifdef NTOSKRNL_DEBUG_TIMERS
3646                 ntoskrnl_timer_reloads++;
3647 #endif
3648         }
3649
3650         dpc = timer->k_dpc;
3651
3652         mtx_unlock(&ntoskrnl_dispatchlock);
3653
3654         /* If there's a DPC associated with the timer, queue it up. */
3655
3656         if (dpc != NULL)
3657                 KeInsertQueueDpc(dpc, NULL, NULL);
3658
3659         return;
3660 }
3661
3662 #ifdef NTOSKRNL_DEBUG_TIMERS
3663 static int
3664 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3665 {
3666         int                     ret;
3667
3668         ret = 0;
3669         ntoskrnl_show_timers();
3670         return (sysctl_handle_int(oidp, &ret, 0, req));
3671 }
3672
3673 static void
3674 ntoskrnl_show_timers()
3675 {
3676         int                     i = 0;
3677         list_entry              *l;
3678
3679         mtx_lock_spin(&ntoskrnl_calllock);
3680         l = ntoskrnl_calllist.nle_flink;
3681         while (l != &ntoskrnl_calllist) {
3682                 i++;
3683                 l = l->nle_flink;
3684         }
3685         mtx_unlock_spin(&ntoskrnl_calllock);
3686
3687         printf("\n");
3688         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3689         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3690         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3691         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3692         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3693         printf("\n");
3694
3695         return;
3696 }
3697 #endif
3698
3699 /*
3700  * Must be called with dispatcher lock held.
3701  */
3702
3703 static void
3704 ntoskrnl_insert_timer(timer, ticks)
3705         ktimer                  *timer;
3706         int                     ticks;
3707 {
3708         callout_entry           *e;
3709         list_entry              *l;
3710         struct callout          *c;
3711
3712         /*
3713          * Try to allocate a timer.
3714          */
3715         mtx_lock_spin(&ntoskrnl_calllock);
3716         if (IsListEmpty(&ntoskrnl_calllist)) {
3717                 mtx_unlock_spin(&ntoskrnl_calllock);
3718 #ifdef NTOSKRNL_DEBUG_TIMERS
3719                 ntoskrnl_show_timers();
3720 #endif
3721                 panic("out of timers!");
3722         }
3723         l = RemoveHeadList(&ntoskrnl_calllist);
3724         mtx_unlock_spin(&ntoskrnl_calllock);
3725
3726         e = CONTAINING_RECORD(l, callout_entry, ce_list);
3727         c = &e->ce_callout;
3728
3729         timer->k_callout = c;
3730
3731         callout_init(c, CALLOUT_MPSAFE);
3732         callout_reset(c, ticks, ntoskrnl_timercall, timer);
3733
3734         return;
3735 }
3736
3737 static void
3738 ntoskrnl_remove_timer(timer)
3739         ktimer                  *timer;
3740 {
3741         callout_entry           *e;
3742
3743         e = (callout_entry *)timer->k_callout;
3744         callout_stop(timer->k_callout);
3745
3746         mtx_lock_spin(&ntoskrnl_calllock);
3747         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3748         mtx_unlock_spin(&ntoskrnl_calllock);
3749
3750         return;
3751 }
3752
3753 void
3754 KeInitializeTimer(timer)
3755         ktimer                  *timer;
3756 {
3757         if (timer == NULL)
3758                 return;
3759
3760         KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3761
3762         return;
3763 }
3764
3765 void
3766 KeInitializeTimerEx(timer, type)
3767         ktimer                  *timer;
3768         uint32_t                type;
3769 {
3770         if (timer == NULL)
3771                 return;
3772
3773         bzero((char *)timer, sizeof(ktimer));
3774         InitializeListHead((&timer->k_header.dh_waitlisthead));
3775         timer->k_header.dh_sigstate = FALSE;
3776         timer->k_header.dh_inserted = FALSE;
3777         if (type == EVENT_TYPE_NOTIFY)
3778                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3779         else
3780                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3781         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3782
3783         return;
3784 }
3785
3786 /*
3787  * DPC subsystem. A Windows Deferred Procedure Call has the following
3788  * properties:
3789  * - It runs at DISPATCH_LEVEL.
3790  * - It can have one of 3 importance values that control when it
3791  *   runs relative to other DPCs in the queue.
3792  * - On SMP systems, it can be set to run on a specific processor.
3793  * In order to satisfy the last property, we create a DPC thread for
3794  * each CPU in the system and bind it to that CPU. Each thread
3795  * maintains a single dispatch queue; a DPC's importance determines
3796  * whether it is inserted at the head or at the tail of that queue.
3797  *
3798  * In Windows, interrupt handlers run as DPCs. (Not to be confused
3799  * with ISRs, which run in interrupt context and can preempt DPCs.)
3800  * Interrupt DPCs are given the highest importance so that they'll
3801  * take precedence over timers and other things.
3802  */
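
     /*
      * A minimal usage sketch (illustrative only; my_dpc_func and
      * my_ctx are hypothetical): a driver initializes a DPC once,
      * optionally tunes it, and then queues it from timer or
      * interrupt context.
      *
      *      kdpc            d;
      *
      *      KeInitializeDpc(&d, my_dpc_func, my_ctx);
      *      KeSetImportanceDpc(&d, KDPC_IMPORTANCE_HIGH);
      *      KeSetTargetProcessorDpc(&d, 0);
      *      KeInsertQueueDpc(&d, NULL, NULL);
      */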
3803
3804 static void
3805 ntoskrnl_dpc_thread(arg)
3806         void                    *arg;
3807 {
3808         kdpc_queue              *kq;
3809         kdpc                    *d;
3810         list_entry              *l;
3811         uint8_t                 irql;
3812
3813         kq = arg;
3814
3815         InitializeListHead(&kq->kq_disp);
3816         kq->kq_td = curthread;
3817         kq->kq_exit = 0;
3818         kq->kq_running = FALSE;
3819         KeInitializeSpinLock(&kq->kq_lock);
3820         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3821         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3822
3823         /*
3824          * Elevate our priority. DPCs are used to run interrupt
3825          * handlers, and they should trigger as soon as possible
3826          * once scheduled by an ISR.
3827          */
3828
3829         thread_lock(curthread);
3830 #ifdef NTOSKRNL_MULTIPLE_DPCS
3831 #if __FreeBSD_version >= 502102
3832         sched_bind(curthread, kq->kq_cpu);
3833 #endif
3834 #endif
3835         sched_prio(curthread, PRI_MIN_KERN);
3836 #if __FreeBSD_version < 600000
3837         curthread->td_base_pri = PRI_MIN_KERN;
3838 #endif
3839         thread_unlock(curthread);
3840
3841         while (1) {
3842                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3843
3844                 KeAcquireSpinLock(&kq->kq_lock, &irql);
3845
3846                 if (kq->kq_exit) {
3847                         kq->kq_exit = 0;
3848                         KeReleaseSpinLock(&kq->kq_lock, irql);
3849                         break;
3850                 }
3851
3852                 kq->kq_running = TRUE;
3853
3854                 while (!IsListEmpty(&kq->kq_disp)) {
3855                         l = RemoveHeadList((&kq->kq_disp));
3856                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3857                         InitializeListHead((&d->k_dpclistentry));
3858                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3859                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3860                             d->k_sysarg1, d->k_sysarg2);
3861                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3862                 }
3863
3864                 kq->kq_running = FALSE;
3865
3866                 KeReleaseSpinLock(&kq->kq_lock, irql);
3867
3868                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3869         }
3870
3871 #if __FreeBSD_version < 502113
3872         mtx_lock(&Giant);
3873 #endif
3874         kthread_exit(0);
3875         return; /* notreached */
3876 }
3877
3878 static void
3879 ntoskrnl_destroy_dpc_threads(void)
3880 {
3881         kdpc_queue              *kq;
3882         kdpc                    dpc;
3883         int                     i;
3884
3885         kq = kq_queues;
3886 #ifdef NTOSKRNL_MULTIPLE_DPCS
3887         for (i = 0; i < mp_ncpus; i++) {
3888 #else
3889         for (i = 0; i < 1; i++) {
3890 #endif
3891                 kq = kq_queues + i;
3892
3893                 kq->kq_exit = 1;
3894                 KeInitializeDpc(&dpc, NULL, NULL);
3895                 KeSetTargetProcessorDpc(&dpc, i);
3896                 KeInsertQueueDpc(&dpc, NULL, NULL);
3897                 while (kq->kq_exit)
3898                         tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
3899         }
3900
3901         return;
3902 }
3903
3904 static uint8_t
3905 ntoskrnl_insert_dpc(head, dpc)
3906         list_entry              *head;
3907         kdpc                    *dpc;
3908 {
3909         list_entry              *l;
3910         kdpc                    *d;
3911
3912         l = head->nle_flink;
3913         while (l != head) {
3914                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3915                 if (d == dpc)
3916                         return(FALSE);
3917                 l = l->nle_flink;
3918         }
3919
3920         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3921                 InsertTailList((head), (&dpc->k_dpclistentry));
3922         else
3923                 InsertHeadList((head), (&dpc->k_dpclistentry));
3924
3925         return (TRUE);
3926 }
3927
3928 void
3929 KeInitializeDpc(dpc, dpcfunc, dpcctx)
3930         kdpc                    *dpc;
3931         void                    *dpcfunc;
3932         void                    *dpcctx;
3933 {
3934
3935         if (dpc == NULL)
3936                 return;
3937
3938         dpc->k_deferedfunc = dpcfunc;
3939         dpc->k_deferredctx = dpcctx;
3940         dpc->k_num = KDPC_CPU_DEFAULT;
3941         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3942         InitializeListHead((&dpc->k_dpclistentry));
3943
3944         return;
3945 }
3946
3947 uint8_t
3948 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
3949         kdpc                    *dpc;
3950         void                    *sysarg1;
3951         void                    *sysarg2;
3952 {
3953         kdpc_queue              *kq;
3954         uint8_t                 r;
3955         uint8_t                 irql;
3956
3957         if (dpc == NULL)
3958                 return(FALSE);
3959
3960         kq = kq_queues;
3961
3962 #ifdef NTOSKRNL_MULTIPLE_DPCS
3963         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3964
3965         /*
3966          * By default, the DPC is queued to run on the same CPU
3967          * that scheduled it.
3968          */
3969
3970         if (dpc->k_num == KDPC_CPU_DEFAULT)
3971                 kq += curthread->td_oncpu;
3972         else
3973                 kq += dpc->k_num;
3974         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3975 #else
3976         KeAcquireSpinLock(&kq->kq_lock, &irql);
3977 #endif
3978
3979         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3980         if (r == TRUE) {
3981                 dpc->k_sysarg1 = sysarg1;
3982                 dpc->k_sysarg2 = sysarg2;
3983         }
3984         KeReleaseSpinLock(&kq->kq_lock, irql);
3985
3986         if (r == FALSE)
3987                 return(r);
3988
3989         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3990
3991         return(r);
3992 }
3993
3994 uint8_t
3995 KeRemoveQueueDpc(dpc)
3996         kdpc                    *dpc;
3997 {
3998         kdpc_queue              *kq;
3999         uint8_t                 irql;
4000
4001         if (dpc == NULL)
4002                 return(FALSE);
4003
4004 #ifdef NTOSKRNL_MULTIPLE_DPCS
4005         KeRaiseIrql(DISPATCH_LEVEL, &irql);
4006
4007         kq = kq_queues + dpc->k_num;
4008
4009         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
4010 #else
4011         kq = kq_queues;
4012         KeAcquireSpinLock(&kq->kq_lock, &irql);
4013 #endif
4014
4015         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
4016                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
4017                 KeLowerIrql(irql);
4018                 return(FALSE);
4019         }
4020
4021         RemoveEntryList((&dpc->k_dpclistentry));
4022         InitializeListHead((&dpc->k_dpclistentry));
4023
4024         KeReleaseSpinLock(&kq->kq_lock, irql);
4025
4026         return(TRUE);
4027 }
4028
4029 void
4030 KeSetImportanceDpc(dpc, imp)
4031         kdpc                    *dpc;
4032         uint32_t                imp;
4033 {
4034         if (imp != KDPC_IMPORTANCE_LOW &&
4035             imp != KDPC_IMPORTANCE_MEDIUM &&
4036             imp != KDPC_IMPORTANCE_HIGH)
4037                 return;
4038
4039         dpc->k_importance = (uint8_t)imp;
4040         return;
4041 }
4042
4043 void
4044 KeSetTargetProcessorDpc(dpc, cpu)
4045         kdpc                    *dpc;
4046         uint8_t                 cpu;
4047 {
4048         if (cpu >= mp_ncpus)
4049                 return;
4050
4051         dpc->k_num = cpu;
4052         return;
4053 }
4054
4055 void
4056 KeFlushQueuedDpcs(void)
4057 {
4058         kdpc_queue              *kq;
4059         int                     i;
4060
4061         /*
4062          * Poke each DPC queue and wait
4063          * for them to drain.
4064          */
4065
4066 #ifdef NTOSKRNL_MULTIPLE_DPCS
4067         for (i = 0; i < mp_ncpus; i++) {
4068 #else
4069         for (i = 0; i < 1; i++) {
4070 #endif
4071                 kq = kq_queues + i;
4072                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
4073                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
4074         }
4075
4076         return;
4077 }
4078
4079 uint32_t
4080 KeGetCurrentProcessorNumber(void)
4081 {
4082         return((uint32_t)curthread->td_oncpu);
4083 }
4084
4085 uint8_t
4086 KeSetTimerEx(timer, duetime, period, dpc)
4087         ktimer                  *timer;
4088         int64_t                 duetime;
4089         uint32_t                period;
4090         kdpc                    *dpc;
4091 {
4092         struct timeval          tv;
4093         uint64_t                curtime;
4094         uint8_t                 pending;
4095
4096         if (timer == NULL)
4097                 return(FALSE);
4098
4099         mtx_lock(&ntoskrnl_dispatchlock);
4100
4101         if (timer->k_header.dh_inserted == TRUE) {
4102                 ntoskrnl_remove_timer(timer);
4103 #ifdef NTOSKRNL_DEBUG_TIMERS
4104                 ntoskrnl_timer_cancels++;
4105 #endif
4106                 timer->k_header.dh_inserted = FALSE;
4107                 pending = TRUE;
4108         } else
4109                 pending = FALSE;
4110
4111         timer->k_duetime = duetime;
4112         timer->k_period = period;
4113         timer->k_header.dh_sigstate = FALSE;
4114         timer->k_dpc = dpc;
4115
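             /*
              * Windows expresses timer due times in 100-nanosecond
              * units: a negative value is an interval relative to
              * now, while a non-negative value is an absolute system
              * time, hence the two conversion cases below.
              */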
4116         if (duetime < 0) {
4117                 tv.tv_sec = - (duetime) / 10000000;
4118                 tv.tv_usec = (- (duetime) / 10) -
4119                     (tv.tv_sec * 1000000);
4120         } else {
4121                 ntoskrnl_time(&curtime);
4122                 if (duetime < curtime)
4123                         tv.tv_sec = tv.tv_usec = 0;
4124                 else {
4125                         tv.tv_sec = ((duetime) - curtime) / 10000000;
4126                         tv.tv_usec = ((duetime) - curtime) / 10 -
4127                             (tv.tv_sec * 1000000);
4128                 }
4129         }
4130
4131         timer->k_header.dh_inserted = TRUE;
4132         ntoskrnl_insert_timer(timer, tvtohz(&tv));
4133 #ifdef NTOSKRNL_DEBUG_TIMERS
4134         ntoskrnl_timer_sets++;
4135 #endif
4136
4137         mtx_unlock(&ntoskrnl_dispatchlock);
4138
4139         return(pending);
4140 }
4141
4142 uint8_t
4143 KeSetTimer(timer, duetime, dpc)
4144         ktimer                  *timer;
4145         int64_t                 duetime;
4146         kdpc                    *dpc;
4147 {
4148         return (KeSetTimerEx(timer, duetime, 0, dpc));
4149 }
4150
4151 /*
4152  * The Windows DDK documentation seems to say that cancelling
4153  * a timer that has a DPC will result in the DPC also being
4154  * cancelled, but this isn't really the case.
4155  */
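
     /*
      * Consequently, a driver that wants both the timer and its DPC
      * stopped must do so explicitly. A hedged sketch ("t", "d" and
      * the due time of -10000000, i.e. one second in 100ns units,
      * are illustrative):
      *
      *      KeInitializeTimerEx(&t, EVENT_TYPE_NOTIFY);
      *      KeSetTimerEx(&t, -10000000, 500, &d);
      *      ...
      *      KeCancelTimer(&t);
      *      KeRemoveQueueDpc(&d);
      */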
4156
4157 uint8_t
4158 KeCancelTimer(timer)
4159         ktimer                  *timer;
4160 {
4161         uint8_t                 pending;
4162
4163         if (timer == NULL)
4164                 return(FALSE);
4165
4166         mtx_lock(&ntoskrnl_dispatchlock);
4167
4168         pending = timer->k_header.dh_inserted;
4169
4170         if (timer->k_header.dh_inserted == TRUE) {
4171                 timer->k_header.dh_inserted = FALSE;
4172                 ntoskrnl_remove_timer(timer);
4173 #ifdef NTOSKRNL_DEBUG_TIMERS
4174                 ntoskrnl_timer_cancels++;
4175 #endif
4176         }
4177
4178         mtx_unlock(&ntoskrnl_dispatchlock);
4179
4180         return(pending);
4181 }
4182
4183 uint8_t
4184 KeReadStateTimer(timer)
4185         ktimer                  *timer;
4186 {
4187         return(timer->k_header.dh_sigstate);
4188 }
4189
4190 static void
4191 dummy()
4192 {
4193         printf("ntoskrnl dummy called...\n");
4194         return;
4195 }
4196
4197
4198 image_patch_table ntoskrnl_functbl[] = {
4199         IMPORT_SFUNC(RtlZeroMemory, 2),
4200         IMPORT_SFUNC(RtlCopyMemory, 3),
4201         IMPORT_SFUNC(RtlCompareMemory, 3),
4202         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4203         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4204         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4205         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4206         IMPORT_SFUNC(RtlInitAnsiString, 2),
4207         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4208         IMPORT_SFUNC(RtlInitUnicodeString, 2),
4209         IMPORT_SFUNC(RtlFreeAnsiString, 1),
4210         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4211         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4212         IMPORT_CFUNC(sprintf, 0),
4213         IMPORT_CFUNC(vsprintf, 0),
4214         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4215         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4216         IMPORT_CFUNC(DbgPrint, 0),
4217         IMPORT_SFUNC(DbgBreakPoint, 0),
4218         IMPORT_CFUNC(strncmp, 0),
4219         IMPORT_CFUNC(strcmp, 0),
4220         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4221         IMPORT_CFUNC(strncpy, 0),
4222         IMPORT_CFUNC(strcpy, 0),
4223         IMPORT_CFUNC(strlen, 0),
4224         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4225         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4226         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4227         IMPORT_CFUNC_MAP(strchr, index, 0),
4228         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4229         IMPORT_CFUNC(memcpy, 0),
4230         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4231         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4232         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4233         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4234         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4235         IMPORT_FFUNC(IofCallDriver, 2),
4236         IMPORT_FFUNC(IofCompleteRequest, 2),
4237         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4238         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4239         IMPORT_SFUNC(IoCancelIrp, 1),
4240         IMPORT_SFUNC(IoConnectInterrupt, 11),
4241         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4242         IMPORT_SFUNC(IoCreateDevice, 7),
4243         IMPORT_SFUNC(IoDeleteDevice, 1),
4244         IMPORT_SFUNC(IoGetAttachedDevice, 1),
4245         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4246         IMPORT_SFUNC(IoDetachDevice, 1),
4247         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4248         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4249         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4250         IMPORT_SFUNC(IoAllocateIrp, 2),
4251         IMPORT_SFUNC(IoReuseIrp, 2),
4252         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4253         IMPORT_SFUNC(IoFreeIrp, 1),
4254         IMPORT_SFUNC(IoInitializeIrp, 3),
4255         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4256         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4257         IMPORT_SFUNC(KeSynchronizeExecution, 3),
4258         IMPORT_SFUNC(KeWaitForSingleObject, 5),
4259         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4260         IMPORT_SFUNC(_allmul, 4),
4261         IMPORT_SFUNC(_alldiv, 4),
4262         IMPORT_SFUNC(_allrem, 4),
4263         IMPORT_RFUNC(_allshr, 0),
4264         IMPORT_RFUNC(_allshl, 0),
4265         IMPORT_SFUNC(_aullmul, 4),
4266         IMPORT_SFUNC(_aulldiv, 4),
4267         IMPORT_SFUNC(_aullrem, 4),
4268         IMPORT_RFUNC(_aullshr, 0),
4269         IMPORT_RFUNC(_aullshl, 0),
4270         IMPORT_CFUNC(atoi, 0),
4271         IMPORT_CFUNC(atol, 0),
4272         IMPORT_CFUNC(rand, 0),
4273         IMPORT_CFUNC(srand, 0),
4274         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4275         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4276         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4277         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4278         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4279         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4280         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4281         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4282         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4283         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4284         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4285         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4286         IMPORT_SFUNC(ExQueryDepthSList, 1),
4287         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4288                 InterlockedPopEntrySList, 1),
4289         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4290                 InterlockedPushEntrySList, 2),
4291         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4292         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4293         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4294         IMPORT_SFUNC(ExFreePool, 1),
4295 #ifdef __i386__
4296         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4297         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
4298         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4299 #else
4300         /*
4301          * For AMD64, we can get away with just mapping
4302          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4303          * because the calling conventions end up being the same.
4304          * On i386, we have to be careful because KfAcquireSpinLock()
4305          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4306          */
4307         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4308         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4309         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4310 #endif
4311         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4312         IMPORT_FFUNC(InterlockedIncrement, 1),
4313         IMPORT_FFUNC(InterlockedDecrement, 1),
4314         IMPORT_FFUNC(InterlockedExchange, 2),
4315         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4316         IMPORT_SFUNC(IoAllocateMdl, 5),
4317         IMPORT_SFUNC(IoFreeMdl, 1),
4318         IMPORT_SFUNC(MmAllocateContiguousMemory, 2),
4319         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5),
4320         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4321         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4322         IMPORT_SFUNC_MAP(MmGetPhysicalAddress, pmap_kextract, 1),
4323         IMPORT_SFUNC(MmSizeOfMdl, 1),
4324         IMPORT_SFUNC(MmMapLockedPages, 2),
4325         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4326         IMPORT_SFUNC(MmUnmapLockedPages, 2),
4327         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4328         IMPORT_SFUNC(MmIsAddressValid, 1),
4329         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4330         IMPORT_SFUNC(MmUnmapIoSpace, 2),
4331         IMPORT_SFUNC(KeInitializeSpinLock, 1),
4332         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4333         IMPORT_SFUNC(IoGetDeviceProperty, 5),
4334         IMPORT_SFUNC(IoAllocateWorkItem, 1),
4335         IMPORT_SFUNC(IoFreeWorkItem, 1),
4336         IMPORT_SFUNC(IoQueueWorkItem, 4),
4337         IMPORT_SFUNC(ExQueueWorkItem, 2),
4338         IMPORT_SFUNC(ntoskrnl_workitem, 2),
4339         IMPORT_SFUNC(KeInitializeMutex, 2),
4340         IMPORT_SFUNC(KeReleaseMutex, 2),
4341         IMPORT_SFUNC(KeReadStateMutex, 1),
4342         IMPORT_SFUNC(KeInitializeEvent, 3),
4343         IMPORT_SFUNC(KeSetEvent, 3),
4344         IMPORT_SFUNC(KeResetEvent, 1),
4345         IMPORT_SFUNC(KeClearEvent, 1),
4346         IMPORT_SFUNC(KeReadStateEvent, 1),
4347         IMPORT_SFUNC(KeInitializeTimer, 1),
4348         IMPORT_SFUNC(KeInitializeTimerEx, 2),
4349         IMPORT_SFUNC(KeSetTimer, 3),
4350         IMPORT_SFUNC(KeSetTimerEx, 4),
4351         IMPORT_SFUNC(KeCancelTimer, 1),
4352         IMPORT_SFUNC(KeReadStateTimer, 1),
4353         IMPORT_SFUNC(KeInitializeDpc, 3),
4354         IMPORT_SFUNC(KeInsertQueueDpc, 3),
4355         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4356         IMPORT_SFUNC(KeSetImportanceDpc, 2),
4357         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4358         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4359         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4360         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4361         IMPORT_FFUNC(ObfDereferenceObject, 1),
4362         IMPORT_SFUNC(ZwClose, 1),
4363         IMPORT_SFUNC(PsCreateSystemThread, 7),
4364         IMPORT_SFUNC(PsTerminateSystemThread, 1),
4365         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4366         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4367         IMPORT_CFUNC(WmiTraceMessage, 0),
4368
4369         /*
4370          * This last entry is a catch-all for any function we haven't
4371          * implemented yet. The PE import list patching routine will
4372          * use it for any function that doesn't have an explicit match
4373          * in this table.
4374          */
4375
4376         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4377
4378         /* End of list. */
4379
4380         { NULL, NULL, NULL }
4381 };