1 /*-
2  * Copyright (c) 2003
3  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/ctype.h>
37 #include <sys/unistd.h>
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45
46 #include <sys/callout.h>
47 #if __FreeBSD_version > 502113
48 #include <sys/kdb.h>
49 #endif
50 #include <sys/kernel.h>
51 #include <sys/proc.h>
52 #include <sys/condvar.h>
53 #include <sys/kthread.h>
54 #include <sys/module.h>
55 #include <sys/smp.h>
56 #include <sys/sched.h>
57 #include <sys/sysctl.h>
58
59 #include <machine/atomic.h>
60 #include <machine/bus.h>
61 #include <machine/stdarg.h>
62 #include <machine/resource.h>
63
64 #include <sys/bus.h>
65 #include <sys/rman.h>
66
67 #include <vm/vm.h>
68 #include <vm/vm_param.h>
69 #include <vm/pmap.h>
70 #include <vm/uma.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_map.h>
73
74 #include <compat/ndis/pe_var.h>
75 #include <compat/ndis/cfg_var.h>
76 #include <compat/ndis/resource_var.h>
77 #include <compat/ndis/ntoskrnl_var.h>
78 #include <compat/ndis/hal_var.h>
79 #include <compat/ndis/ndis_var.h>
80
81 #ifdef NTOSKRNL_DEBUG_TIMERS
82 static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
83
84 SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
85         sysctl_show_timers, "I", "Show ntoskrnl timer stats");
86 #endif
87
88 struct kdpc_queue {
89         list_entry              kq_disp;
90         struct thread           *kq_td;
91         int                     kq_cpu;
92         int                     kq_exit;
93         int                     kq_running;
94         kspin_lock              kq_lock;
95         nt_kevent               kq_proc;
96         nt_kevent               kq_done;
97 };
98
99 typedef struct kdpc_queue kdpc_queue;
100
101 struct wb_ext {
102         struct cv               we_cv;
103         struct thread           *we_td;
104 };
105
106 typedef struct wb_ext wb_ext;
107
108 #define NTOSKRNL_TIMEOUTS       256
109 #ifdef NTOSKRNL_DEBUG_TIMERS
110 static uint64_t ntoskrnl_timer_fires;
111 static uint64_t ntoskrnl_timer_sets;
112 static uint64_t ntoskrnl_timer_reloads;
113 static uint64_t ntoskrnl_timer_cancels;
114 #endif
115
116 struct callout_entry {
117         struct callout          ce_callout;
118         list_entry              ce_list;
119 };
120
121 typedef struct callout_entry callout_entry;
122
123 static struct list_entry ntoskrnl_calllist;
124 static struct mtx ntoskrnl_calllock;
125
126 static struct list_entry ntoskrnl_intlist;
127 static kspin_lock ntoskrnl_intlock;
128
129 static uint8_t RtlEqualUnicodeString(unicode_string *,
130         unicode_string *, uint8_t);
131 static void RtlCopyUnicodeString(unicode_string *,
132         unicode_string *);
133 static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
134          void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
135 static irp *IoBuildAsynchronousFsdRequest(uint32_t,
136         device_object *, void *, uint32_t, uint64_t *, io_status_block *);
137 static irp *IoBuildDeviceIoControlRequest(uint32_t,
138         device_object *, void *, uint32_t, void *, uint32_t,
139         uint8_t, nt_kevent *, io_status_block *);
140 static irp *IoAllocateIrp(uint8_t, uint8_t);
141 static void IoReuseIrp(irp *, uint32_t);
142 static void IoFreeIrp(irp *);
143 static void IoInitializeIrp(irp *, uint16_t, uint8_t);
144 static irp *IoMakeAssociatedIrp(irp *, uint8_t);
145 static uint32_t KeWaitForMultipleObjects(uint32_t,
146         nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
147         int64_t *, wait_block *);
148 static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
149 static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
150 static void ntoskrnl_satisfy_multiple_waits(wait_block *);
151 static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
152 static void ntoskrnl_insert_timer(ktimer *, int);
153 static void ntoskrnl_remove_timer(ktimer *);
154 #ifdef NTOSKRNL_DEBUG_TIMERS
155 static void ntoskrnl_show_timers(void);
156 #endif
157 static void ntoskrnl_timercall(void *);
158 static void ntoskrnl_dpc_thread(void *);
159 static void ntoskrnl_destroy_dpc_threads(void);
160 static void ntoskrnl_destroy_workitem_threads(void);
161 static void ntoskrnl_workitem_thread(void *);
162 static void ntoskrnl_workitem(device_object *, void *);
163 static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
164 static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
165 static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
166 static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
167 static uint16_t READ_REGISTER_USHORT(uint16_t *);
168 static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
169 static uint32_t READ_REGISTER_ULONG(uint32_t *);
170 static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
171 static uint8_t READ_REGISTER_UCHAR(uint8_t *);
172 static int64_t _allmul(int64_t, int64_t);
173 static int64_t _alldiv(int64_t, int64_t);
174 static int64_t _allrem(int64_t, int64_t);
175 static int64_t _allshr(int64_t, uint8_t);
176 static int64_t _allshl(int64_t, uint8_t);
177 static uint64_t _aullmul(uint64_t, uint64_t);
178 static uint64_t _aulldiv(uint64_t, uint64_t);
179 static uint64_t _aullrem(uint64_t, uint64_t);
180 static uint64_t _aullshr(uint64_t, uint8_t);
181 static uint64_t _aullshl(uint64_t, uint8_t);
182 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
183 static slist_entry *ntoskrnl_popsl(slist_header *);
184 static void ExInitializePagedLookasideList(paged_lookaside_list *,
185         lookaside_alloc_func *, lookaside_free_func *,
186         uint32_t, size_t, uint32_t, uint16_t);
187 static void ExDeletePagedLookasideList(paged_lookaside_list *);
188 static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
189         lookaside_alloc_func *, lookaside_free_func *,
190         uint32_t, size_t, uint32_t, uint16_t);
191 static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
192 static slist_entry
193         *ExInterlockedPushEntrySList(slist_header *,
194         slist_entry *, kspin_lock *);
195 static slist_entry
196         *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
197 static uint32_t InterlockedIncrement(volatile uint32_t *);
198 static uint32_t InterlockedDecrement(volatile uint32_t *);
199 static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
200 static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
201 static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
202         uint64_t, uint64_t, uint64_t, uint32_t);
203 static void MmFreeContiguousMemory(void *);
204 static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t, uint32_t);
205 static uint32_t MmSizeOfMdl(void *, size_t);
206 static void *MmMapLockedPages(mdl *, uint8_t);
207 static void *MmMapLockedPagesSpecifyCache(mdl *,
208         uint8_t, uint32_t, void *, uint32_t, uint32_t);
209 static void MmUnmapLockedPages(void *, mdl *);
210 static uint8_t MmIsAddressValid(void *);
211 static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
212 static void RtlZeroMemory(void *, size_t);
213 static void RtlCopyMemory(void *, const void *, size_t);
214 static size_t RtlCompareMemory(const void *, const void *, size_t);
215 static ndis_status RtlUnicodeStringToInteger(unicode_string *,
216         uint32_t, uint32_t *);
217 static int atoi (const char *);
218 static long atol (const char *);
219 static int rand(void);
220 static void srand(unsigned int);
221 static void KeQuerySystemTime(uint64_t *);
222 static uint32_t KeTickCount(void);
223 static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
224 static void ntoskrnl_thrfunc(void *);
225 static ndis_status PsCreateSystemThread(ndis_handle *,
226         uint32_t, void *, ndis_handle, void *, void *, void *);
227 static ndis_status PsTerminateSystemThread(ndis_status);
228 static ndis_status IoGetDeviceObjectPointer(unicode_string *,
229         uint32_t, void *, device_object *);
230 static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
231         uint32_t, void *, uint32_t *);
232 static void KeInitializeMutex(kmutant *, uint32_t);
233 static uint32_t KeReleaseMutex(kmutant *, uint8_t);
234 static uint32_t KeReadStateMutex(kmutant *);
235 static ndis_status ObReferenceObjectByHandle(ndis_handle,
236         uint32_t, void *, uint8_t, void **, void **);
237 static void ObfDereferenceObject(void *);
238 static uint32_t ZwClose(ndis_handle);
239 static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
240         uint32_t, void *);
241 static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
242 static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
243 static void *ntoskrnl_memset(void *, int, size_t);
244 static void *ntoskrnl_memmove(void *, void *, size_t);
245 static void *ntoskrnl_memchr(void *, unsigned char, size_t);
246 static char *ntoskrnl_strstr(char *, char *);
247 static char *ntoskrnl_strncat(char *, char *, size_t);
248 static int ntoskrnl_toupper(int);
249 static int ntoskrnl_tolower(int);
250 static funcptr ntoskrnl_findwrap(funcptr);
251 static uint32_t DbgPrint(char *, ...);
252 static void DbgBreakPoint(void);
253 static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
254 static void dummy(void);
255
256 static struct mtx ntoskrnl_dispatchlock;
257 static struct mtx ntoskrnl_interlock;
258 static kspin_lock ntoskrnl_cancellock;
259 static int ntoskrnl_kth = 0;
260 static struct nt_objref_head ntoskrnl_reflist;
261 static uma_zone_t mdl_zone;
262 static uma_zone_t iw_zone;
263 static struct kdpc_queue *kq_queues;
264 static struct kdpc_queue *wq_queues;
265 static int wq_idx = 0;
266
267 int
268 ntoskrnl_libinit()
269 {
270         image_patch_table       *patch;
271         int                     error;
272         struct proc             *p;
273         kdpc_queue              *kq;
274         callout_entry           *e;
275         int                     i;
276         char                    name[64];
277
278         mtx_init(&ntoskrnl_dispatchlock,
279             "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
280         mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
281         KeInitializeSpinLock(&ntoskrnl_cancellock);
282         KeInitializeSpinLock(&ntoskrnl_intlock);
283         TAILQ_INIT(&ntoskrnl_reflist);
284
285         InitializeListHead(&ntoskrnl_calllist);
286         InitializeListHead(&ntoskrnl_intlist);
287         mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
288
289         kq_queues = ExAllocatePoolWithTag(NonPagedPool,
290 #ifdef NTOSKRNL_MULTIPLE_DPCS
291             sizeof(kdpc_queue) * mp_ncpus, 0);
292 #else
293             sizeof(kdpc_queue), 0);
294 #endif
295
296         if (kq_queues == NULL)
297                 return(ENOMEM);
298
299         wq_queues = ExAllocatePoolWithTag(NonPagedPool,
300             sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
301
302         if (wq_queues == NULL)
303                 return(ENOMEM);
304
305 #ifdef NTOSKRNL_MULTIPLE_DPCS
306         bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
307 #else
308         bzero((char *)kq_queues, sizeof(kdpc_queue));
309 #endif
310         bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);
311
312         /*
313          * Launch the DPC threads.
314          */
315
316 #ifdef NTOSKRNL_MULTIPLE_DPCS
317         for (i = 0; i < mp_ncpus; i++) {
318 #else
319         for (i = 0; i < 1; i++) {
320 #endif
321                 kq = kq_queues + i;
322                 kq->kq_cpu = i;
323                 sprintf(name, "Windows DPC %d", i);
324                 error = kthread_create(ntoskrnl_dpc_thread, kq, &p,
325                     RFHIGHPID, NDIS_KSTACK_PAGES, name);
326                 if (error)
327                         panic("failed to launch DPC thread");
328         }
329
330         /*
331          * Launch the workitem threads.
332          */
333
334         for (i = 0; i < WORKITEM_THREADS; i++) {
335                 kq = wq_queues + i;
336                 sprintf(name, "Windows Workitem %d", i);
337                 error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
338                     RFHIGHPID, NDIS_KSTACK_PAGES, name);
339                 if (error)
340                         panic("failed to launch workitem thread");
341         }
342
343         patch = ntoskrnl_functbl;
344         while (patch->ipt_func != NULL) {
345                 windrv_wrap((funcptr)patch->ipt_func,
346                     (funcptr *)&patch->ipt_wrap,
347                     patch->ipt_argcnt, patch->ipt_ftype);
348                 patch++;
349         }
350
351         for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
352                 e = ExAllocatePoolWithTag(NonPagedPool,
353                     sizeof(callout_entry), 0);
354                 if (e == NULL)
355                         panic("failed to allocate timeouts");
356                 mtx_lock_spin(&ntoskrnl_calllock);
357                 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
358                 mtx_unlock_spin(&ntoskrnl_calllock);
359         }
360
361         /*
362          * MDLs are supposed to be variable size (they describe
363          * buffers containing some number of pages, but we don't
364          * know ahead of time how many pages that will be). But
365          * always allocating them off the heap is very slow. As
366          * a compromise, we create an MDL UMA zone big enough to
367          * handle any buffer requiring up to 16 pages, and we
368          * use those for any MDLs for buffers of 16 pages or less
369          * in size. For buffers larger than that (which we assume
370          * will be few and far between), we allocate the MDLs off
371          * the heap.
372          */
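        /*
         * Illustrative note (editorial addition, not from the original
         * source): MDL_ZONE_SIZE is assumed to be an mdl header plus
         * room for a 16-entry page array, roughly
         * sizeof(mdl) + 16 * sizeof(vm_offset_t), so with 4KB pages the
         * zone covers buffers of up to 64KB and anything larger is
         * expected to come off the heap, as described above.
         */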
373
374         mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
375             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
376
377         iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
378             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
379
380         return(0);
381 }
382
383 int
384 ntoskrnl_libfini()
385 {
386         image_patch_table       *patch;
387         callout_entry           *e;
388         list_entry              *l;
389
390         patch = ntoskrnl_functbl;
391         while (patch->ipt_func != NULL) {
392                 windrv_unwrap(patch->ipt_wrap);
393                 patch++;
394         }
395
396         /* Stop the workitem queues. */
397         ntoskrnl_destroy_workitem_threads();
398         /* Stop the DPC queues. */
399         ntoskrnl_destroy_dpc_threads();
400
401         ExFreePool(kq_queues);
402         ExFreePool(wq_queues);
403
404         uma_zdestroy(mdl_zone);
405         uma_zdestroy(iw_zone);
406
407         mtx_lock_spin(&ntoskrnl_calllock);
408         while(!IsListEmpty(&ntoskrnl_calllist)) {
409                 l = RemoveHeadList(&ntoskrnl_calllist);
410                 e = CONTAINING_RECORD(l, callout_entry, ce_list);
411                 mtx_unlock_spin(&ntoskrnl_calllock);
412                 ExFreePool(e);
413                 mtx_lock_spin(&ntoskrnl_calllock);
414         }
415         mtx_unlock_spin(&ntoskrnl_calllock);
416
417         mtx_destroy(&ntoskrnl_dispatchlock);
418         mtx_destroy(&ntoskrnl_interlock);
419         mtx_destroy(&ntoskrnl_calllock);
420
421         return(0);
422 }
423
424 /*
425  * We need to be able to reference this externally from the wrapper;
426  * GCC only generates a local implementation of memset.
427  */
428 static void *
429 ntoskrnl_memset(buf, ch, size)
430         void                    *buf;
431         int                     ch;
432         size_t                  size;
433 {
434         return(memset(buf, ch, size));
435 }
436
437 static void *
438 ntoskrnl_memmove(dst, src, size)
439         void                    *src;
440         void                    *dst;
441         size_t                  size;
442 {
443         bcopy(src, dst, size);
444         return(dst);
445 }
446
447 static void *
448 ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
449 {
450         if (len != 0) {
451                 unsigned char *p = buf;
452
453                 do {
454                         if (*p++ == ch)
455                                 return (p - 1);
456                 } while (--len != 0);
457         }
458         return (NULL);
459 }
460
461 static char *
462 ntoskrnl_strstr(s, find)
463         char *s, *find;
464 {
465         char c, sc;
466         size_t len;
467
468         if ((c = *find++) != 0) {
469                 len = strlen(find);
470                 do {
471                         do {
472                                 if ((sc = *s++) == 0)
473                                         return (NULL);
474                         } while (sc != c);
475                 } while (strncmp(s, find, len) != 0);
476                 s--;
477         }
478         return ((char *)s);
479 }
480
481 /* Taken from libc */
482 static char *
483 ntoskrnl_strncat(dst, src, n)
484         char            *dst;
485         char            *src;
486         size_t          n;
487 {
488         if (n != 0) {
489                 char *d = dst;
490                 const char *s = src;
491
492                 while (*d != 0)
493                         d++;
494                 do {
495                         if ((*d = *s++) == 0)
496                                 break;
497                         d++;
498                 } while (--n != 0);
499                 *d = 0;
500         }
501         return (dst);
502 }
503
504 static int
505 ntoskrnl_toupper(c)
506         int                     c;
507 {
508         return(toupper(c));
509 }
510
511 static int
512 ntoskrnl_tolower(c)
513         int                     c;
514 {
515         return(tolower(c));
516 }
517
518 static uint8_t 
519 RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
520         uint8_t caseinsensitive)
521 {
522         int                     i;
523
524         if (str1->us_len != str2->us_len)
525                 return(FALSE);
526
527         for (i = 0; i < str1->us_len; i++) {
528                 if (caseinsensitive == TRUE) {
529                         if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
530                             toupper((char)(str2->us_buf[i] & 0xFF)))
531                                 return(FALSE);
532                 } else {
533                         if (str1->us_buf[i] != str2->us_buf[i])
534                                 return(FALSE);
535                 }
536         }
537
538         return(TRUE);
539 }
540
541 static void
542 RtlCopyUnicodeString(dest, src)
543         unicode_string          *dest;
544         unicode_string          *src;
545 {
546
547         if (dest->us_maxlen >= src->us_len)
548                 dest->us_len = src->us_len;
549         else
550                 dest->us_len = dest->us_maxlen;
551         memcpy(dest->us_buf, src->us_buf, dest->us_len);
552         return;
553 }
554
555 static void
556 ntoskrnl_ascii_to_unicode(ascii, unicode, len)
557         char                    *ascii;
558         uint16_t                *unicode;
559         int                     len;
560 {
561         int                     i;
562         uint16_t                *ustr;
563
564         ustr = unicode;
565         for (i = 0; i < len; i++) {
566                 *ustr = (uint16_t)ascii[i];
567                 ustr++;
568         }
569
570         return;
571 }
572
573 static void
574 ntoskrnl_unicode_to_ascii(unicode, ascii, len)
575         uint16_t                *unicode;
576         char                    *ascii;
577         int                     len;
578 {
579         int                     i;
580         uint8_t                 *astr;
581
582         astr = ascii;
583         for (i = 0; i < len / 2; i++) {
584                 *astr = (uint8_t)unicode[i];
585                 astr++;
586         }
587
588         return;
589 }
590
591 uint32_t
592 RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
593 {
594         if (dest == NULL || src == NULL)
595                 return(STATUS_INVALID_PARAMETER);
596
597         dest->as_len = src->us_len / 2;
598         if (dest->as_maxlen < dest->as_len)
599                 dest->as_len = dest->as_maxlen;
600
601         if (allocate == TRUE) {
602                 dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
603                     (src->us_len / 2) + 1, 0);
604                 if (dest->as_buf == NULL)
605                         return(STATUS_INSUFFICIENT_RESOURCES);
606                 dest->as_len = dest->as_maxlen = src->us_len / 2;
607         } else {
608                 dest->as_len = src->us_len / 2; /* XXX */
609                 if (dest->as_maxlen < dest->as_len)
610                         dest->as_len = dest->as_maxlen;
611         }
612
613         ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
614             dest->as_len * 2);
615
616         return (STATUS_SUCCESS);
617 }
618
619 uint32_t
620 RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
621         uint8_t allocate)
622 {
623         if (dest == NULL || src == NULL)
624                 return(STATUS_INVALID_PARAMETER);
625
626         if (allocate == TRUE) {
627                 dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
628                     src->as_len * 2, 0);
629                 if (dest->us_buf == NULL)
630                         return(STATUS_INSUFFICIENT_RESOURCES);
631                 dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
632         } else {
633                 dest->us_len = src->as_len * 2; /* XXX */
634                 if (dest->us_maxlen < dest->us_len)
635                         dest->us_len = dest->us_maxlen;
636         }
637
638         ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
639             dest->us_len / 2);
640
641         return (STATUS_SUCCESS);
642 }
643
644 void *
645 ExAllocatePoolWithTag(pooltype, len, tag)
646         uint32_t                pooltype;
647         size_t                  len;
648         uint32_t                tag;
649 {
650         void                    *buf;
651
652         buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
653         if (buf == NULL)
654                 return(NULL);
655
656         return(buf);
657 }
658
659 void
660 ExFreePool(buf)
661         void                    *buf;
662 {
663         free(buf, M_DEVBUF);
664         return;
665 }
666
667 uint32_t
668 IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
669         driver_object           *drv;
670         void                    *clid;
671         uint32_t                extlen;
672         void                    **ext;
673 {
674         custom_extension        *ce;
675
676         ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
677             + extlen, 0);
678
679         if (ce == NULL)
680                 return(STATUS_INSUFFICIENT_RESOURCES);
681
682         ce->ce_clid = clid;
683         InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
684
685         *ext = (void *)(ce + 1);
686
687         return(STATUS_SUCCESS);
688 }
689
690 void *
691 IoGetDriverObjectExtension(drv, clid)
692         driver_object           *drv;
693         void                    *clid;
694 {
695         list_entry              *e;
696         custom_extension        *ce;
697
698         /*
699          * Sanity check. Our dummy bus drivers don't have
700          * any driver extensions.
701          */
702
703         if (drv->dro_driverext == NULL)
704                 return(NULL);
705
706         e = drv->dro_driverext->dre_usrext.nle_flink;
707         while (e != &drv->dro_driverext->dre_usrext) {
708                 ce = (custom_extension *)e;
709                 if (ce->ce_clid == clid)
710                         return((void *)(ce + 1));
711                 e = e->nle_flink;
712         }
713
714         return(NULL);
715 }
716
717
718 uint32_t
719 IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
720         uint32_t devtype, uint32_t devchars, uint8_t exclusive,
721         device_object **newdev)
722 {
723         device_object           *dev;
724
725         dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
726         if (dev == NULL)
727                 return(STATUS_INSUFFICIENT_RESOURCES);
728
729         dev->do_type = devtype;
730         dev->do_drvobj = drv;
731         dev->do_currirp = NULL;
732         dev->do_flags = 0;
733
734         if (devextlen) {
735                 dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
736                     devextlen, 0);
737
738                 if (dev->do_devext == NULL) {
739                         ExFreePool(dev);
740                         return(STATUS_INSUFFICIENT_RESOURCES);
741                 }
742
743                 bzero(dev->do_devext, devextlen);
744         } else
745                 dev->do_devext = NULL;
746
747         dev->do_size = sizeof(device_object) + devextlen;
748         dev->do_refcnt = 1;
749         dev->do_attacheddev = NULL;
750         dev->do_nextdev = NULL;
751         dev->do_devtype = devtype;
752         dev->do_stacksize = 1;
753         dev->do_alignreq = 1;
754         dev->do_characteristics = devchars;
755         dev->do_iotimer = NULL;
756         KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
757
758         /*
759          * Vpd is used for disk/tape devices,
760          * but we don't support those. (Yet.)
761          */
762         dev->do_vpb = NULL;
763
764         dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
765             sizeof(devobj_extension), 0);
766
767         if (dev->do_devobj_ext == NULL) {
768                 if (dev->do_devext != NULL)
769                         ExFreePool(dev->do_devext);
770                 ExFreePool(dev);
771                 return(STATUS_INSUFFICIENT_RESOURCES);
772         }
773
774         dev->do_devobj_ext->dve_type = 0;
775         dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
776         dev->do_devobj_ext->dve_devobj = dev;
777
778         /*
779          * Attach this device to the driver object's list
780          * of devices. Note: this is not the same as attaching
781          * the device to the device stack. The driver's AddDevice
782          * routine must explicitly call IoAttachDeviceToDeviceStack()
783          * to do that.
784          */
785
786         if (drv->dro_devobj == NULL) {
787                 drv->dro_devobj = dev;
788                 dev->do_nextdev = NULL;
789         } else {
790                 dev->do_nextdev = drv->dro_devobj;
791                 drv->dro_devobj = dev;
792         }
793
794         *newdev = dev;
795
796         return(STATUS_SUCCESS);
797 }
798
799 void
800 IoDeleteDevice(dev)
801         device_object           *dev;
802 {
803         device_object           *prev;
804
805         if (dev == NULL)
806                 return;
807
808         if (dev->do_devobj_ext != NULL)
809                 ExFreePool(dev->do_devobj_ext);
810
811         if (dev->do_devext != NULL)
812                 ExFreePool(dev->do_devext);
813
814         /* Unlink the device from the driver's device list. */
815
816         prev = dev->do_drvobj->dro_devobj;
817         if (prev == dev)
818                 dev->do_drvobj->dro_devobj = dev->do_nextdev;
819         else {
820                 while (prev->do_nextdev != dev)
821                         prev = prev->do_nextdev;
822                 prev->do_nextdev = dev->do_nextdev;
823         }
824
825         ExFreePool(dev);
826
827         return;
828 }
829
830 device_object *
831 IoGetAttachedDevice(dev)
832         device_object           *dev;
833 {
834         device_object           *d;
835
836         if (dev == NULL)
837                 return (NULL);
838
839         d = dev;
840
841         while (d->do_attacheddev != NULL)
842                 d = d->do_attacheddev;
843
844         return (d);
845 }
846
847 static irp *
848 IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
849         uint32_t                func;
850         device_object           *dobj;
851         void                    *buf;
852         uint32_t                len;
853         uint64_t                *off;
854         nt_kevent               *event;
855         io_status_block         *status;
856 {
857         irp                     *ip;
858
859         ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
860         if (ip == NULL)
861                 return(NULL);
862         ip->irp_usrevent = event;
863
864         return(ip);
865 }
866
867 static irp *
868 IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
869         uint32_t                func;
870         device_object           *dobj;
871         void                    *buf;
872         uint32_t                len;
873         uint64_t                *off;
874         io_status_block         *status;
875 {
876         irp                     *ip;
877         io_stack_location       *sl;
878
879         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
880         if (ip == NULL)
881                 return(NULL);
882
883         ip->irp_usriostat = status;
884         ip->irp_tail.irp_overlay.irp_thread = NULL;
885
886         sl = IoGetNextIrpStackLocation(ip);
887         sl->isl_major = func;
888         sl->isl_minor = 0;
889         sl->isl_flags = 0;
890         sl->isl_ctl = 0;
891         sl->isl_devobj = dobj;
892         sl->isl_fileobj = NULL;
893         sl->isl_completionfunc = NULL;
894
895         ip->irp_userbuf = buf;
896
897         if (dobj->do_flags & DO_BUFFERED_IO) {
898                 ip->irp_assoc.irp_sysbuf =
899                     ExAllocatePoolWithTag(NonPagedPool, len, 0);
900                 if (ip->irp_assoc.irp_sysbuf == NULL) {
901                         IoFreeIrp(ip);
902                         return(NULL);
903                 }
904                 bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
905         }
906
907         if (dobj->do_flags & DO_DIRECT_IO) {
908                 ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
909                 if (ip->irp_mdl == NULL) {
910                         if (ip->irp_assoc.irp_sysbuf != NULL)
911                                 ExFreePool(ip->irp_assoc.irp_sysbuf);
912                         IoFreeIrp(ip);
913                         return(NULL);
914                 }
915                 ip->irp_userbuf = NULL;
916                 ip->irp_assoc.irp_sysbuf = NULL;
917         }
918
919         if (func == IRP_MJ_READ) {
920                 sl->isl_parameters.isl_read.isl_len = len;
921                 if (off != NULL)
922                         sl->isl_parameters.isl_read.isl_byteoff = *off;
923                 else
924                         sl->isl_parameters.isl_read.isl_byteoff = 0;
925         }
926
927         if (func == IRP_MJ_WRITE) {
928                 sl->isl_parameters.isl_write.isl_len = len;
929                 if (off != NULL)
930                         sl->isl_parameters.isl_write.isl_byteoff = *off;
931                 else
932                         sl->isl_parameters.isl_write.isl_byteoff = 0;
933         }       
934
935         return(ip);
936 }
937
938 static irp *
939 IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
940         uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
941         nt_kevent *event, io_status_block *status)
942 {
943         irp                     *ip;
944         io_stack_location       *sl;
945         uint32_t                buflen;
946
947         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
948         if (ip == NULL)
949                 return(NULL);
950         ip->irp_usrevent = event;
951         ip->irp_usriostat = status;
952         ip->irp_tail.irp_overlay.irp_thread = NULL;
953
954         sl = IoGetNextIrpStackLocation(ip);
955         sl->isl_major = isinternal == TRUE ?
956             IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
957         sl->isl_minor = 0;
958         sl->isl_flags = 0;
959         sl->isl_ctl = 0;
960         sl->isl_devobj = dobj;
961         sl->isl_fileobj = NULL;
962         sl->isl_completionfunc = NULL;
963         sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
964         sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
965         sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
966
967         switch(IO_METHOD(iocode)) {
968         case METHOD_BUFFERED:
969                 if (ilen > olen)
970                         buflen = ilen;
971                 else
972                         buflen = olen;
973                 if (buflen) {
974                         ip->irp_assoc.irp_sysbuf =
975                             ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
976                         if (ip->irp_assoc.irp_sysbuf == NULL) {
977                                 IoFreeIrp(ip);
978                                 return(NULL);
979                         }
980                 }
981                 if (ilen && ibuf != NULL) {
982                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
983                         bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
984                             buflen - ilen);
985                 } else
986                         bzero(ip->irp_assoc.irp_sysbuf, ilen);
987                 ip->irp_userbuf = obuf;
988                 break;
989         case METHOD_IN_DIRECT:
990         case METHOD_OUT_DIRECT:
991                 if (ilen && ibuf != NULL) {
992                         ip->irp_assoc.irp_sysbuf =
993                             ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
994                         if (ip->irp_assoc.irp_sysbuf == NULL) {
995                                 IoFreeIrp(ip);
996                                 return(NULL);
997                         }
998                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
999                 }
1000                 if (olen && obuf != NULL) {
1001                         ip->irp_mdl = IoAllocateMdl(obuf, olen,
1002                             FALSE, FALSE, ip);
1003                         /*
1004                          * Normally we would MmProbeAndLockPages()
1005                          * here, but we don't have to in our
1006                          * implementation.
1007                          */
1008                 }
1009                 break;
1010         case METHOD_NEITHER:
1011                 ip->irp_userbuf = obuf;
1012                 sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
1013                 break;
1014         default:
1015                 break;
1016         }
1017
1018         /*
1019          * Ideally, we should associate this IRP with the calling
1020          * thread here.
1021          */
1022
1023         return (ip);
1024 }
1025
1026 static irp *
1027 IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
1028 {
1029         irp                     *i;
1030
1031         i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
1032         if (i == NULL)
1033                 return (NULL);
1034
1035         IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
1036
1037         return (i);
1038 }
1039
1040 static irp *
1041 IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
1042 {
1043         irp                     *associrp;
1044
1045         associrp = IoAllocateIrp(stsize, FALSE);
1046         if (associrp == NULL)
1047                 return(NULL);
1048
1049         mtx_lock(&ntoskrnl_dispatchlock);
1050         associrp->irp_flags |= IRP_ASSOCIATED_IRP;
1051         associrp->irp_tail.irp_overlay.irp_thread =
1052             ip->irp_tail.irp_overlay.irp_thread;
1053         associrp->irp_assoc.irp_master = ip;
1054         mtx_unlock(&ntoskrnl_dispatchlock);
1055
1056         return(associrp);
1057 }
1058
1059 static void
1060 IoFreeIrp(ip)
1061         irp                     *ip;
1062 {
1063         ExFreePool(ip);
1064         return;
1065 }
1066
1067 static void
1068 IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
1069 {
1070         bzero((char *)io, IoSizeOfIrp(ssize));
1071         io->irp_size = psize;
1072         io->irp_stackcnt = ssize;
1073         io->irp_currentstackloc = ssize;
1074         InitializeListHead(&io->irp_thlist);
1075         io->irp_tail.irp_overlay.irp_csl =
1076             (io_stack_location *)(io + 1) + ssize;
1077
1078         return;
1079 }
1080
1081 static void
1082 IoReuseIrp(ip, status)
1083         irp                     *ip;
1084         uint32_t                status;
1085 {
1086         uint8_t                 allocflags;
1087
1088         allocflags = ip->irp_allocflags;
1089         IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
1090         ip->irp_iostat.isb_status = status;
1091         ip->irp_allocflags = allocflags;
1092
1093         return;
1094 }
1095
1096 void
1097 IoAcquireCancelSpinLock(uint8_t *irql)
1098 {
1099         KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
1100         return;
1101 }
1102
1103 void
1104 IoReleaseCancelSpinLock(uint8_t irql)
1105 {
1106         KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
1107         return;
1108 }
1109
1110 uint8_t
1111 IoCancelIrp(irp *ip)
1112 {
1113         cancel_func             cfunc;
1114
1115         IoAcquireCancelSpinLock(&ip->irp_cancelirql);
1116         cfunc = IoSetCancelRoutine(ip, NULL);
1117         ip->irp_cancel = TRUE;
1118         if (ip->irp_cancelfunc == NULL) {
1119                 IoReleaseCancelSpinLock(ip->irp_cancelirql);
1120                 return(FALSE);
1121         }
1122         MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
1123         return(TRUE);
1124 }
1125
1126 uint32_t
1127 IofCallDriver(dobj, ip)
1128         device_object           *dobj;
1129         irp                     *ip;
1130 {
1131         driver_object           *drvobj;
1132         io_stack_location       *sl;
1133         uint32_t                status;
1134         driver_dispatch         disp;
1135
1136         drvobj = dobj->do_drvobj;
1137
1138         if (ip->irp_currentstackloc <= 0)
1139                 panic("IoCallDriver(): out of stack locations");
1140
1141         IoSetNextIrpStackLocation(ip);
1142         sl = IoGetCurrentIrpStackLocation(ip);
1143
1144         sl->isl_devobj = dobj;
1145
1146         disp = drvobj->dro_dispatch[sl->isl_major];
1147         status = MSCALL2(disp, dobj, ip);
1148
1149         return(status);
1150 }
1151
1152 void
1153 IofCompleteRequest(irp *ip, uint8_t prioboost)
1154 {
1155         uint32_t                i;
1156         uint32_t                status;
1157         device_object           *dobj;
1158         io_stack_location       *sl;
1159         completion_func         cf;
1160
1161         ip->irp_pendingreturned =
1162             IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
1163         sl = (io_stack_location *)(ip + 1);
1164
1165         for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
1166                 if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
1167                         IoSkipCurrentIrpStackLocation(ip);
1168                         dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
1169                 } else
1170                         dobj = NULL;
1171
1172                 if (sl[i].isl_completionfunc != NULL &&
1173                     ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
1174                     sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
1175                     (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
1176                     sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
1177                     (ip->irp_cancel == TRUE &&
1178                     sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
1179                         cf = sl->isl_completionfunc;
1180                         status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
1181                         if (status == STATUS_MORE_PROCESSING_REQUIRED)
1182                                 return;
1183                 }
1184
1185                 if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
1186                     SL_PENDING_RETURNED)
1187                         ip->irp_pendingreturned = TRUE;
1188         }
1189
1190         /* Handle any associated IRPs. */
1191
1192         if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
1193                 uint32_t                masterirpcnt;
1194                 irp                     *masterirp;
1195                 mdl                     *m;
1196
1197                 masterirp = ip->irp_assoc.irp_master;
1198                 masterirpcnt =
1199                     InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);
1200
1201                 while ((m = ip->irp_mdl) != NULL) {
1202                         ip->irp_mdl = m->mdl_next;
1203                         IoFreeMdl(m);
1204                 }
1205                 IoFreeIrp(ip);
1206                 if (masterirpcnt == 0)
1207                         IoCompleteRequest(masterirp, IO_NO_INCREMENT);
1208                 return;
1209         }
1210
1211         /* With any luck, these conditions will never arise. */
1212
1213         if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
1214                 if (ip->irp_usriostat != NULL)
1215                         *ip->irp_usriostat = ip->irp_iostat;
1216                 if (ip->irp_usrevent != NULL)
1217                         KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
1218                 if (ip->irp_flags & IRP_PAGING_IO) {
1219                         if (ip->irp_mdl != NULL)
1220                                 IoFreeMdl(ip->irp_mdl);
1221                         IoFreeIrp(ip);
1222                 }
1223         }
1224
1225         return;
1226 }
1227
1228 void
1229 ntoskrnl_intr(arg)
1230         void                    *arg;
1231 {
1232         kinterrupt              *iobj;
1233         uint8_t                 irql;
1234         uint8_t                 claimed;
1235         list_entry              *l;
1236
1237         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1238         l = ntoskrnl_intlist.nle_flink;
1239         while (l != &ntoskrnl_intlist) {
1240                 iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
1241                 claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
1242                 if (claimed == TRUE)
1243                         break;
1244                 l = l->nle_flink;
1245         }
1246         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1247
1248         return;
1249 }
1250
1251 uint8_t
1252 KeAcquireInterruptSpinLock(iobj)
1253         kinterrupt              *iobj;
1254 {
1255         uint8_t                 irql;
1256         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1257         return(irql);
1258 }
1259
1260 void
1261 KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
1262 {
1263         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1264         return;
1265 }
1266
1267 uint8_t
1268 KeSynchronizeExecution(iobj, syncfunc, syncctx)
1269         kinterrupt              *iobj;
1270         void                    *syncfunc;
1271         void                    *syncctx;
1272 {
1273         uint8_t                 irql;
1274         
1275         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1276         MSCALL1(syncfunc, syncctx);
1277         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1278
1279         return(TRUE);
1280 }
1281
1282 /*
1283  * IoConnectInterrupt() is passed only the interrupt vector and
1284  * irql that a device wants to use, but no device-specific tag
1285  * of any kind. This conflicts rather badly with FreeBSD's
1286  * bus_setup_intr(), which needs the device_t for the device
1287  * requesting interrupt delivery. In order to bypass this
1288  * inconsistency, we implement a second level of interrupt
1289  * dispatching on top of bus_setup_intr(). All devices use
1290  * ntoskrnl_intr() as their ISR, and any device requesting
1291  * interrupts will be registered with ntoskrnl_intr()'s interrupt
1292  * dispatch list. When an interrupt arrives, we walk the list and
1293  * invoke the registered ISRs until one claims the interrupt. This
1294  * effectively makes all interrupts shared, but it's the only way to
1295  * duplicate the semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
1296  */
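/*
 * Illustrative call (editorial sketch, not from the original source):
 * a miniport hooking up its interrupt would do something along these
 * lines, where "ih", "my_isr", "sc", "vector", "irql", "imode" and
 * "affinity" are placeholders for the driver's own handle, service
 * routine, context and resource values:
 *
 *	kinterrupt *ih;
 *	uint32_t status;
 *
 *	status = IoConnectInterrupt(&ih, my_isr, sc, NULL, vector,
 *	    irql, irql, imode, TRUE, affinity, FALSE);
 *
 * The new kinterrupt simply lands on ntoskrnl_intlist, and
 * ntoskrnl_intr() above calls my_isr via MSCALL2() whenever the
 * shared handler fires.
 */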
1297
1298 uint32_t
1299 IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
1300         kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
1301         uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
1302 {
1303         uint8_t                 curirql;
1304
1305         *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
1306         if (*iobj == NULL)
1307                 return(STATUS_INSUFFICIENT_RESOURCES);
1308
1309         (*iobj)->ki_svcfunc = svcfunc;
1310         (*iobj)->ki_svcctx = svcctx;
1311
1312         if (lock == NULL) {
1313                 KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
1314                 (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
1315         } else
1316                 (*iobj)->ki_lock = lock;
1317
1318         KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
1319         InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
1320         KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
1321
1322         return(STATUS_SUCCESS);
1323 }
1324
1325 void
1326 IoDisconnectInterrupt(iobj)
1327         kinterrupt              *iobj;
1328 {
1329         uint8_t                 irql;
1330
1331         if (iobj == NULL)
1332                 return;
1333
1334         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1335         RemoveEntryList((&iobj->ki_list));
1336         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1337
1338         ExFreePool(iobj);
1339
1340         return;
1341 }
1342
1343 device_object *
1344 IoAttachDeviceToDeviceStack(src, dst)
1345         device_object           *src;
1346         device_object           *dst;
1347 {
1348         device_object           *attached;
1349
1350         mtx_lock(&ntoskrnl_dispatchlock);
1351         attached = IoGetAttachedDevice(dst);
1352         attached->do_attacheddev = src;
1353         src->do_attacheddev = NULL;
1354         src->do_stacksize = attached->do_stacksize + 1;
1355         mtx_unlock(&ntoskrnl_dispatchlock);
1356
1357         return(attached);
1358 }
1359
1360 void
1361 IoDetachDevice(topdev)
1362         device_object           *topdev;
1363 {
1364         device_object           *tail;
1365
1366         mtx_lock(&ntoskrnl_dispatchlock);
1367
1368         /* First, break the chain. */
1369         tail = topdev->do_attacheddev;
1370         if (tail == NULL) {
1371                 mtx_unlock(&ntoskrnl_dispatchlock);
1372                 return;
1373         }
1374         topdev->do_attacheddev = tail->do_attacheddev;
1375         topdev->do_refcnt--;
1376
1377         /* Now reduce the stacksize count for the tail objects. */
1378
1379         tail = topdev->do_attacheddev;
1380         while (tail != NULL) {
1381                 tail->do_stacksize--;
1382                 tail = tail->do_attacheddev;
1383         }
1384
1385         mtx_unlock(&ntoskrnl_dispatchlock);
1386
1387         return;
1388 }
1389
1390 /*
1391  * For the most part, an object is considered signalled if
1392  * dh_sigstate == TRUE. The exception is for mutant objects
1393  * (mutexes), where the logic works like this:
1394  *
1395  * - If the thread already owns the object and sigstate is
1396  *   less than or equal to 0, then the object is considered
1397  *   signalled (recursive acquisition).
1398  * - If dh_sigstate == 1, the object is also considered
1399  *   signalled.
1400  */
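/*
 * Worked example (editorial, derived from the logic below and in
 * ntoskrnl_satisfy_wait()): a mutant presumably starts with
 * dh_sigstate == 1 (free, and therefore signalled by the rule above).
 * The first acquisition drops it to 0 and records the owning thread;
 * a recursive acquisition by that same thread still counts as
 * signalled and drops it to -1. Each KeReleaseMutex() is then expected
 * to raise the count back by one before other threads can take the
 * mutex.
 */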
1401
1402 static int
1403 ntoskrnl_is_signalled(obj, td)
1404         nt_dispatch_header      *obj;
1405         struct thread           *td;
1406 {
1407         kmutant                 *km;
1408         
1409         if (obj->dh_type == DISP_TYPE_MUTANT) {
1410                 km = (kmutant *)obj;
1411                 if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
1412                     obj->dh_sigstate == 1)
1413                         return(TRUE);
1414                 return(FALSE);
1415         }
1416
1417         if (obj->dh_sigstate > 0)
1418                 return(TRUE);
1419         return(FALSE);
1420 }
1421
1422 static void
1423 ntoskrnl_satisfy_wait(obj, td)
1424         nt_dispatch_header      *obj;
1425         struct thread           *td;
1426 {
1427         kmutant                 *km;
1428
1429         switch (obj->dh_type) {
1430         case DISP_TYPE_MUTANT:
1431                 km = (struct kmutant *)obj;
1432                 obj->dh_sigstate--;
1433                 /*
1434                  * If sigstate reaches 0, the mutex is now
1435                  * non-signalled (the new thread owns it).
1436                  */
1437                 if (obj->dh_sigstate == 0) {
1438                         km->km_ownerthread = td;
1439                         if (km->km_abandoned == TRUE)
1440                                 km->km_abandoned = FALSE;
1441                 }
1442                 break;
1443         /* Synchronization objects get reset to unsignalled. */
1444         case DISP_TYPE_SYNCHRONIZATION_EVENT:
1445         case DISP_TYPE_SYNCHRONIZATION_TIMER:
1446                 obj->dh_sigstate = 0;
1447                 break;
1448         case DISP_TYPE_SEMAPHORE:
1449                 obj->dh_sigstate--;
1450                 break;
1451         default:
1452                 break;
1453         }
1454
1455         return;
1456 }
1457
1458 static void
1459 ntoskrnl_satisfy_multiple_waits(wb)
1460         wait_block              *wb;
1461 {
1462         wait_block              *cur;
1463         struct thread           *td;
1464
1465         cur = wb;
1466         td = wb->wb_kthread;
1467
1468         do {
1469                 ntoskrnl_satisfy_wait(wb->wb_object, td);
1470                 cur->wb_awakened = TRUE;
1471                 cur = cur->wb_next;
1472         } while (cur != wb);
1473
1474         return;
1475 }
1476
1477 /* Always called with dispatcher lock held. */
1478 static void
1479 ntoskrnl_waittest(obj, increment)
1480         nt_dispatch_header      *obj;
1481         uint32_t                increment;
1482 {
1483         wait_block              *w, *next;
1484         list_entry              *e;
1485         struct thread           *td;
1486         wb_ext                  *we;
1487         int                     satisfied;
1488
1489         /*
1490          * Once an object has been signalled, we walk its list of
1491          * wait blocks. If a wait block can be awakened, then satisfy
1492          * waits as necessary and wake the thread.
1493          *
1494          * The rules work like this:
1495          *
1496          * If a wait block is marked as WAITTYPE_ANY, then
1497          * we can satisfy the wait conditions on the current
1498          * object and wake the thread right away. Satisfying
1499          * the wait also has the effect of breaking us out
1500          * of the search loop.
1501          *
1502          * If a wait block is marked as WAITTYPE_ALL, then the
1503          * wait block will be part of a circularly linked
1504          * list of wait blocks belonging to a waiting thread
1505          * that's sleeping in KeWaitForMultipleObjects(). In
1506          * order to wake the thread, all the objects in the
1507          * wait list must be in the signalled state. If they
1508          * are, we then satisfy all of them and wake the
1509          * thread.
1510          *
1511          */
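        /*
         * Concrete illustration (editorial): thread A waiting
         * WAITTYPE_ANY on event E1 is woken as soon as E1 is signalled.
         * Thread B waiting WAITTYPE_ALL on E1 and E2, with its two wait
         * blocks chained through wb_next, stays asleep while only E1 is
         * signalled and is woken once both objects are.
         */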
1512
1513         e = obj->dh_waitlisthead.nle_flink;
1514
1515         while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
1516                 w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
1517                 we = w->wb_ext;
1518                 td = we->we_td;
1519                 satisfied = FALSE;
1520                 if (w->wb_waittype == WAITTYPE_ANY) {
1521                         /*
1522                          * Thread can be awakened if
1523                          * any wait is satisfied.
1524                          */
1525                         ntoskrnl_satisfy_wait(obj, td);
1526                         satisfied = TRUE;
1527                         w->wb_awakened = TRUE;
1528                 } else {
1529                         /*
1530                          * Thread can only be woken up
1531                          * if all waits are satisfied.
1532                          * If the thread is waiting on multiple
1533                          * objects, they should all be linked
1534                          * through the wb_next pointers in the
1535                          * wait blocks.
1536                          */
1537                         satisfied = TRUE;
1538                         next = w->wb_next;
1539                         while (next != w) {
1540                                 if (ntoskrnl_is_signalled(obj, td) == FALSE) {
1541                                         satisfied = FALSE;
1542                                         break;
1543                                 }
1544                                 next = next->wb_next;
1545                         }
1546                         ntoskrnl_satisfy_multiple_waits(w);
1547                 }
1548
1549                 if (satisfied == TRUE)
1550                         cv_broadcastpri(&we->we_cv,
1551                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
1552                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
1553
1554                 e = e->nle_flink;
1555         }
1556
1557         return;
1558 }
1559
1560 /*
1561  * Return the number of 100 nanosecond intervals since
1562  * January 1, 1601. (?!?!)
1563  */
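/*
 * Editorial note: the offset added below, 11644473600 seconds, is the
 * span from 1601-01-01 to 1970-01-01, i.e. 369 years * 365 days + 89
 * leap days = 134774 days * 86400 s/day = 11644473600 s.
 */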
1564 void
1565 ntoskrnl_time(tval)
1566         uint64_t                *tval;
1567 {
1568         struct timespec         ts;
1569
1570         nanotime(&ts);
1571         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1572             11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
1573
1574         return;
1575 }
1576
1577 static void
1578 KeQuerySystemTime(current_time)
1579         uint64_t                *current_time;
1580 {
1581         ntoskrnl_time(current_time);
1582 }
1583
1584 static uint32_t
1585 KeTickCount(void)
1586 {
1587         struct timeval tv;
1588         getmicrouptime(&tv);
1589         return tvtohz(&tv);
1590 }
1591
1592
1593 /*
1594  * KeWaitForSingleObject() is a tricky beast, because it can be used
1595  * with several different object types: semaphores, timers, events,
1596  * mutexes and threads. Semaphores don't appear very often, but the
1597  * other object types are quite common. KeWaitForSingleObject() is
1598  * what's normally used to acquire a mutex, and it can be used to
1599  * wait for a thread to terminate.
1600  *
1601  * The Windows NDIS API is implemented in terms of Windows kernel
1602  * primitives, and some of the object manipulation is duplicated in
1603  * NDIS. For example, NDIS has timers and events, which are actually
1604  * Windows kevents and ktimers. Now, you're supposed to only use the
1605  * NDIS variants of these objects within the confines of the NDIS API,
1606  * but there are some naughty developers out there who will use
1607  * KeWaitForSingleObject() on NDIS timer and event objects, so we
1608  * have to support that as well. Conseqently, our NDIS timer and event
1609  * have to support that as well. Consequently, our NDIS timer and event
1610  * just as it is in Windows.
1611  *
1612  * KeWaitForSingleObject() may do different things for different kinds
1613  * of objects:
1614  *
1615  * - For events, we check if the event has been signalled. If the
1616  *   event is already in the signalled state, we just return immediately,
1617  *   otherwise we wait for it to be set to the signalled state by someone
1618  *   else calling KeSetEvent(). Events can be either synchronization or
1619  *   notification events.
1620  *
1621  * - For timers, if the timer has already fired and the timer is in
1622  *   the signalled state, we just return, otherwise we wait on the
1623  *   timer. Unlike an event, timers get signalled automatically when
1624  *   they expire rather than someone having to trip them manually.
1625  *   Timers initialized with KeInitializeTimer() are always notification
1626  *   events: KeInitializeTimerEx() lets you initialize a timer as
1627  *   either a notification or synchronization event.
1628  *
1629  * - For mutexes, we try to acquire the mutex and if we can't, we wait
1630  *   on the mutex until it's available and then grab it. When a mutex is
1631  *   released, it enters the signalled state, which wakes up one of the
1632  *   threads waiting to acquire it. Mutexes are always synchronization
1633  *   events.
1634  *
1635  * - For threads, the only thing we do is wait until the thread object
1636  *   enters a signalled state, which occurs when the thread terminates.
1637  *   Threads are always notification events.
1638  *
1639  * A notification event wakes up all threads waiting on an object. A
1640  * synchronization event wakes up just one. Also, a synchronization event
1641  * is auto-clearing, which means we automatically set the event back to
1642  * the non-signalled state once the wakeup is done.
1643  */
1644
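/*
 * Illustrative usage (the event and the timeout value are made up for
 * the example): wait up to 500ms for an event to become signalled. The
 * timeout is negative, i.e. relative, and counted in 100ns units.
 *
 *      nt_kevent       event;
 *      int64_t         duetime;
 *
 *      KeInitializeEvent(&event, EVENT_TYPE_NOTIFY, FALSE);
 *      duetime = -5000000;
 *      if (KeWaitForSingleObject(&event, 0, 0, TRUE, &duetime) ==
 *          STATUS_TIMEOUT)
 *              ... ;
 */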
1645 uint32_t
1646 KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
1647     uint8_t alertable, int64_t *duetime)
1648 {
1649         wait_block              w;
1650         struct thread           *td = curthread;
1651         struct timeval          tv;
1652         int                     error = 0;
1653         uint64_t                curtime;
1654         wb_ext                  we;
1655         nt_dispatch_header      *obj;
1656
1657         obj = arg;
1658
1659         if (obj == NULL)
1660                 return(STATUS_INVALID_PARAMETER);
1661
1662         mtx_lock(&ntoskrnl_dispatchlock);
1663
1664         cv_init(&we.we_cv, "KeWFS");
1665         we.we_td = td;
1666
1667         /*
1668          * Check to see if this object is already signalled,
1669          * and just return without waiting if it is.
1670          */
1671         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1672                 /* Sanity check the signal state value. */
1673                 if (obj->dh_sigstate != INT32_MIN) {
1674                         ntoskrnl_satisfy_wait(obj, curthread);
1675                         mtx_unlock(&ntoskrnl_dispatchlock);
1676                         return (STATUS_SUCCESS);
1677                 } else {
1678                         /*
1679                          * There's a limit to how many times we can
1680                          * recursively acquire a mutant. If we hit
1681                          * the limit, something is very wrong.
1682                          */
1683                         if (obj->dh_type == DISP_TYPE_MUTANT) {
1684                                 mtx_unlock(&ntoskrnl_dispatchlock);
1685                                 panic("mutant limit exceeded");
1686                         }
1687                 }
1688         }
1689
1690         bzero((char *)&w, sizeof(wait_block));
1691         w.wb_object = obj;
1692         w.wb_ext = &we;
1693         w.wb_waittype = WAITTYPE_ANY;
1694         w.wb_next = &w;
1695         w.wb_waitkey = 0;
1696         w.wb_awakened = FALSE;
1697         w.wb_oldpri = td->td_priority;
1698
1699         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1700
1701         /*
1702          * The timeout value is specified in 100 nanosecond units
1703          * and can be a positive or negative number. If it's positive,
1704          * then the duetime is absolute, and we need to convert it
1705          * to an offset relative to now in order to use it.
1706          * If it's negative, then the duetime is relative and we
1707          * just have to convert the units.
1708          */
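        /*
         * Example: a relative timeout of one second arrives here as
         * *duetime == -10000000 (10,000,000 units of 100ns), which the
         * code below converts to tv_sec = 1 and tv_usec = 0.
         */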
1709
1710         if (duetime != NULL) {
1711                 if (*duetime < 0) {
1712                         tv.tv_sec = - (*duetime) / 10000000;
1713                         tv.tv_usec = (- (*duetime) / 10) -
1714                             (tv.tv_sec * 1000000);
1715                 } else {
1716                         ntoskrnl_time(&curtime);
1717                         if (*duetime < curtime)
1718                                 tv.tv_sec = tv.tv_usec = 0;
1719                         else {
1720                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1721                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1722                                     (tv.tv_sec * 1000000);
1723                         }
1724                 }
1725         }
1726
1727         if (duetime == NULL)
1728                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1729         else
1730                 error = cv_timedwait(&we.we_cv,
1731                     &ntoskrnl_dispatchlock, tvtohz(&tv));
1732
1733         RemoveEntryList(&w.wb_waitlist);
1734
1735         cv_destroy(&we.we_cv);
1736
1737         /* We timed out. Leave the object alone and return status. */
1738
1739         if (error == EWOULDBLOCK) {
1740                 mtx_unlock(&ntoskrnl_dispatchlock);
1741                 return(STATUS_TIMEOUT);
1742         }
1743
1744         mtx_unlock(&ntoskrnl_dispatchlock);
1745
1746         return(STATUS_SUCCESS);
1747 /*
1748         return(KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1749             mode, alertable, duetime, &w));
1750 */
1751 }
1752
1753 static uint32_t
1754 KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
1755         uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
1756         wait_block *wb_array)
1757 {
1758         struct thread           *td = curthread;
1759         wait_block              *whead, *w;
1760         wait_block              _wb_array[MAX_WAIT_OBJECTS];
1761         nt_dispatch_header      *cur;
1762         struct timeval          tv;
1763         int                     i, wcnt = 0, error = 0;
1764         uint64_t                curtime;
1765         struct timespec         t1, t2;
1766         uint32_t                status = STATUS_SUCCESS;
1767         wb_ext                  we;
1768
1769         if (cnt > MAX_WAIT_OBJECTS)
1770                 return(STATUS_INVALID_PARAMETER);
1771         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1772                 return(STATUS_INVALID_PARAMETER);
1773
1774         mtx_lock(&ntoskrnl_dispatchlock);
1775
1776         cv_init(&we.we_cv, "KeWFM");
1777         we.we_td = td;
1778
1779         if (wb_array == NULL)
1780                 whead = _wb_array;
1781         else
1782                 whead = wb_array;
1783
1784         bzero((char *)whead, sizeof(wait_block) * cnt);
1785
1786         /* First pass: see if we can satisfy any waits immediately. */
1787
1788         wcnt = 0;
1789         w = whead;
1790
1791         for (i = 0; i < cnt; i++) {
1792                 InsertTailList((&obj[i]->dh_waitlisthead),
1793                     (&w->wb_waitlist));
1794                 w->wb_ext = &we;
1795                 w->wb_object = obj[i];
1796                 w->wb_waittype = wtype;
1797                 w->wb_waitkey = i;
1798                 w->wb_awakened = FALSE;
1799                 w->wb_oldpri = td->td_priority;
1800                 w->wb_next = w + 1;
1801                 w++;
1802                 wcnt++;
1803                 if (ntoskrnl_is_signalled(obj[i], td)) {
1804                         /*
1805                          * There's a limit to how many times
1806                          * we can recursively acquire a mutant.
1807                          * If we hit the limit, something
1808                          * is very wrong.
1809                          */
1810                         if (obj[i]->dh_sigstate == INT32_MIN &&
1811                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
1812                                 mtx_unlock(&ntoskrnl_dispatchlock);
1813                                 panic("mutant limit exceeded");
1814                         }
1815
1816                         /*
1817                          * If this is a WAITTYPE_ANY wait, then
1818                          * satisfy the waited object and exit
1819                          * right now.
1820                          */
1821
1822                         if (wtype == WAITTYPE_ANY) {
1823                                 ntoskrnl_satisfy_wait(obj[i], td);
1824                                 status = STATUS_WAIT_0 + i;
1825                                 goto wait_done;
1826                         } else {
1827                                 w--;
1828                                 wcnt--;
1829                                 w->wb_object = NULL;
1830                                 RemoveEntryList(&w->wb_waitlist);
1831                         }
1832                 }
1833         }
1834
1835         /*
1836          * If this is a WAITTYPE_ALL wait and all objects are
1837          * already signalled, satisfy the waits and exit now.
1838          */
1839
1840         if (wtype == WAITTYPE_ALL && wcnt == 0) {
1841                 for (i = 0; i < cnt; i++)
1842                         ntoskrnl_satisfy_wait(obj[i], td);
1843                 status = STATUS_SUCCESS;
1844                 goto wait_done;
1845         }
1846
1847         /*
1848          * Create a circular waitblock list. The waitcount
1849          * must always be non-zero when we get here.
1850          */
1851
1852         (w - 1)->wb_next = whead;
1853
1854         /* Wait on any objects that aren't yet signalled. */
1855
1856         /* Calculate timeout, if any. */
1857
1858         if (duetime != NULL) {
1859                 if (*duetime < 0) {
1860                         tv.tv_sec = - (*duetime) / 10000000;
1861                         tv.tv_usec = (- (*duetime) / 10) -
1862                             (tv.tv_sec * 1000000);
1863                 } else {
1864                         ntoskrnl_time(&curtime);
1865                         if (*duetime < curtime)
1866                                 tv.tv_sec = tv.tv_usec = 0;
1867                         else {
1868                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1869                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1870                                     (tv.tv_sec * 1000000);
1871                         }
1872                 }
1873         }
1874
1875         while (wcnt) {
1876                 nanotime(&t1);
1877
1878                 if (duetime == NULL)
1879                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1880                 else
1881                         error = cv_timedwait(&we.we_cv,
1882                             &ntoskrnl_dispatchlock, tvtohz(&tv));
1883
1884                 /* The wait timed out. */
1885
1886                 if (error) {
1887                         status = STATUS_TIMEOUT;
1888                         goto wait_done;
1889                 }
1890
1891                 nanotime(&t2);
1892
1893                 /* See what's been signalled. */
1894
1895                 w = whead;
1896                 do {
1897                         cur = w->wb_object;
1898                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1899                             w->wb_awakened == TRUE) {
1900                                 /* Sanity check the signal state value. */
1901                                 if (cur->dh_sigstate == INT32_MIN &&
1902                                     cur->dh_type == DISP_TYPE_MUTANT) {
1903                                         mtx_unlock(&ntoskrnl_dispatchlock);
1904                                         panic("mutant limit exceeded");
1905                                 }
1906                                 wcnt--;
1907                                 if (wtype == WAITTYPE_ANY) {
1908                                         status = STATUS_WAIT_0 +
1909                                             w->wb_waitkey;
1910                                         goto wait_done;
1911                                 }
1912                         }
1913                         w = w->wb_next;
1914                 } while (w != whead);
1915
1916                 /*
1917                  * If all objects have been signalled, or if this
1918          * is a WAITTYPE_ANY wait and we were woken up by
1919                  * someone, we can bail.
1920                  */
1921
1922                 if (wcnt == 0) {
1923                         status = STATUS_SUCCESS;
1924                         goto wait_done;
1925                 }
1926
1927                 /*
1928                  * If this is a WAITTYPE_ALL wait and there are still
1929                  * objects that haven't been signalled, deduct the
1930                  * time that's elapsed so far from the timeout and
1931                  * wait again (or continue waiting indefinitely if
1932                  * there's no timeout).
1933                  */
1934
1935                 if (duetime != NULL) {
1936                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1937                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1938                 }
1939         }
1940
1941
1942 wait_done:
1943
1944         cv_destroy(&we.we_cv);
1945
1946         for (i = 0; i < cnt; i++) {
1947                 if (whead[i].wb_object != NULL)
1948                         RemoveEntryList(&whead[i].wb_waitlist);
1949
1950         }
1951         mtx_unlock(&ntoskrnl_dispatchlock);
1952
1953         return(status);
1954 }
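/*
 * Illustrative call (the two events are invented for the example):
 * block until both objects have been signalled, with no timeout.
 *
 *      nt_dispatch_header      *objs[2];
 *      wait_block              wb[MAX_WAIT_OBJECTS];
 *
 *      objs[0] = (nt_dispatch_header *)&ev_a;
 *      objs[1] = (nt_dispatch_header *)&ev_b;
 *      KeWaitForMultipleObjects(2, objs, WAITTYPE_ALL, 0, 0, FALSE,
 *          NULL, wb);
 */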
1955
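/*
 * The READ_REGISTER_*()/WRITE_REGISTER_*() routines below stand in for
 * the Windows register access helpers. The register's kernel virtual
 * address is used directly as the bus space handle, so they are only
 * suitable for memory-mapped (not port-mapped) device registers.
 */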
1956 static void
1957 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
1958 {
1959         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1960         return;
1961 }
1962
1963 static uint16_t
1964 READ_REGISTER_USHORT(reg)
1965         uint16_t                *reg;
1966 {
1967         return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1968 }
1969
1970 static void
1971 WRITE_REGISTER_ULONG(reg, val)
1972         uint32_t                *reg;
1973         uint32_t                val;
1974 {
1975         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1976         return;
1977 }
1978
1979 static uint32_t
1980 READ_REGISTER_ULONG(reg)
1981         uint32_t                *reg;
1982 {
1983         return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1984 }
1985
1986 static uint8_t
1987 READ_REGISTER_UCHAR(uint8_t *reg)
1988 {
1989         return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1990 }
1991
1992 static void
1993 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
1994 {
1995         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1996         return;
1997 }
1998
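/*
 * The _all*() and _aull*() routines below emulate the 64-bit integer
 * arithmetic helpers that 32-bit Windows compilers emit calls to (a
 * 64-bit multiply, for instance, becomes a call to _allmul()).
 * Drivers resolve these from ntoskrnl, so we have to provide them.
 */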
1999 static int64_t
2000 _allmul(a, b)
2001         int64_t                 a;
2002         int64_t                 b;
2003 {
2004         return (a * b);
2005 }
2006
2007 static int64_t
2008 _alldiv(a, b)
2009         int64_t                 a;
2010         int64_t                 b;
2011 {
2012         return (a / b);
2013 }
2014
2015 static int64_t
2016 _allrem(a, b)
2017         int64_t                 a;
2018         int64_t                 b;
2019 {
2020         return (a % b);
2021 }
2022
2023 static uint64_t
2024 _aullmul(a, b)
2025         uint64_t                a;
2026         uint64_t                b;
2027 {
2028         return (a * b);
2029 }
2030
2031 static uint64_t
2032 _aulldiv(a, b)
2033         uint64_t                a;
2034         uint64_t                b;
2035 {
2036         return (a / b);
2037 }
2038
2039 static uint64_t
2040 _aullrem(a, b)
2041         uint64_t                a;
2042         uint64_t                b;
2043 {
2044         return (a % b);
2045 }
2046
2047 static int64_t
2048 _allshl(int64_t a, uint8_t b)
2049 {
2050         return (a << b);
2051 }
2052
2053 static uint64_t
2054 _aullshl(uint64_t a, uint8_t b)
2055 {
2056         return (a << b);
2057 }
2058
2059 static int64_t
2060 _allshr(int64_t a, uint8_t b)
2061 {
2062         return (a >> b);
2063 }
2064
2065 static uint64_t
2066 _aullshr(uint64_t a, uint8_t b)
2067 {
2068         return (a >> b);
2069 }
2070
2071 static slist_entry *
2072 ntoskrnl_pushsl(head, entry)
2073         slist_header            *head;
2074         slist_entry             *entry;
2075 {
2076         slist_entry             *oldhead;
2077
2078         oldhead = head->slh_list.slh_next;
2079         entry->sl_next = head->slh_list.slh_next;
2080         head->slh_list.slh_next = entry;
2081         head->slh_list.slh_depth++;
2082         head->slh_list.slh_seq++;
2083
2084         return(oldhead);
2085 }
2086
2087 static slist_entry *
2088 ntoskrnl_popsl(head)
2089         slist_header            *head;
2090 {
2091         slist_entry             *first;
2092
2093         first = head->slh_list.slh_next;
2094         if (first != NULL) {
2095                 head->slh_list.slh_next = first->sl_next;
2096                 head->slh_list.slh_depth--;
2097                 head->slh_list.slh_seq++;
2098         }
2099
2100         return(first);
2101 }
2102
2103 /*
2104  * We need this to make lookaside lists work for amd64.
2105  * We store a pointer to ExAllocatePoolWithTag() in the lookaside
2106  * list structure. For amd64 to work right, this has to be a
2107  * pointer to the wrapped version of the routine, not the
2108  * original. Letting the Windows driver invoke the original
2109  * function directly will result in a calling convention
2110  * mismatch and a crash. On x86, this effectively
2111  * becomes a no-op since ipt_func and ipt_wrap are the same.
2112  */
2113
2114 static funcptr
2115 ntoskrnl_findwrap(func)
2116         funcptr                 func;
2117 {
2118         image_patch_table       *patch;
2119
2120         patch = ntoskrnl_functbl;
2121         while (patch->ipt_func != NULL) {
2122                 if ((funcptr)patch->ipt_func == func)
2123                         return((funcptr)patch->ipt_wrap);
2124                 patch++;
2125         }
2126
2127         return(NULL);
2128 }
2129
2130 static void
2131 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
2132         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2133         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2134 {
2135         bzero((char *)lookaside, sizeof(paged_lookaside_list));
2136
2137         if (size < sizeof(slist_entry))
2138                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2139         else
2140                 lookaside->nll_l.gl_size = size;
2141         lookaside->nll_l.gl_tag = tag;
2142         if (allocfunc == NULL)
2143                 lookaside->nll_l.gl_allocfunc =
2144                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2145         else
2146                 lookaside->nll_l.gl_allocfunc = allocfunc;
2147
2148         if (freefunc == NULL)
2149                 lookaside->nll_l.gl_freefunc =
2150                     ntoskrnl_findwrap((funcptr)ExFreePool);
2151         else
2152                 lookaside->nll_l.gl_freefunc = freefunc;
2153
2154 #ifdef __i386__
2155         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2156 #endif
2157
2158         lookaside->nll_l.gl_type = NonPagedPool;
2159         lookaside->nll_l.gl_depth = depth;
2160         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2161
2162         return;
2163 }
2164
2165 static void
2166 ExDeletePagedLookasideList(lookaside)
2167         paged_lookaside_list   *lookaside;
2168 {
2169         void                    *buf;
2170         void            (*freefunc)(void *);
2171
2172         freefunc = lookaside->nll_l.gl_freefunc;
2173         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2174                 MSCALL1(freefunc, buf);
2175
2176         return;
2177 }
2178
2179 static void
2180 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
2181         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2182         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2183 {
2184         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2185
2186         if (size < sizeof(slist_entry))
2187                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2188         else
2189                 lookaside->nll_l.gl_size = size;
2190         lookaside->nll_l.gl_tag = tag;
2191         if (allocfunc == NULL)
2192                 lookaside->nll_l.gl_allocfunc =
2193                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2194         else
2195                 lookaside->nll_l.gl_allocfunc = allocfunc;
2196
2197         if (freefunc == NULL)
2198                 lookaside->nll_l.gl_freefunc =
2199                     ntoskrnl_findwrap((funcptr)ExFreePool);
2200         else
2201                 lookaside->nll_l.gl_freefunc = freefunc;
2202
2203 #ifdef __i386__
2204         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2205 #endif
2206
2207         lookaside->nll_l.gl_type = NonPagedPool;
2208         lookaside->nll_l.gl_depth = depth;
2209         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2210
2211         return;
2212 }
2213
2214 static void
2215 ExDeleteNPagedLookasideList(lookaside)
2216         npaged_lookaside_list   *lookaside;
2217 {
2218         void                    *buf;
2219         void            (*freefunc)(void *);
2220
2221         freefunc = lookaside->nll_l.gl_freefunc;
2222         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2223                 MSCALL1(freefunc, buf);
2224
2225         return;
2226 }
2227
2228 slist_entry *
2229 InterlockedPushEntrySList(head, entry)
2230         slist_header            *head;
2231         slist_entry             *entry;
2232 {
2233         slist_entry             *oldhead;
2234
2235         mtx_lock_spin(&ntoskrnl_interlock);
2236         oldhead = ntoskrnl_pushsl(head, entry);
2237         mtx_unlock_spin(&ntoskrnl_interlock);
2238
2239         return(oldhead);
2240 }
2241
2242 slist_entry *
2243 InterlockedPopEntrySList(head)
2244         slist_header            *head;
2245 {
2246         slist_entry             *first;
2247
2248         mtx_lock_spin(&ntoskrnl_interlock);
2249         first = ntoskrnl_popsl(head);
2250         mtx_unlock_spin(&ntoskrnl_interlock);
2251
2252         return(first);
2253 }
2254
2255 static slist_entry *
2256 ExInterlockedPushEntrySList(head, entry, lock)
2257         slist_header            *head;
2258         slist_entry             *entry;
2259         kspin_lock              *lock;
2260 {
2261         return(InterlockedPushEntrySList(head, entry));
2262 }
2263
2264 static slist_entry *
2265 ExInterlockedPopEntrySList(head, lock)
2266         slist_header            *head;
2267         kspin_lock              *lock;
2268 {
2269         return(InterlockedPopEntrySList(head));
2270 }
2271
2272 uint16_t
2273 ExQueryDepthSList(head)
2274         slist_header            *head;
2275 {
2276         uint16_t                depth;
2277
2278         mtx_lock_spin(&ntoskrnl_interlock);
2279         depth = head->slh_list.slh_depth;
2280         mtx_unlock_spin(&ntoskrnl_interlock);
2281
2282         return(depth);
2283 }
2284
2285 void
2286 KeInitializeSpinLock(lock)
2287         kspin_lock              *lock;
2288 {
2289         *lock = 0;
2290
2291         return;
2292 }
2293
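/*
 * Windows spinlocks are emulated below as bare atomic flags: the
 * acquire side spins with atomic_cmpset_acq_int() until it can move
 * the lock word from 0 to 1, and the release side stores 0 back with
 * release semantics. The *AtDpcLevel variants assume the caller has
 * already raised itself to DISPATCH_LEVEL.
 */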
2294 #ifdef __i386__
2295 void
2296 KefAcquireSpinLockAtDpcLevel(lock)
2297         kspin_lock              *lock;
2298 {
2299 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2300         int                     i = 0;
2301 #endif
2302
2303         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2304                 /* sit and spin */;
2305 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2306                 i++;
2307                 if (i > 200000000)
2308                         panic("DEADLOCK!");
2309 #endif
2310         }
2311
2312         return;
2313 }
2314
2315 void
2316 KefReleaseSpinLockFromDpcLevel(lock)
2317         kspin_lock              *lock;
2318 {
2319         atomic_store_rel_int((volatile u_int *)lock, 0);
2320
2321         return;
2322 }
2323
2324 uint8_t
2325 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2326 {
2327         uint8_t                 oldirql;
2328
2329         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2330                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2331
2332         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2333         KeAcquireSpinLockAtDpcLevel(lock);
2334
2335         return(oldirql);
2336 }
2337 #else
2338 void
2339 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2340 {
2341         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2342                 /* sit and spin */;
2343
2344         return;
2345 }
2346
2347 void
2348 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2349 {
2350         atomic_store_rel_int((volatile u_int *)lock, 0);
2351
2352         return;
2353 }
2354 #endif /* __i386__ */
2355
2356 uintptr_t
2357 InterlockedExchange(dst, val)
2358         volatile uint32_t       *dst;
2359         uintptr_t               val;
2360 {
2361         uintptr_t               r;
2362
2363         mtx_lock_spin(&ntoskrnl_interlock);
2364         r = *dst;
2365         *dst = val;
2366         mtx_unlock_spin(&ntoskrnl_interlock);
2367
2368         return(r);
2369 }
2370
2371 static uint32_t
2372 InterlockedIncrement(addend)
2373         volatile uint32_t       *addend;
2374 {
2375         atomic_add_long((volatile u_long *)addend, 1);
2376         return(*addend);
2377 }
2378
2379 static uint32_t
2380 InterlockedDecrement(addend)
2381         volatile uint32_t       *addend;
2382 {
2383         atomic_subtract_long((volatile u_long *)addend, 1);
2384         return(*addend);
2385 }
2386
2387 static void
2388 ExInterlockedAddLargeStatistic(addend, inc)
2389         uint64_t                *addend;
2390         uint32_t                inc;
2391 {
2392         mtx_lock_spin(&ntoskrnl_interlock);
2393         *addend += inc;
2394         mtx_unlock_spin(&ntoskrnl_interlock);
2395
2396         return;
2397 }
2398
2399 mdl *
2400 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
2401         uint8_t chargequota, irp *iopkt)
2402 {
2403         mdl                     *m;
2404         int                     zone = 0;
2405
2406         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2407                 m = ExAllocatePoolWithTag(NonPagedPool,
2408                     MmSizeOfMdl(vaddr, len), 0);
2409         else {
2410                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
2411                 zone++;
2412         }
2413
2414         if (m == NULL)
2415                 return (NULL);
2416
2417         MmInitializeMdl(m, vaddr, len);
2418
2419         /*
2420          * MmInitializeMdl() clears the flags field, so we
2421          * have to set this here. If the MDL came from the
2422          * MDL UMA zone, tag it so we can release it to
2423          * the right place later.
2424          */
2425         if (zone)
2426                 m->mdl_flags = MDL_ZONE_ALLOCED;
2427
2428         if (iopkt != NULL) {
2429                 if (secondarybuf == TRUE) {
2430                         mdl                     *last;
2431                         last = iopkt->irp_mdl;
2432                         while (last->mdl_next != NULL)
2433                                 last = last->mdl_next;
2434                         last->mdl_next = m;
2435                 } else {
2436                         if (iopkt->irp_mdl != NULL)
2437                                 panic("leaking an MDL in IoAllocateMdl()");
2438                         iopkt->irp_mdl = m;
2439                 }
2440         }
2441
2442         return (m);
2443 }
2444
2445 void
2446 IoFreeMdl(m)
2447         mdl                     *m;
2448 {
2449         if (m == NULL)
2450                 return;
2451
2452         if (m->mdl_flags & MDL_ZONE_ALLOCED)
2453                 uma_zfree(mdl_zone, m);
2454         else
2455                 ExFreePool(m);
2456
2457         return;
2458 }
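/*
 * Illustrative MDL life cycle (buffer and length are invented), using
 * only routines defined in this file:
 *
 *      mdl     *m;
 *
 *      m = IoAllocateMdl(buf, buflen, FALSE, FALSE, NULL);
 *      if (m != NULL) {
 *              MmBuildMdlForNonPagedPool(m);
 *              ...
 *              IoFreeMdl(m);
 *      }
 */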
2459
2460 static void *
2461 MmAllocateContiguousMemory(size, highest)
2462         uint32_t                size;
2463         uint64_t                highest;
2464 {
2465         void *addr;
2466         size_t pagelength = roundup(size, PAGE_SIZE);
2467
2468         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2469
2470         return(addr);
2471 }
2472
2473 static void *
2474 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
2475     boundary, cachetype)
2476         uint32_t                size;
2477         uint64_t                lowest;
2478         uint64_t                highest;
2479         uint64_t                boundary;
2480         uint32_t                cachetype;
2481 {
2482         void *addr;
2483         size_t pagelength = roundup(size, PAGE_SIZE);
2484
2485         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2486
2487         return(addr);
2488 }
2489
2490 static void
2491 MmFreeContiguousMemory(base)
2492         void                    *base;
2493 {
2494         ExFreePool(base);
2495 }
2496
2497 static void
2498 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
2499         void                    *base;
2500         uint32_t                size;
2501         uint32_t                cachetype;
2502 {
2503         ExFreePool(base);
2504 }
2505
2506 static uint32_t
2507 MmSizeOfMdl(vaddr, len)
2508         void                    *vaddr;
2509         size_t                  len;
2510 {
2511         uint32_t                l;
2512
2513         l = sizeof(struct mdl) +
2514             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2515
2516         return(l);
2517 }
2518
2519 /*
2520  * The Microsoft documentation says this routine fills in the
2521  * page array of an MDL with the _physical_ page addresses that
2522  * comprise the buffer, but we don't really want to do that here.
2523  * Instead, we just fill in the page array with the kernel virtual
2524  * addresses of the buffers.
2525  */
2526 void
2527 MmBuildMdlForNonPagedPool(m)
2528         mdl                     *m;
2529 {
2530         vm_offset_t             *mdl_pages;
2531         int                     pagecnt, i;
2532
2533         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2534
2535         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2536                 panic("not enough pages in MDL to describe buffer");
2537
2538         mdl_pages = MmGetMdlPfnArray(m);
2539
2540         for (i = 0; i < pagecnt; i++)
2541                 mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2542
2543         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2544         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2545
2546         return;
2547 }
2548
2549 static void *
2550 MmMapLockedPages(mdl *buf, uint8_t accessmode)
2551 {
2552         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2553         return(MmGetMdlVirtualAddress(buf));
2554 }
2555
2556 static void *
2557 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
2558         void *vaddr, uint32_t bugcheck, uint32_t prio)
2559 {
2560         return(MmMapLockedPages(buf, accessmode));
2561 }
2562
2563 static void
2564 MmUnmapLockedPages(vaddr, buf)
2565         void                    *vaddr;
2566         mdl                     *buf;
2567 {
2568         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2569         return;
2570 }
2571
2572 /*
2573  * This function has a problem in that it will break if you
2574  * compile this module without PAE and try to use it on a PAE
2575  * kernel. Unfortunately, there's no way around this at the
2576  * moment. It's slightly less broken than using pmap_kextract().
2577  * You'd think the virtual memory subsystem would help us out
2578  * here, but it doesn't.
2579  */
2580
2581 static uint8_t
2582 MmIsAddressValid(vaddr)
2583         void                    *vaddr;
2584 {
2585         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
2586                 return(TRUE);
2587
2588         return(FALSE);
2589 }
2590
2591 void *
2592 MmMapIoSpace(paddr, len, cachetype)
2593         uint64_t                paddr;
2594         uint32_t                len;
2595         uint32_t                cachetype;
2596 {
2597         devclass_t              nexus_class;
2598         device_t                *nexus_devs, devp;
2599         int                     nexus_count = 0;
2600         device_t                matching_dev = NULL;
2601         struct resource         *res;
2602         int                     i;
2603         vm_offset_t             v;
2604
2605         /* There will always be at least one nexus. */
2606
2607         nexus_class = devclass_find("nexus");
2608         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2609
2610         for (i = 0; i < nexus_count; i++) {
2611                 devp = nexus_devs[i];
2612                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2613                 if (matching_dev)
2614                         break;
2615         }
2616
2617         free(nexus_devs, M_TEMP);
2618
2619         if (matching_dev == NULL)
2620                 return(NULL);
2621
2622         v = (vm_offset_t)rman_get_virtual(res);
2623         if (paddr > rman_get_start(res))
2624                 v += paddr - rman_get_start(res);
2625
2626         return((void *)v);
2627 }
2628
2629 void
2630 MmUnmapIoSpace(vaddr, len)
2631         void                    *vaddr;
2632         size_t                  len;
2633 {
2634         return;
2635 }
2636
2637
2638 static device_t
2639 ntoskrnl_finddev(dev, paddr, res)
2640         device_t                dev;
2641         uint64_t                paddr;
2642         struct resource         **res;
2643 {
2644         device_t                *children = NULL;
2645         device_t                matching_dev;
2646         int                     childcnt;
2647         struct resource         *r;
2648         struct resource_list    *rl;
2649         struct resource_list_entry      *rle;
2650         uint32_t                flags;
2651         int                     i;
2652
2653         /* We only want devices that have been successfully probed. */
2654
2655         if (device_is_alive(dev) == FALSE)
2656                 return(NULL);
2657
2658         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2659         if (rl != NULL) {
2660 #if __FreeBSD_version < 600022
2661                 SLIST_FOREACH(rle, rl, link) {
2662 #else
2663                 STAILQ_FOREACH(rle, rl, link) {
2664 #endif
2665                         r = rle->res;
2666
2667                         if (r == NULL)
2668                                 continue;
2669
2670                         flags = rman_get_flags(r);
2671
2672                         if (rle->type == SYS_RES_MEMORY &&
2673                             paddr >= rman_get_start(r) &&
2674                             paddr <= rman_get_end(r)) {
2675                                 if (!(flags & RF_ACTIVE))
2676                                         bus_activate_resource(dev,
2677                                             SYS_RES_MEMORY, 0, r);
2678                                 *res = r;
2679                                 return(dev);
2680                         }
2681                 }
2682         }
2683
2684         /*
2685          * If this device has children, do another
2686          * level of recursion to inspect them.
2687          */
2688
2689         device_get_children(dev, &children, &childcnt);
2690
2691         for (i = 0; i < childcnt; i++) {
2692                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2693                 if (matching_dev != NULL) {
2694                         free(children, M_TEMP);
2695                         return(matching_dev);
2696                 }
2697         }
2698
2699         
2700         /* Won't somebody please think of the children! */
2701
2702         if (children != NULL)
2703                 free(children, M_TEMP);
2704
2705         return(NULL);
2706 }
2707
2708 /*
2709  * Workitems are unlike DPCs, in that they run in a user-mode thread
2710  * context rather than at DISPATCH_LEVEL in kernel context. In our
2711  * case we run them in kernel context anyway.
2712  */
2713 static void
2714 ntoskrnl_workitem_thread(arg)
2715         void                    *arg;
2716 {
2717         kdpc_queue              *kq;
2718         list_entry              *l;
2719         io_workitem             *iw;
2720         uint8_t                 irql;
2721
2722         kq = arg;
2723
2724         InitializeListHead(&kq->kq_disp);
2725         kq->kq_td = curthread;
2726         kq->kq_exit = 0;
2727         KeInitializeSpinLock(&kq->kq_lock);
2728         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2729
2730         while (1) {
2731                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2732
2733                 KeAcquireSpinLock(&kq->kq_lock, &irql);
2734
2735                 if (kq->kq_exit) {
2736                         kq->kq_exit = 0;
2737                         KeReleaseSpinLock(&kq->kq_lock, irql);
2738                         break;
2739                 }
2740
2741                 while (!IsListEmpty(&kq->kq_disp)) {
2742                         l = RemoveHeadList(&kq->kq_disp);
2743                         iw = CONTAINING_RECORD(l,
2744                             io_workitem, iw_listentry);
2745                         InitializeListHead((&iw->iw_listentry));
2746                         if (iw->iw_func == NULL)
2747                                 continue;
2748                         KeReleaseSpinLock(&kq->kq_lock, irql);
2749                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2750                         KeAcquireSpinLock(&kq->kq_lock, &irql);
2751                 }
2752
2753                 KeReleaseSpinLock(&kq->kq_lock, irql);
2754         }
2755
2756 #if __FreeBSD_version < 502113
2757         mtx_lock(&Giant);
2758 #endif
2759         kthread_exit(0);
2760         return; /* notreached */
2761 }
2762
2763 static void
2764 ntoskrnl_destroy_workitem_threads(void)
2765 {
2766         kdpc_queue              *kq;
2767         int                     i;
2768
2769         for (i = 0; i < WORKITEM_THREADS; i++) {
2770                 kq = wq_queues + i;
2771                 kq->kq_exit = 1;
2772                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);       
2773                 while (kq->kq_exit)
2774                         tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
2775         }
2776
2777         return;
2778 }
2779
2780 io_workitem *
2781 IoAllocateWorkItem(dobj)
2782         device_object           *dobj;
2783 {
2784         io_workitem             *iw;
2785
2786         iw = uma_zalloc(iw_zone, M_NOWAIT);
2787         if (iw == NULL)
2788                 return(NULL);
2789
2790         InitializeListHead(&iw->iw_listentry);
2791         iw->iw_dobj = dobj;
2792
2793         mtx_lock(&ntoskrnl_dispatchlock);
2794         iw->iw_idx = wq_idx;
2795         WORKIDX_INC(wq_idx);
2796         mtx_unlock(&ntoskrnl_dispatchlock);
2797
2798         return(iw);
2799 }
2800
2801 void
2802 IoFreeWorkItem(iw)
2803         io_workitem             *iw;
2804 {
2805         uma_zfree(iw_zone, iw);
2806         return;
2807 }
2808
2809 void
2810 IoQueueWorkItem(iw, iw_func, qtype, ctx)
2811         io_workitem             *iw;
2812         io_workitem_func        iw_func;
2813         uint32_t                qtype;
2814         void                    *ctx;
2815 {
2816         kdpc_queue              *kq;
2817         list_entry              *l;
2818         io_workitem             *cur;
2819         uint8_t                 irql;
2820
2821         kq = wq_queues + iw->iw_idx;
2822
2823         KeAcquireSpinLock(&kq->kq_lock, &irql);
2824
2825         /*
2826          * Traverse the list and make sure this workitem hasn't
2827          * already been inserted. Queuing the same workitem
2828          * twice will hose the list but good.
2829          */
2830
2831         l = kq->kq_disp.nle_flink;
2832         while (l != &kq->kq_disp) {
2833                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2834                 if (cur == iw) {
2835                         /* Already queued -- do nothing. */
2836                         KeReleaseSpinLock(&kq->kq_lock, irql);
2837                         return;
2838                 }
2839                 l = l->nle_flink;
2840         }
2841
2842         iw->iw_func = iw_func;
2843         iw->iw_ctx = ctx;
2844
2845         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2846         KeReleaseSpinLock(&kq->kq_lock, irql);
2847
2848         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2849
2850         return;
2851 }
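/*
 * Illustrative workitem flow (the callback and context are invented):
 * a driver allocates an item against its device_object, queues it with
 * a callback, and eventually releases it with IoFreeWorkItem().
 *
 *      io_workitem     *iw;
 *
 *      iw = IoAllocateWorkItem(dobj);
 *      if (iw != NULL)
 *              IoQueueWorkItem(iw, my_callback, qtype, ctx);
 */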
2852
2853 static void
2854 ntoskrnl_workitem(dobj, arg)
2855         device_object           *dobj;
2856         void                    *arg;
2857 {
2858         io_workitem             *iw;
2859         work_queue_item         *w;
2860         work_item_func          f;
2861
2862         iw = arg;
2863         w = (work_queue_item *)dobj;
2864         f = (work_item_func)w->wqi_func;
2865         uma_zfree(iw_zone, iw);
2866         MSCALL2(f, w, w->wqi_ctx);
2867
2868         return;
2869 }
2870
2871 /*
2872  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2873  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2874  * problem with ExQueueWorkItem() is that it can't guard against
2875  * the condition where a driver submits a job to the work queue and
2876  * is then unloaded before the job is able to run. IoQueueWorkItem()
2877  * acquires a reference to the device's device_object via the
2878  * object manager and retains it until after the job has completed,
2879  * which prevents the driver from being unloaded before the job
2880  * runs. (We don't currently support this behavior, though hopefully
2881  * that will change once the object manager API is fleshed out a bit.)
2882  *
2883  * Having said all that, the ExQueueWorkItem() API remains, because
2884  * there are still other parts of Windows that use it, including
2885  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2886  * We fake up the ExQueueWorkItem() API on top of our implementation
2887  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2888  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2889  * queue item (provided by the caller) in to IoAllocateWorkItem()
2890  * instead of the device_object. We need to save this pointer so
2891  * we can apply a sanity check: as with the DPC queue and other
2892  * workitem queues, we can't allow the same work queue item to
2893  * be queued twice. If it's already pending, we silently return.
2894  */
2895
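/*
 * In other words, NdisScheduleWorkItem() ultimately lands here: the
 * caller's work_queue_item is passed in place of a device_object, the
 * job is queued on the WORKITEM_LEGACY_THREAD queue, and
 * ntoskrnl_workitem() above unwraps it and invokes the original
 * wqi_func callback with its wqi_ctx argument.
 */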
2896 void
2897 ExQueueWorkItem(w, qtype)
2898         work_queue_item         *w;
2899         uint32_t                qtype;
2900 {
2901         io_workitem             *iw;
2902         io_workitem_func        iwf;
2903         kdpc_queue              *kq;
2904         list_entry              *l;
2905         io_workitem             *cur;
2906         uint8_t                 irql;
2907
2908
2909         /*
2910          * We need to do a special sanity test to make sure
2911          * the ExQueueWorkItem() API isn't used to queue
2912          * the same workitem twice. Rather than checking the
2913          * io_workitem pointer itself, we test the attached
2914          * device object, which is really a pointer to the
2915          * legacy work queue item structure.
2916          */
2917
2918         kq = wq_queues + WORKITEM_LEGACY_THREAD;
2919         KeAcquireSpinLock(&kq->kq_lock, &irql);
2920         l = kq->kq_disp.nle_flink;
2921         while (l != &kq->kq_disp) {
2922                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2923                 if (cur->iw_dobj == (device_object *)w) {
2924                         /* Already queued -- do nothing. */
2925                         KeReleaseSpinLock(&kq->kq_lock, irql);
2926                         return;
2927                 }
2928                 l = l->nle_flink;
2929         }
2930         KeReleaseSpinLock(&kq->kq_lock, irql);
2931
2932         iw = IoAllocateWorkItem((device_object *)w);
2933         if (iw == NULL)
2934                 return;
2935
2936         iw->iw_idx = WORKITEM_LEGACY_THREAD;
2937         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2938         IoQueueWorkItem(iw, iwf, qtype, iw);
2939
2940         return;
2941 }
2942
2943 static void
2944 RtlZeroMemory(dst, len)
2945         void                    *dst;
2946         size_t                  len;
2947 {
2948         bzero(dst, len);
2949         return;
2950 }
2951
2952 static void
2953 RtlCopyMemory(dst, src, len)
2954         void                    *dst;
2955         const void              *src;
2956         size_t                  len;
2957 {
2958         bcopy(src, dst, len);
2959         return;
2960 }
2961
2962 static size_t
2963 RtlCompareMemory(s1, s2, len)
2964         const void              *s1;
2965         const void              *s2;
2966         size_t                  len;
2967 {
2968         size_t                  i, total = 0;
2969         uint8_t                 *m1, *m2;
2970
2971         m1 = __DECONST(char *, s1);
2972         m2 = __DECONST(char *, s2);
2973
2974         for (i = 0; i < len; i++) {
2975                 if (m1[i] == m2[i])
2976                         total++;
2977         }
2978         return(total);
2979 }
2980
2981 void
2982 RtlInitAnsiString(dst, src)
2983         ansi_string             *dst;
2984         char                    *src;
2985 {
2986         ansi_string             *a;
2987
2988         a = dst;
2989         if (a == NULL)
2990                 return;
2991         if (src == NULL) {
2992                 a->as_len = a->as_maxlen = 0;
2993                 a->as_buf = NULL;
2994         } else {
2995                 a->as_buf = src;
2996                 a->as_len = a->as_maxlen = strlen(src);
2997         }
2998
2999         return;
3000 }
3001
3002 void
3003 RtlInitUnicodeString(dst, src)
3004         unicode_string          *dst;
3005         uint16_t                *src;
3006 {
3007         unicode_string          *u;
3008         int                     i;
3009
3010         u = dst;
3011         if (u == NULL)
3012                 return;
3013         if (src == NULL) {
3014                 u->us_len = u->us_maxlen = 0;
3015                 u->us_buf = NULL;
3016         } else {
3017                 i = 0;
3018                 while(src[i] != 0)
3019                         i++;
3020                 u->us_buf = src;
3021                 u->us_len = u->us_maxlen = i * 2;
3022         }
3023
3024         return;
3025 }
3026
3027 ndis_status
3028 RtlUnicodeStringToInteger(ustr, base, val)
3029         unicode_string          *ustr;
3030         uint32_t                base;
3031         uint32_t                *val;
3032 {
3033         uint16_t                *uchr;
3034         int                     len, neg = 0;
3035         char                    abuf[64];
3036         char                    *astr;
3037
3038         uchr = ustr->us_buf;
3039         len = ustr->us_len;
3040         bzero(abuf, sizeof(abuf));
3041
3042         if ((char)((*uchr) & 0xFF) == '-') {
3043                 neg = 1;
3044                 uchr++;
3045                 len -= 2;
3046         } else if ((char)((*uchr) & 0xFF) == '+') {
3047                 neg = 0;
3048                 uchr++;
3049                 len -= 2;
3050         }
3051
3052         if (base == 0) {
3053                 if ((char)((*uchr) & 0xFF) == 'b') {
3054                         base = 2;
3055                         uchr++;
3056                         len -= 2;
3057                 } else if ((char)((*uchr) & 0xFF) == 'o') {
3058                         base = 8;
3059                         uchr++;
3060                         len -= 2;
3061                 } else if ((char)((*uchr) & 0xFF) == 'x') {
3062                         base = 16;
3063                         uchr++;
3064                         len -= 2;
3065                 } else
3066                         base = 10;
3067         }
3068
3069         astr = abuf;
3070         if (neg) {
3071                 strcpy(astr, "-");
3072                 astr++;
3073         }
3074
3075         ntoskrnl_unicode_to_ascii(uchr, astr, len);
3076         *val = strtoul(abuf, NULL, base);
3077
3078         return(STATUS_SUCCESS);
3079 }
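/*
 * Example: given the counted string L"x1A" and base == 0, the code
 * above selects base 16 and stores 26 in *val; a leading '-' or '+'
 * is consumed before the base prefix character is examined.
 */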
3080
3081 void
3082 RtlFreeUnicodeString(ustr)
3083         unicode_string          *ustr;
3084 {
3085         if (ustr->us_buf == NULL)
3086                 return;
3087         ExFreePool(ustr->us_buf);
3088         ustr->us_buf = NULL;
3089         return;
3090 }
3091
3092 void
3093 RtlFreeAnsiString(astr)
3094         ansi_string             *astr;
3095 {
3096         if (astr->as_buf == NULL)
3097                 return;
3098         ExFreePool(astr->as_buf);
3099         astr->as_buf = NULL;
3100         return;
3101 }
3102
3103 static int
3104 atoi(str)
3105         const char              *str;
3106 {
3107         return (int)strtol(str, (char **)NULL, 10);
3108 }
3109
3110 static long
3111 atol(str)
3112         const char              *str;
3113 {
3114         return strtol(str, (char **)NULL, 10);
3115 }
3116
3117 static int
3118 rand(void)
3119 {
3120         struct timeval          tv;
3121
3122         microtime(&tv);
3123         srandom(tv.tv_usec);
3124         return((int)random());
3125 }
3126
3127 static void
3128 srand(seed)
3129         unsigned int            seed;
3130 {
3131         srandom(seed);
3132         return;
3133 }
3134
3135 static uint8_t
3136 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
3137 {
3138         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3139                 return(TRUE);
3140         return(FALSE);
3141 }
3142
3143 static ndis_status
3144 IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
3145         unicode_string          *name;
3146         uint32_t                reqaccess;
3147         void                    *fileobj;
3148         device_object           *devobj;
3149 {
3150         return(STATUS_SUCCESS);
3151 }
3152
3153 static ndis_status
3154 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3155         device_object           *devobj;
3156         uint32_t                regprop;
3157         uint32_t                buflen;
3158         void                    *prop;
3159         uint32_t                *reslen;
3160 {
3161         driver_object           *drv;
3162         uint16_t                **name;
3163
3164         drv = devobj->do_drvobj;
3165
3166         switch (regprop) {
3167         case DEVPROP_DRIVER_KEYNAME:
3168                 name = prop;
3169                 *name = drv->dro_drivername.us_buf;
3170                 *reslen = drv->dro_drivername.us_len;
3171                 break;
3172         default:
3173                 return(STATUS_INVALID_PARAMETER_2);
3174                 break;
3175         }
3176
3177         return(STATUS_SUCCESS);
3178 }
3179
3180 static void
3181 KeInitializeMutex(kmutex, level)
3182         kmutant                 *kmutex;
3183         uint32_t                level;
3184 {
3185         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3186         kmutex->km_abandoned = FALSE;
3187         kmutex->km_apcdisable = 1;
3188         kmutex->km_header.dh_sigstate = 1;
3189         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3190         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3191         kmutex->km_ownerthread = NULL;
3192         return;
3193 }
3194
3195 static uint32_t
3196 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
3197 {
3198         uint32_t                prevstate;
3199
3200         mtx_lock(&ntoskrnl_dispatchlock);
3201         prevstate = kmutex->km_header.dh_sigstate;
3202         if (kmutex->km_ownerthread != curthread) {
3203                 mtx_unlock(&ntoskrnl_dispatchlock);
3204                 return(STATUS_MUTANT_NOT_OWNED);
3205         }
3206
3207         kmutex->km_header.dh_sigstate++;
3208         kmutex->km_abandoned = FALSE;
3209
3210         if (kmutex->km_header.dh_sigstate == 1) {
3211                 kmutex->km_ownerthread = NULL;
3212                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3213         }
3214
3215         mtx_unlock(&ntoskrnl_dispatchlock);
3216
3217         return(prevstate);
3218 }
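/*
 * Illustrative kmutant usage: the mutex is acquired by waiting on it
 * and released with KeReleaseMutex().
 *
 *      kmutant         mtx;
 *
 *      KeInitializeMutex(&mtx, 0);
 *      KeWaitForSingleObject(&mtx, 0, 0, FALSE, NULL);
 *      ...
 *      KeReleaseMutex(&mtx, FALSE);
 */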
3219
3220 static uint32_t
3221 KeReadStateMutex(kmutex)
3222         kmutant                 *kmutex;
3223 {
3224         return(kmutex->km_header.dh_sigstate);
3225 }
3226
3227 void
3228 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
3229 {
3230         InitializeListHead((&kevent->k_header.dh_waitlisthead));
3231         kevent->k_header.dh_sigstate = state;
3232         if (type == EVENT_TYPE_NOTIFY)
3233                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3234         else
3235                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3236         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3237         return;
3238 }
3239
3240 uint32_t
3241 KeResetEvent(kevent)
3242         nt_kevent               *kevent;
3243 {
3244         uint32_t                prevstate;
3245
3246         mtx_lock(&ntoskrnl_dispatchlock);
3247         prevstate = kevent->k_header.dh_sigstate;
3248         kevent->k_header.dh_sigstate = FALSE;
3249         mtx_unlock(&ntoskrnl_dispatchlock);
3250
3251         return(prevstate);
3252 }
3253
3254 uint32_t
3255 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
3256 {
3257         uint32_t                prevstate;
3258         wait_block              *w;
3259         nt_dispatch_header      *dh;
3260         struct thread           *td;
3261         wb_ext                  *we;
3262
3263         mtx_lock(&ntoskrnl_dispatchlock);
3264         prevstate = kevent->k_header.dh_sigstate;
3265         dh = &kevent->k_header;
3266
3267         if (IsListEmpty(&dh->dh_waitlisthead))
3268                 /*
3269                  * If there's nobody in the waitlist, just set
3270                  * the state to signalled.
3271                  */
3272                 dh->dh_sigstate = 1;
3273         else {
3274                 /*
3275                  * Get the first waiter. If this is a synchronization
3276                  * event, just wake up that one thread (don't bother
3277                  * setting the state to signalled since we're supposed
3278                  * to automatically clear synchronization events anyway).
3279                  *
3280                  * If it's a notification event, or the first
3281                  * waiter is doing a WAITTYPE_ALL wait, go through
3282                  * the full wait satisfaction process.
3283                  */
3284                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3285                     wait_block, wb_waitlist);
3286                 we = w->wb_ext;
3287                 td = we->we_td;
3288                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3289                     w->wb_waittype == WAITTYPE_ALL) {
3290                         if (prevstate == 0) {
3291                                 dh->dh_sigstate = 1;
3292                                 ntoskrnl_waittest(dh, increment);
3293                         }
3294                 } else {
3295                         w->wb_awakened |= TRUE;
3296                         cv_broadcastpri(&we->we_cv,
3297                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3298                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3299                 }
3300         }
3301
3302         mtx_unlock(&ntoskrnl_dispatchlock);
3303
3304         return(prevstate);
3305 }
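
/*
 * Editor's note: the following is an illustrative sketch, not part of
 * the original source. It shows how the event routines above are
 * typically consumed; the example_* names are hypothetical, and a real
 * NDIS binary would reach these functions through the stdcall wrappers
 * installed from the import table at the end of this file.
 */
#if 0
static nt_kevent        example_ev;

static void
example_waiter(void)
{
        /* Synchronization events auto-clear when a single waiter wakes. */
        KeInitializeEvent(&example_ev, EVENT_TYPE_SYNC, FALSE);
        KeWaitForSingleObject(&example_ev, 0, 0, FALSE, NULL);
}

static void
example_signaller(void)
{
        /* Wake at most one waiter; the previous state is returned. */
        KeSetEvent(&example_ev, IO_NO_INCREMENT, FALSE);
}
#endif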
3306
3307 void
3308 KeClearEvent(kevent)
3309         nt_kevent               *kevent;
3310 {
3311         kevent->k_header.dh_sigstate = FALSE;
3312         return;
3313 }
3314
3315 uint32_t
3316 KeReadStateEvent(kevent)
3317         nt_kevent               *kevent;
3318 {
3319         return(kevent->k_header.dh_sigstate);
3320 }
3321
3322 /*
3323  * The object manager in Windows is responsible for managing
3324  * references and access to various types of objects, including
3325  * device_objects, events, threads, timers and so on. However,
3326  * there's a difference in the way objects are handled in user
3327  * mode versus kernel mode.
3328  *
3329  * In user mode (i.e. Win32 applications), all objects are
3330  * managed by the object manager. For example, when you create
3331  * a timer or event object, you actually end up with an 
3332  * object_header (for the object manager's bookkeeping
3333  * purposes) and an object body (which contains the actual object
3334  * structure, e.g. ktimer, kevent, etc...). This allows Windows
3335  * to manage resource quotas and to enforce access restrictions
3336  * on basically every kind of system object handled by the kernel.
3337  *
3338  * However, in kernel mode, you only end up using the object
3339  * manager some of the time. For example, in a driver, you create
3340  * a timer object by simply allocating the memory for a ktimer
3341  * structure and initializing it with KeInitializeTimer(). Hence,
3342  * the timer has no object_header and no reference counting or
3343  * security/resource checks are done on it. The assumption in
3344  * this case is that if you're running in kernel mode, you know
3345  * what you're doing, and you're already at an elevated privilege
3346  * anyway.
3347  *
3348  * There are some exceptions to this. The two most important ones
3349  * for our purposes are device_objects and threads. We need to use
3350  * the object manager to do reference counting on device_objects,
3351  * and for threads, you can only get a pointer to a thread's
3352  * dispatch header by using ObReferenceObjectByHandle() on the
3353  * handle returned by PsCreateSystemThread().
3354  */
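
/*
 * Editor's note: an illustrative sketch (not in the original source)
 * of the distinction described above, using hypothetical example_*
 * helpers. A kernel-mode ktimer is just initialized in place, while a
 * thread handle must go through the object manager emulation below.
 */
#if 0
static void
example_kernel_objects(ndis_handle thread_handle)
{
        ktimer          example_timer;  /* no object_header, no refcount */
        void            *thread_obj;

        KeInitializeTimer(&example_timer);

        /* Access rights are ignored by this emulation, hence 0. */
        if (ObReferenceObjectByHandle(thread_handle, 0, NULL, 0,
            &thread_obj, NULL) == STATUS_SUCCESS)
                ObfDereferenceObject(thread_obj);
}
#endif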
3355
3356 static ndis_status
3357 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
3358         uint8_t accessmode, void **object, void **handleinfo)
3359 {
3360         nt_objref               *nr;
3361
3362         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3363         if (nr == NULL)
3364                 return(STATUS_INSUFFICIENT_RESOURCES);
3365
3366         InitializeListHead((&nr->no_dh.dh_waitlisthead));
3367         nr->no_obj = handle;
3368         nr->no_dh.dh_type = DISP_TYPE_THREAD;
3369         nr->no_dh.dh_sigstate = 0;
3370         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3371             sizeof(uint32_t));
3372         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3373         *object = nr;
3374
3375         return(STATUS_SUCCESS);
3376 }
3377
3378 static void
3379 ObfDereferenceObject(object)
3380         void                    *object;
3381 {
3382         nt_objref               *nr;
3383
3384         nr = object;
3385         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3386         free(nr, M_DEVBUF);
3387
3388         return;
3389 }
3390
3391 static uint32_t
3392 ZwClose(handle)
3393         ndis_handle             handle;
3394 {
3395         return(STATUS_SUCCESS);
3396 }
3397
3398 static uint32_t
3399 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3400         uint32_t                traceclass;
3401         void                    *traceinfo;
3402         uint32_t                infolen;
3403         uint32_t                reqlen;
3404         void                    *buf;
3405 {
3406         return(STATUS_NOT_FOUND);
3407 }
3408
3409 static uint32_t
3410 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3411         void *guid, uint16_t messagenum, ...)
3412 {
3413         return(STATUS_SUCCESS);
3414 }
3415
3416 static uint32_t
3417 IoWMIRegistrationControl(dobj, action)
3418         device_object           *dobj;
3419         uint32_t                action;
3420 {
3421         return(STATUS_SUCCESS);
3422 }
3423
3424 /*
3425  * This is here just in case the thread returns without calling
3426  * PsTerminateSystemThread().
3427  */
3428 static void
3429 ntoskrnl_thrfunc(arg)
3430         void                    *arg;
3431 {
3432         thread_context          *thrctx;
3433         uint32_t (*tfunc)(void *);
3434         void                    *tctx;
3435         uint32_t                rval;
3436
3437         thrctx = arg;
3438         tfunc = thrctx->tc_thrfunc;
3439         tctx = thrctx->tc_thrctx;
3440         free(thrctx, M_TEMP);
3441
3442         rval = MSCALL1(tfunc, tctx);
3443
3444         PsTerminateSystemThread(rval);
3445         return; /* notreached */
3446 }
3447
3448 static ndis_status
3449 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3450         clientid, thrfunc, thrctx)
3451         ndis_handle             *handle;
3452         uint32_t                reqaccess;
3453         void                    *objattrs;
3454         ndis_handle             phandle;
3455         void                    *clientid;
3456         void                    *thrfunc;
3457         void                    *thrctx;
3458 {
3459         int                     error;
3460         char                    tname[128];
3461         thread_context          *tc;
3462         struct proc             *p;
3463
3464         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3465         if (tc == NULL)
3466                 return(STATUS_INSUFFICIENT_RESOURCES);
3467
3468         tc->tc_thrctx = thrctx;
3469         tc->tc_thrfunc = thrfunc;
3470
3471         sprintf(tname, "windows kthread %d", ntoskrnl_kth);
3472         error = kthread_create(ntoskrnl_thrfunc, tc, &p,
3473             RFHIGHPID, NDIS_KSTACK_PAGES, tname);
3474
3475         if (error) {
3476                 free(tc, M_TEMP);
3477                 return(STATUS_INSUFFICIENT_RESOURCES);
3478         }
3479
3480         *handle = p;
3481         ntoskrnl_kth++;
3482
3483         return(STATUS_SUCCESS);
3484 }
3485
3486 /*
3487  * In Windows, the exit of a thread is an event that you're allowed
3488  * to wait on, assuming you've obtained a reference to the thread using
3489  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3490  * simulate this behavior is to register each thread we create in a
3491  * reference list, and if someone holds a reference to us, we poke
3492  * them.
3493  */
3494 static ndis_status
3495 PsTerminateSystemThread(status)
3496         ndis_status             status;
3497 {
3498         struct nt_objref        *nr;
3499
3500         mtx_lock(&ntoskrnl_dispatchlock);
3501         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3502                 if (nr->no_obj != curthread->td_proc)
3503                         continue;
3504                 nr->no_dh.dh_sigstate = 1;
3505                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3506                 break;
3507         }
3508         mtx_unlock(&ntoskrnl_dispatchlock);
3509
3510         ntoskrnl_kth--;
3511
3512 #if __FreeBSD_version < 502113
3513         mtx_lock(&Giant);
3514 #endif
3515         kthread_exit(0);
3516         return(0);      /* notreached */
3517 }
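
/*
 * Editor's note: an illustrative sketch (not in the original source)
 * of the wait-for-thread-exit pattern the reference list enables. The
 * example_* names are hypothetical and the Windows calling-convention
 * wrappers are omitted for brevity.
 */
#if 0
static uint32_t
example_thread(void *ctx)
{
        return (0);
}

static void
example_join_thread(void)
{
        ndis_handle     handle;
        void            *thread_obj;

        if (PsCreateSystemThread(&handle, 0, NULL, NULL, NULL,
            (void *)example_thread, NULL) != STATUS_SUCCESS)
                return;

        /* Turn the handle into a waitable object... */
        if (ObReferenceObjectByHandle(handle, 0, NULL, 0,
            &thread_obj, NULL) == STATUS_SUCCESS) {
                /* ...and block until PsTerminateSystemThread() signals it. */
                KeWaitForSingleObject(thread_obj, 0, 0, FALSE, NULL);
                ObfDereferenceObject(thread_obj);
        }
        ZwClose(handle);
}
#endif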
3518
3519 static uint32_t
3520 DbgPrint(char *fmt, ...)
3521 {
3522         va_list                 ap;
3523
3524         if (bootverbose) {
3525                 va_start(ap, fmt);
3526                 vprintf(fmt, ap);
3527         }
3528
3529         return(STATUS_SUCCESS);
3530 }
3531
3532 static void
3533 DbgBreakPoint(void)
3534 {
3535
3536 #if __FreeBSD_version < 502113
3537         Debugger("DbgBreakPoint(): breakpoint");
3538 #else
3539         kdb_enter_why(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
3540 #endif
3541 }
3542
3543 static void
3544 KeBugCheckEx(code, param1, param2, param3, param4)
3545     uint32_t                    code;
3546     u_long                      param1;
3547     u_long                      param2;
3548     u_long                      param3;
3549     u_long                      param4;
3550 {
3551         panic("KeBugCheckEx: STOP 0x%X", code);
3552 }
3553
3554 static void
3555 ntoskrnl_timercall(arg)
3556         void                    *arg;
3557 {
3558         ktimer                  *timer;
3559         struct timeval          tv;
3560         kdpc                    *dpc;
3561
3562         mtx_lock(&ntoskrnl_dispatchlock);
3563
3564         timer = arg;
3565
3566 #ifdef NTOSKRNL_DEBUG_TIMERS
3567         ntoskrnl_timer_fires++;
3568 #endif
3569         ntoskrnl_remove_timer(timer);
3570
3571         /*
3572          * This should never happen, but complain
3573          * if it does.
3574          */
3575
3576         if (timer->k_header.dh_inserted == FALSE) {
3577                 mtx_unlock(&ntoskrnl_dispatchlock);
3578                 printf("NTOS: timer %p fired even though "
3579                     "it was canceled\n", timer);
3580                 return;
3581         }
3582
3583         /* Mark the timer as no longer being on the timer queue. */
3584
3585         timer->k_header.dh_inserted = FALSE;
3586
3587         /* Now signal the object and satisfy any waits on it. */
3588
3589         timer->k_header.dh_sigstate = 1;
3590         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3591
3592         /*
3593          * If this is a periodic timer, re-arm it
3594          * so it will fire again. We do this before
3595          * queueing the deferred procedure call because
3596          * it's possible the DPC might cancel the timer,
3597          * in which case it would be wrong for us to
3598          * re-arm it afterwards.
3599          */
3600
3601         if (timer->k_period) {
3602                 tv.tv_sec = 0;
3603                 tv.tv_usec = timer->k_period * 1000;
3604                 timer->k_header.dh_inserted = TRUE;
3605                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3606 #ifdef NTOSKRNL_DEBUG_TIMERS
3607                 ntoskrnl_timer_reloads++;
3608 #endif
3609         }
3610
3611         dpc = timer->k_dpc;
3612
3613         mtx_unlock(&ntoskrnl_dispatchlock);
3614
3615         /* If there's a DPC associated with the timer, queue it up. */
3616
3617         if (dpc != NULL)
3618                 KeInsertQueueDpc(dpc, NULL, NULL);
3619
3620         return;
3621 }
3622
3623 #ifdef NTOSKRNL_DEBUG_TIMERS
3624 static int
3625 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3626 {
3627         int                     ret;
3628
3629         ret = 0;
3630         ntoskrnl_show_timers();
3631         return (sysctl_handle_int(oidp, &ret, 0, req));
3632 }
3633
3634 static void
3635 ntoskrnl_show_timers()
3636 {
3637         int                     i = 0;
3638         list_entry              *l;
3639
3640         mtx_lock_spin(&ntoskrnl_calllock);
3641         l = ntoskrnl_calllist.nle_flink;
3642         while(l != &ntoskrnl_calllist) {
3643                 i++;
3644                 l = l->nle_flink;
3645         }
3646         mtx_unlock_spin(&ntoskrnl_calllock);
3647
3648         printf("\n");
3649         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3650         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3651         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3652         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3653         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3654         printf("\n");
3655
3656         return;
3657 }
3658 #endif
3659
3660 /*
3661  * Must be called with dispatcher lock held.
3662  */
3663
3664 static void
3665 ntoskrnl_insert_timer(timer, ticks)
3666         ktimer                  *timer;
3667         int                     ticks;
3668 {
3669         callout_entry           *e;
3670         list_entry              *l;
3671         struct callout          *c;
3672
3673         /*
3674          * Try and allocate a timer.
3675          */
3676         mtx_lock_spin(&ntoskrnl_calllock);
3677         if (IsListEmpty(&ntoskrnl_calllist)) {
3678                 mtx_unlock_spin(&ntoskrnl_calllock);
3679 #ifdef NTOSKRNL_DEBUG_TIMERS
3680                 ntoskrnl_show_timers();
3681 #endif
3682                 panic("out of timers!");
3683         }
3684         l = RemoveHeadList(&ntoskrnl_calllist);
3685         mtx_unlock_spin(&ntoskrnl_calllock);
3686
3687         e = CONTAINING_RECORD(l, callout_entry, ce_list);
3688         c = &e->ce_callout;
3689
3690         timer->k_callout = c;
3691
3692         callout_init(c, CALLOUT_MPSAFE);
3693         callout_reset(c, ticks, ntoskrnl_timercall, timer);
3694
3695         return;
3696 }
3697
3698 static void
3699 ntoskrnl_remove_timer(timer)
3700         ktimer                  *timer;
3701 {
3702         callout_entry           *e;
3703
3704         e = (callout_entry *)timer->k_callout;
3705         callout_stop(timer->k_callout);
3706
3707         mtx_lock_spin(&ntoskrnl_calllock);
3708         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3709         mtx_unlock_spin(&ntoskrnl_calllock);
3710
3711         return;
3712 }
3713
3714 void
3715 KeInitializeTimer(timer)
3716         ktimer                  *timer;
3717 {
3718         if (timer == NULL)
3719                 return;
3720
3721         KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3722
3723         return;
3724 }
3725
3726 void
3727 KeInitializeTimerEx(timer, type)
3728         ktimer                  *timer;
3729         uint32_t                type;
3730 {
3731         if (timer == NULL)
3732                 return;
3733
3734         bzero((char *)timer, sizeof(ktimer));
3735         InitializeListHead((&timer->k_header.dh_waitlisthead));
3736         timer->k_header.dh_sigstate = FALSE;
3737         timer->k_header.dh_inserted = FALSE;
3738         if (type == EVENT_TYPE_NOTIFY)
3739                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3740         else
3741                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3742         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3743
3744         return;
3745 }
3746
3747 /*
3748  * DPC subsystem. A Windows Deferred Procedure Call has the following
3749  * properties:
3750  * - It runs at DISPATCH_LEVEL.
3751  * - It can have one of 3 importance values that control when it
3752  *   runs relative to other DPCs in the queue.
3753  * - On SMP systems, it can be set to run on a specific processor.
3754  * In order to satisfy the last property, we create a DPC thread for
3755  * each CPU in the system and bind it to that CPU. Each thread
3756  * maintains three queues with different importance levels, which
3757  * will be processed in order from lowest to highest.
3758  *
3759  * In Windows, interrupt handlers run as DPCs. (Not to be confused
3760  * with ISRs, which run in interrupt context and can preempt DPCs.)
3761  * Interrupt-handler DPCs are given the highest importance so that
3762  * they'll take precedence over timers and other things.
3763  */
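
/*
 * Editor's note: an illustrative sketch (not in the original source)
 * of the usual driver-side DPC life cycle against the queues managed
 * below. The example_* names are hypothetical.
 */
#if 0
static kdpc     example_dpc;

static void
example_dpc_func(kdpc *dpc, void *ctx, void *sysarg1, void *sysarg2)
{
        /* Runs in one of the DPC threads created by this module. */
}

static void
example_dpc_usage(void)
{
        KeInitializeDpc(&example_dpc, (void *)example_dpc_func, NULL);
        KeSetImportanceDpc(&example_dpc, KDPC_IMPORTANCE_HIGH);

        /* Typically done from the ISR path; duplicate inserts are refused. */
        KeInsertQueueDpc(&example_dpc, NULL, NULL);

        /* On teardown: drop anything still queued, then drain the queues. */
        KeRemoveQueueDpc(&example_dpc);
        KeFlushQueuedDpcs();
}
#endif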
3764
3765 static void
3766 ntoskrnl_dpc_thread(arg)
3767         void                    *arg;
3768 {
3769         kdpc_queue              *kq;
3770         kdpc                    *d;
3771         list_entry              *l;
3772         uint8_t                 irql;
3773
3774         kq = arg;
3775
3776         InitializeListHead(&kq->kq_disp);
3777         kq->kq_td = curthread;
3778         kq->kq_exit = 0;
3779         kq->kq_running = FALSE;
3780         KeInitializeSpinLock(&kq->kq_lock);
3781         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3782         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3783
3784         /*
3785          * Elevate our priority. DPCs are used to run interrupt
3786          * handlers, and they should trigger as soon as possible
3787          * once scheduled by an ISR.
3788          */
3789
3790         thread_lock(curthread);
3791 #ifdef NTOSKRNL_MULTIPLE_DPCS
3792 #if __FreeBSD_version >= 502102
3793         sched_bind(curthread, kq->kq_cpu);
3794 #endif
3795 #endif
3796         sched_prio(curthread, PRI_MIN_KERN);
3797 #if __FreeBSD_version < 600000
3798         curthread->td_base_pri = PRI_MIN_KERN;
3799 #endif
3800         thread_unlock(curthread);
3801
3802         while (1) {
3803                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3804
3805                 KeAcquireSpinLock(&kq->kq_lock, &irql);
3806
3807                 if (kq->kq_exit) {
3808                         kq->kq_exit = 0;
3809                         KeReleaseSpinLock(&kq->kq_lock, irql);
3810                         break;
3811                 }
3812
3813                 kq->kq_running = TRUE;
3814
3815                 while (!IsListEmpty(&kq->kq_disp)) {
3816                         l = RemoveHeadList((&kq->kq_disp));
3817                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3818                         InitializeListHead((&d->k_dpclistentry));
3819                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3820                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3821                             d->k_sysarg1, d->k_sysarg2);
3822                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3823                 }
3824
3825                 kq->kq_running = FALSE;
3826
3827                 KeReleaseSpinLock(&kq->kq_lock, irql);
3828
3829                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3830         }
3831
3832 #if __FreeBSD_version < 502113
3833         mtx_lock(&Giant);
3834 #endif
3835         kthread_exit(0);
3836         return; /* notreached */
3837 }
3838
3839 static void
3840 ntoskrnl_destroy_dpc_threads(void)
3841 {
3842         kdpc_queue              *kq;
3843         kdpc                    dpc;
3844         int                     i;
3845
3846         kq = kq_queues;
3847 #ifdef NTOSKRNL_MULTIPLE_DPCS
3848         for (i = 0; i < mp_ncpus; i++) {
3849 #else
3850         for (i = 0; i < 1; i++) {
3851 #endif
3852                 kq += i;
3853
3854                 kq->kq_exit = 1;
3855                 KeInitializeDpc(&dpc, NULL, NULL);
3856                 KeSetTargetProcessorDpc(&dpc, i);
3857                 KeInsertQueueDpc(&dpc, NULL, NULL);
3858                 while (kq->kq_exit)
3859                         tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
3860         }
3861
3862         return;
3863 }
3864
3865 static uint8_t
3866 ntoskrnl_insert_dpc(head, dpc)
3867         list_entry              *head;
3868         kdpc                    *dpc;
3869 {
3870         list_entry              *l;
3871         kdpc                    *d;
3872
3873         l = head->nle_flink;
3874         while (l != head) {
3875                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3876                 if (d == dpc)
3877                         return(FALSE);
3878                 l = l->nle_flink;
3879         }
3880
3881         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3882                 InsertTailList((head), (&dpc->k_dpclistentry));
3883         else
3884                 InsertHeadList((head), (&dpc->k_dpclistentry));
3885
3886         return (TRUE);
3887 }
3888
3889 void
3890 KeInitializeDpc(dpc, dpcfunc, dpcctx)
3891         kdpc                    *dpc;
3892         void                    *dpcfunc;
3893         void                    *dpcctx;
3894 {
3895
3896         if (dpc == NULL)
3897                 return;
3898
3899         dpc->k_deferedfunc = dpcfunc;
3900         dpc->k_deferredctx = dpcctx;
3901         dpc->k_num = KDPC_CPU_DEFAULT;
3902         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3903         InitializeListHead((&dpc->k_dpclistentry));
3904
3905         return;
3906 }
3907
3908 uint8_t
3909 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
3910         kdpc                    *dpc;
3911         void                    *sysarg1;
3912         void                    *sysarg2;
3913 {
3914         kdpc_queue              *kq;
3915         uint8_t                 r;
3916         uint8_t                 irql;
3917
3918         if (dpc == NULL)
3919                 return(FALSE);
3920
3921         kq = kq_queues;
3922
3923 #ifdef NTOSKRNL_MULTIPLE_DPCS
3924         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3925
3926         /*
3927          * By default, the DPC is queued to run on the same CPU
3928          * that scheduled it.
3929          */
3930
3931         if (dpc->k_num == KDPC_CPU_DEFAULT)
3932                 kq += curthread->td_oncpu;
3933         else
3934                 kq += dpc->k_num;
3935         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3936 #else
3937         KeAcquireSpinLock(&kq->kq_lock, &irql);
3938 #endif
3939
3940         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3941         if (r == TRUE) {
3942                 dpc->k_sysarg1 = sysarg1;
3943                 dpc->k_sysarg2 = sysarg2;
3944         }
3945         KeReleaseSpinLock(&kq->kq_lock, irql);
3946
3947         if (r == FALSE)
3948                 return(r);
3949
3950         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3951
3952         return(r);
3953 }
3954
3955 uint8_t
3956 KeRemoveQueueDpc(dpc)
3957         kdpc                    *dpc;
3958 {
3959         kdpc_queue              *kq;
3960         uint8_t                 irql;
3961
3962         if (dpc == NULL)
3963                 return(FALSE);
3964
3965 #ifdef NTOSKRNL_MULTIPLE_DPCS
3966         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3967
3968         kq = kq_queues + dpc->k_num;
3969
3970         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3971 #else
3972         kq = kq_queues;
3973         KeAcquireSpinLock(&kq->kq_lock, &irql);
3974 #endif
3975
3976         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
3977                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3978                 KeLowerIrql(irql);
3979                 return(FALSE);
3980         }
3981
3982         RemoveEntryList((&dpc->k_dpclistentry));
3983         InitializeListHead((&dpc->k_dpclistentry));
3984
3985         KeReleaseSpinLock(&kq->kq_lock, irql);
3986
3987         return(TRUE);
3988 }
3989
3990 void
3991 KeSetImportanceDpc(dpc, imp)
3992         kdpc                    *dpc;
3993         uint32_t                imp;
3994 {
3995         if (imp != KDPC_IMPORTANCE_LOW &&
3996             imp != KDPC_IMPORTANCE_MEDIUM &&
3997             imp != KDPC_IMPORTANCE_HIGH)
3998                 return;
3999
4000         dpc->k_importance = (uint8_t)imp;
4001         return;
4002 }
4003
4004 void
4005 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
4006 {
4007         if (cpu > mp_ncpus)
4008                 return;
4009
4010         dpc->k_num = cpu;
4011         return;
4012 }
4013
4014 void
4015 KeFlushQueuedDpcs(void)
4016 {
4017         kdpc_queue              *kq;
4018         int                     i;
4019
4020         /*
4021          * Poke each DPC queue and wait
4022          * for them to drain.
4023          */
4024
4025 #ifdef NTOSKRNL_MULTIPLE_DPCS
4026         for (i = 0; i < mp_ncpus; i++) {
4027 #else
4028         for (i = 0; i < 1; i++) {
4029 #endif
4030                 kq = kq_queues + i;
4031                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
4032                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
4033         }
4034
4035         return;
4036 }
4037
4038 uint32_t
4039 KeGetCurrentProcessorNumber(void)
4040 {
4041         return((uint32_t)curthread->td_oncpu);
4042 }
4043
4044 uint8_t
4045 KeSetTimerEx(timer, duetime, period, dpc)
4046         ktimer                  *timer;
4047         int64_t                 duetime;
4048         uint32_t                period;
4049         kdpc                    *dpc;
4050 {
4051         struct timeval          tv;
4052         uint64_t                curtime;
4053         uint8_t                 pending;
4054
4055         if (timer == NULL)
4056                 return(FALSE);
4057
4058         mtx_lock(&ntoskrnl_dispatchlock);
4059
4060         if (timer->k_header.dh_inserted == TRUE) {
4061                 ntoskrnl_remove_timer(timer);
4062 #ifdef NTOSKRNL_DEBUG_TIMERS
4063                 ntoskrnl_timer_cancels++;
4064 #endif
4065                 timer->k_header.dh_inserted = FALSE;
4066                 pending = TRUE;
4067         } else
4068                 pending = FALSE;
4069
4070         timer->k_duetime = duetime;
4071         timer->k_period = period;
4072         timer->k_header.dh_sigstate = FALSE;
4073         timer->k_dpc = dpc;
4074
4075         if (duetime < 0) {
4076                 tv.tv_sec = - (duetime) / 10000000;
4077                 tv.tv_usec = (- (duetime) / 10) -
4078                     (tv.tv_sec * 1000000);
4079         } else {
4080                 ntoskrnl_time(&curtime);
4081                 if (duetime < curtime)
4082                         tv.tv_sec = tv.tv_usec = 0;
4083                 else {
4084                         tv.tv_sec = ((duetime) - curtime) / 10000000;
4085                         tv.tv_usec = ((duetime) - curtime) / 10 -
4086                             (tv.tv_sec * 1000000);
4087                 }
4088         }
4089
4090         timer->k_header.dh_inserted = TRUE;
4091         ntoskrnl_insert_timer(timer, tvtohz(&tv));
4092 #ifdef NTOSKRNL_DEBUG_TIMERS
4093         ntoskrnl_timer_sets++;
4094 #endif
4095
4096         mtx_unlock(&ntoskrnl_dispatchlock);
4097
4098         return(pending);
4099 }
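
/*
 * Editor's note: an illustrative sketch (not in the original source).
 * The duetime argument is in 100-nanosecond units: a negative value is
 * relative to the present, a positive value is an absolute system time,
 * and the period is in milliseconds. A hypothetical periodic 500ms
 * timer with a DPC:
 */
#if 0
static ktimer   example_timer;
static kdpc     example_timer_dpc;

static void
example_arm_timer(void)
{
        /* First expiry 500ms from now (-500 * 10000 units), then every 500ms. */
        KeSetTimerEx(&example_timer, -500 * 10000LL, 500, &example_timer_dpc);
}
#endif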
4100
4101 uint8_t
4102 KeSetTimer(timer, duetime, dpc)
4103         ktimer                  *timer;
4104         int64_t                 duetime;
4105         kdpc                    *dpc;
4106 {
4107         return (KeSetTimerEx(timer, duetime, 0, dpc));
4108 }
4109
4110 /*
4111  * The Windows DDK documentation seems to say that cancelling
4112  * a timer that has a DPC will result in the DPC also being
4113  * cancelled, but this isn't really the case.
4114  */
4115
4116 uint8_t
4117 KeCancelTimer(timer)
4118         ktimer                  *timer;
4119 {
4120         uint8_t                 pending;
4121
4122         if (timer == NULL)
4123                 return(FALSE);
4124
4125         mtx_lock(&ntoskrnl_dispatchlock);
4126
4127         pending = timer->k_header.dh_inserted;
4128
4129         if (timer->k_header.dh_inserted == TRUE) {
4130                 timer->k_header.dh_inserted = FALSE;
4131                 ntoskrnl_remove_timer(timer);
4132 #ifdef NTOSKRNL_DEBUG_TIMERS
4133                 ntoskrnl_timer_cancels++;
4134 #endif
4135         }
4136
4137         mtx_unlock(&ntoskrnl_dispatchlock);
4138
4139         return(pending);
4140 }
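
/*
 * Editor's note: an illustrative sketch (not in the original source)
 * following from the comment above: cancelling the timer does not
 * cancel a DPC that has already been queued, so a driver tears both
 * down explicitly. Reuses the hypothetical example_timer objects from
 * the earlier note.
 */
#if 0
static void
example_disarm_timer(void)
{
        KeCancelTimer(&example_timer);
        /* The DPC may already have been queued by a firing timer. */
        KeRemoveQueueDpc(&example_timer_dpc);
        KeFlushQueuedDpcs();
}
#endif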
4141
4142 uint8_t
4143 KeReadStateTimer(timer)
4144         ktimer                  *timer;
4145 {
4146         return(timer->k_header.dh_sigstate);
4147 }
4148
4149 static void
4150 dummy()
4151 {
4152         printf ("ntoskrnl dummy called...\n");
4153         return;
4154 }
4155
4156
4157 image_patch_table ntoskrnl_functbl[] = {
4158         IMPORT_SFUNC(RtlZeroMemory, 2),
4159         IMPORT_SFUNC(RtlCopyMemory, 3),
4160         IMPORT_SFUNC(RtlCompareMemory, 3),
4161         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4162         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4163         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4164         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4165         IMPORT_SFUNC(RtlInitAnsiString, 2),
4166         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4167         IMPORT_SFUNC(RtlInitUnicodeString, 2),
4168         IMPORT_SFUNC(RtlFreeAnsiString, 1),
4169         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4170         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4171         IMPORT_CFUNC(sprintf, 0),
4172         IMPORT_CFUNC(vsprintf, 0),
4173         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4174         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4175         IMPORT_CFUNC(DbgPrint, 0),
4176         IMPORT_SFUNC(DbgBreakPoint, 0),
4177         IMPORT_SFUNC(KeBugCheckEx, 5),
4178         IMPORT_CFUNC(strncmp, 0),
4179         IMPORT_CFUNC(strcmp, 0),
4180         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4181         IMPORT_CFUNC(strncpy, 0),
4182         IMPORT_CFUNC(strcpy, 0),
4183         IMPORT_CFUNC(strlen, 0),
4184         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4185         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4186         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4187         IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
4188         IMPORT_CFUNC_MAP(strchr, index, 0),
4189         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4190         IMPORT_CFUNC(memcpy, 0),
4191         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4192         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4193         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4194         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4195         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4196         IMPORT_FFUNC(IofCallDriver, 2),
4197         IMPORT_FFUNC(IofCompleteRequest, 2),
4198         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4199         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4200         IMPORT_SFUNC(IoCancelIrp, 1),
4201         IMPORT_SFUNC(IoConnectInterrupt, 11),
4202         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4203         IMPORT_SFUNC(IoCreateDevice, 7),
4204         IMPORT_SFUNC(IoDeleteDevice, 1),
4205         IMPORT_SFUNC(IoGetAttachedDevice, 1),
4206         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4207         IMPORT_SFUNC(IoDetachDevice, 1),
4208         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4209         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4210         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4211         IMPORT_SFUNC(IoAllocateIrp, 2),
4212         IMPORT_SFUNC(IoReuseIrp, 2),
4213         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4214         IMPORT_SFUNC(IoFreeIrp, 1),
4215         IMPORT_SFUNC(IoInitializeIrp, 3),
4216         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4217         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4218         IMPORT_SFUNC(KeSynchronizeExecution, 3),
4219         IMPORT_SFUNC(KeWaitForSingleObject, 5),
4220         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4221         IMPORT_SFUNC(_allmul, 4),
4222         IMPORT_SFUNC(_alldiv, 4),
4223         IMPORT_SFUNC(_allrem, 4),
4224         IMPORT_RFUNC(_allshr, 0),
4225         IMPORT_RFUNC(_allshl, 0),
4226         IMPORT_SFUNC(_aullmul, 4),
4227         IMPORT_SFUNC(_aulldiv, 4),
4228         IMPORT_SFUNC(_aullrem, 4),
4229         IMPORT_RFUNC(_aullshr, 0),
4230         IMPORT_RFUNC(_aullshl, 0),
4231         IMPORT_CFUNC(atoi, 0),
4232         IMPORT_CFUNC(atol, 0),
4233         IMPORT_CFUNC(rand, 0),
4234         IMPORT_CFUNC(srand, 0),
4235         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4236         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4237         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4238         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4239         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4240         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4241         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4242         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4243         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4244         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4245         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4246         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4247         IMPORT_SFUNC(ExQueryDepthSList, 1),
4248         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4249                 InterlockedPopEntrySList, 1),
4250         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4251                 InterlockedPushEntrySList, 2),
4252         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4253         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4254         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4255         IMPORT_SFUNC(ExFreePool, 1),
4256 #ifdef __i386__
4257         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4258         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
4259         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4260 #else
4261         /*
4262          * For AMD64, we can get away with just mapping
4263          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4264          * because the calling conventions end up being the same.
4265          * On i386, we have to be careful because KfAcquireSpinLock()
4266          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4267          */
4268         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4269         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4270         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4271 #endif
4272         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4273         IMPORT_FFUNC(InterlockedIncrement, 1),
4274         IMPORT_FFUNC(InterlockedDecrement, 1),
4275         IMPORT_FFUNC(InterlockedExchange, 2),
4276         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4277         IMPORT_SFUNC(IoAllocateMdl, 5),
4278         IMPORT_SFUNC(IoFreeMdl, 1),
4279         IMPORT_SFUNC(MmAllocateContiguousMemory, 2),
4280         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5),
4281         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4282         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4283         IMPORT_SFUNC_MAP(MmGetPhysicalAddress, pmap_kextract, 1),
4284         IMPORT_SFUNC(MmSizeOfMdl, 1),
4285         IMPORT_SFUNC(MmMapLockedPages, 2),
4286         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4287         IMPORT_SFUNC(MmUnmapLockedPages, 2),
4288         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4289         IMPORT_SFUNC(MmIsAddressValid, 1),
4290         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4291         IMPORT_SFUNC(MmUnmapIoSpace, 2),
4292         IMPORT_SFUNC(KeInitializeSpinLock, 1),
4293         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4294         IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
4295         IMPORT_SFUNC(IoGetDeviceProperty, 5),
4296         IMPORT_SFUNC(IoAllocateWorkItem, 1),
4297         IMPORT_SFUNC(IoFreeWorkItem, 1),
4298         IMPORT_SFUNC(IoQueueWorkItem, 4),
4299         IMPORT_SFUNC(ExQueueWorkItem, 2),
4300         IMPORT_SFUNC(ntoskrnl_workitem, 2),
4301         IMPORT_SFUNC(KeInitializeMutex, 2),
4302         IMPORT_SFUNC(KeReleaseMutex, 2),
4303         IMPORT_SFUNC(KeReadStateMutex, 1),
4304         IMPORT_SFUNC(KeInitializeEvent, 3),
4305         IMPORT_SFUNC(KeSetEvent, 3),
4306         IMPORT_SFUNC(KeResetEvent, 1),
4307         IMPORT_SFUNC(KeClearEvent, 1),
4308         IMPORT_SFUNC(KeReadStateEvent, 1),
4309         IMPORT_SFUNC(KeInitializeTimer, 1),
4310         IMPORT_SFUNC(KeInitializeTimerEx, 2),
4311         IMPORT_SFUNC(KeSetTimer, 3),
4312         IMPORT_SFUNC(KeSetTimerEx, 4),
4313         IMPORT_SFUNC(KeCancelTimer, 1),
4314         IMPORT_SFUNC(KeReadStateTimer, 1),
4315         IMPORT_SFUNC(KeInitializeDpc, 3),
4316         IMPORT_SFUNC(KeInsertQueueDpc, 3),
4317         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4318         IMPORT_SFUNC(KeSetImportanceDpc, 2),
4319         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4320         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4321         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4322         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4323         IMPORT_FFUNC(ObfDereferenceObject, 1),
4324         IMPORT_SFUNC(ZwClose, 1),
4325         IMPORT_SFUNC(PsCreateSystemThread, 7),
4326         IMPORT_SFUNC(PsTerminateSystemThread, 1),
4327         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4328         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4329         IMPORT_CFUNC(WmiTraceMessage, 0),
4330         IMPORT_SFUNC(KeQuerySystemTime, 1),
4331         IMPORT_CFUNC(KeTickCount, 0),
4332
4333         /*
4334          * This last entry is a catch-all for any function we haven't
4335          * implemented yet. The PE import list patching routine will
4336          * use it for any function that doesn't have an explicit match
4337          * in this table.
4338          */
4339
4340         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4341
4342         /* End of list. */
4343
4344         { NULL, NULL, NULL }
4345 };