1 /*-
2  * Copyright (c) 2003
3  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/ctype.h>
37 #include <sys/unistd.h>
38 #include <sys/param.h>
39 #include <sys/types.h>
40 #include <sys/errno.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45
46 #include <sys/callout.h>
47 #include <sys/kdb.h>
48 #include <sys/kernel.h>
49 #include <sys/proc.h>
50 #include <sys/condvar.h>
51 #include <sys/kthread.h>
52 #include <sys/module.h>
53 #include <sys/smp.h>
54 #include <sys/sched.h>
55 #include <sys/sysctl.h>
56
57 #include <machine/atomic.h>
58 #include <machine/bus.h>
59 #include <machine/stdarg.h>
60 #include <machine/resource.h>
61
62 #include <sys/bus.h>
63 #include <sys/rman.h>
64
65 #include <vm/vm.h>
66 #include <vm/vm_param.h>
67 #include <vm/pmap.h>
68 #include <vm/uma.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_map.h>
71 #include <vm/vm_extern.h>
72
73 #include <compat/ndis/pe_var.h>
74 #include <compat/ndis/cfg_var.h>
75 #include <compat/ndis/resource_var.h>
76 #include <compat/ndis/ntoskrnl_var.h>
77 #include <compat/ndis/hal_var.h>
78 #include <compat/ndis/ndis_var.h>
79
80 #ifdef NTOSKRNL_DEBUG_TIMERS
81 static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
82
83 SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
84         sysctl_show_timers, "I", "Show ntoskrnl timer stats");
85 #endif
86
87 struct kdpc_queue {
88         list_entry              kq_disp;
89         struct thread           *kq_td;
90         int                     kq_cpu;
91         int                     kq_exit;
92         int                     kq_running;
93         kspin_lock              kq_lock;
94         nt_kevent               kq_proc;
95         nt_kevent               kq_done;
96 };
97
98 typedef struct kdpc_queue kdpc_queue;
99
100 struct wb_ext {
101         struct cv               we_cv;
102         struct thread           *we_td;
103 };
104
105 typedef struct wb_ext wb_ext;
106
107 #define NTOSKRNL_TIMEOUTS       256
108 #ifdef NTOSKRNL_DEBUG_TIMERS
109 static uint64_t ntoskrnl_timer_fires;
110 static uint64_t ntoskrnl_timer_sets;
111 static uint64_t ntoskrnl_timer_reloads;
112 static uint64_t ntoskrnl_timer_cancels;
113 #endif
114
115 struct callout_entry {
116         struct callout          ce_callout;
117         list_entry              ce_list;
118 };
119
120 typedef struct callout_entry callout_entry;
121
122 static struct list_entry ntoskrnl_calllist;
123 static struct mtx ntoskrnl_calllock;
124 struct kuser_shared_data kuser_shared_data;
125
126 static struct list_entry ntoskrnl_intlist;
127 static kspin_lock ntoskrnl_intlock;
128
129 static uint8_t RtlEqualUnicodeString(unicode_string *,
130         unicode_string *, uint8_t);
131 static void RtlCopyUnicodeString(unicode_string *,
132         unicode_string *);
133 static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
134          void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
135 static irp *IoBuildAsynchronousFsdRequest(uint32_t,
136         device_object *, void *, uint32_t, uint64_t *, io_status_block *);
137 static irp *IoBuildDeviceIoControlRequest(uint32_t,
138         device_object *, void *, uint32_t, void *, uint32_t,
139         uint8_t, nt_kevent *, io_status_block *);
140 static irp *IoAllocateIrp(uint8_t, uint8_t);
141 static void IoReuseIrp(irp *, uint32_t);
142 static void IoFreeIrp(irp *);
143 static void IoInitializeIrp(irp *, uint16_t, uint8_t);
144 static irp *IoMakeAssociatedIrp(irp *, uint8_t);
145 static uint32_t KeWaitForMultipleObjects(uint32_t,
146         nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
147         int64_t *, wait_block *);
148 static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
149 static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
150 static void ntoskrnl_satisfy_multiple_waits(wait_block *);
151 static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
152 static void ntoskrnl_insert_timer(ktimer *, int);
153 static void ntoskrnl_remove_timer(ktimer *);
154 #ifdef NTOSKRNL_DEBUG_TIMERS
155 static void ntoskrnl_show_timers(void);
156 #endif
157 static void ntoskrnl_timercall(void *);
158 static void ntoskrnl_dpc_thread(void *);
159 static void ntoskrnl_destroy_dpc_threads(void);
160 static void ntoskrnl_destroy_workitem_threads(void);
161 static void ntoskrnl_workitem_thread(void *);
162 static void ntoskrnl_workitem(device_object *, void *);
163 static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
164 static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
165 static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
166 static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
167 static uint16_t READ_REGISTER_USHORT(uint16_t *);
168 static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
169 static uint32_t READ_REGISTER_ULONG(uint32_t *);
170 static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
171 static uint8_t READ_REGISTER_UCHAR(uint8_t *);
172 static int64_t _allmul(int64_t, int64_t);
173 static int64_t _alldiv(int64_t, int64_t);
174 static int64_t _allrem(int64_t, int64_t);
175 static int64_t _allshr(int64_t, uint8_t);
176 static int64_t _allshl(int64_t, uint8_t);
177 static uint64_t _aullmul(uint64_t, uint64_t);
178 static uint64_t _aulldiv(uint64_t, uint64_t);
179 static uint64_t _aullrem(uint64_t, uint64_t);
180 static uint64_t _aullshr(uint64_t, uint8_t);
181 static uint64_t _aullshl(uint64_t, uint8_t);
182 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
183 static slist_entry *ntoskrnl_popsl(slist_header *);
184 static void ExInitializePagedLookasideList(paged_lookaside_list *,
185         lookaside_alloc_func *, lookaside_free_func *,
186         uint32_t, size_t, uint32_t, uint16_t);
187 static void ExDeletePagedLookasideList(paged_lookaside_list *);
188 static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
189         lookaside_alloc_func *, lookaside_free_func *,
190         uint32_t, size_t, uint32_t, uint16_t);
191 static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
192 static slist_entry
193         *ExInterlockedPushEntrySList(slist_header *,
194         slist_entry *, kspin_lock *);
195 static slist_entry
196         *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
197 static uint32_t InterlockedIncrement(volatile uint32_t *);
198 static uint32_t InterlockedDecrement(volatile uint32_t *);
199 static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
200 static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
201 static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
202         uint64_t, uint64_t, uint64_t, enum nt_caching_type);
203 static void MmFreeContiguousMemory(void *);
204 static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
205         enum nt_caching_type);
206 static uint32_t MmSizeOfMdl(void *, size_t);
207 static void *MmMapLockedPages(mdl *, uint8_t);
208 static void *MmMapLockedPagesSpecifyCache(mdl *,
209         uint8_t, uint32_t, void *, uint32_t, uint32_t);
210 static void MmUnmapLockedPages(void *, mdl *);
211 static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
212 static void RtlZeroMemory(void *, size_t);
213 static void RtlCopyMemory(void *, const void *, size_t);
214 static size_t RtlCompareMemory(const void *, const void *, size_t);
215 static ndis_status RtlUnicodeStringToInteger(unicode_string *,
216         uint32_t, uint32_t *);
217 static int atoi (const char *);
218 static long atol (const char *);
219 static int rand(void);
220 static void srand(unsigned int);
221 static void KeQuerySystemTime(uint64_t *);
222 static uint32_t KeTickCount(void);
223 static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
224 static void ntoskrnl_thrfunc(void *);
225 static ndis_status PsCreateSystemThread(ndis_handle *,
226         uint32_t, void *, ndis_handle, void *, void *, void *);
227 static ndis_status PsTerminateSystemThread(ndis_status);
228 static ndis_status IoGetDeviceObjectPointer(unicode_string *,
229         uint32_t, void *, device_object *);
230 static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
231         uint32_t, void *, uint32_t *);
232 static void KeInitializeMutex(kmutant *, uint32_t);
233 static uint32_t KeReleaseMutex(kmutant *, uint8_t);
234 static uint32_t KeReadStateMutex(kmutant *);
235 static ndis_status ObReferenceObjectByHandle(ndis_handle,
236         uint32_t, void *, uint8_t, void **, void **);
237 static void ObfDereferenceObject(void *);
238 static uint32_t ZwClose(ndis_handle);
239 static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
240         uint32_t, void *);
241 static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
242 static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
243 static void *ntoskrnl_memset(void *, int, size_t);
244 static void *ntoskrnl_memmove(void *, void *, size_t);
245 static void *ntoskrnl_memchr(void *, unsigned char, size_t);
246 static char *ntoskrnl_strstr(char *, char *);
247 static char *ntoskrnl_strncat(char *, char *, size_t);
248 static int ntoskrnl_toupper(int);
249 static int ntoskrnl_tolower(int);
250 static funcptr ntoskrnl_findwrap(funcptr);
251 static uint32_t DbgPrint(char *, ...);
252 static void DbgBreakPoint(void);
253 static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
254 static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
255 static int32_t KeSetPriorityThread(struct thread *, int32_t);
256 static void dummy(void);
257
258 static struct mtx ntoskrnl_dispatchlock;
259 static struct mtx ntoskrnl_interlock;
260 static kspin_lock ntoskrnl_cancellock;
261 static int ntoskrnl_kth = 0;
262 static struct nt_objref_head ntoskrnl_reflist;
263 static uma_zone_t mdl_zone;
264 static uma_zone_t iw_zone;
265 static struct kdpc_queue *kq_queues;
266 static struct kdpc_queue *wq_queues;
267 static int wq_idx = 0;
268
269 int
270 ntoskrnl_libinit()
271 {
272         image_patch_table       *patch;
273         int                     error;
274         struct proc             *p;
275         kdpc_queue              *kq;
276         callout_entry           *e;
277         int                     i;
278
279         mtx_init(&ntoskrnl_dispatchlock,
280             "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
281         mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
282         KeInitializeSpinLock(&ntoskrnl_cancellock);
283         KeInitializeSpinLock(&ntoskrnl_intlock);
284         TAILQ_INIT(&ntoskrnl_reflist);
285
286         InitializeListHead(&ntoskrnl_calllist);
287         InitializeListHead(&ntoskrnl_intlist);
288         mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
289
290         kq_queues = ExAllocatePoolWithTag(NonPagedPool,
291 #ifdef NTOSKRNL_MULTIPLE_DPCS
292             sizeof(kdpc_queue) * mp_ncpus, 0);
293 #else
294             sizeof(kdpc_queue), 0);
295 #endif
296
297         if (kq_queues == NULL)
298                 return (ENOMEM);
299
300         wq_queues = ExAllocatePoolWithTag(NonPagedPool,
301             sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
302
303         if (wq_queues == NULL)
304                 return (ENOMEM);
305
306 #ifdef NTOSKRNL_MULTIPLE_DPCS
307         bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
308 #else
309         bzero((char *)kq_queues, sizeof(kdpc_queue));
310 #endif
311         bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);
312
313         /*
314          * Launch the DPC threads.
315          */
316
317 #ifdef NTOSKRNL_MULTIPLE_DPCS
318         for (i = 0; i < mp_ncpus; i++) {
319 #else
320         for (i = 0; i < 1; i++) {
321 #endif
322                 kq = kq_queues + i;
323                 kq->kq_cpu = i;
324                 error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
325                     RFHIGHPID, NDIS_KSTACK_PAGES, "Windows DPC %d", i);
326                 if (error)
327                         panic("failed to launch DPC thread");
328         }
329
330         /*
331          * Launch the workitem threads.
332          */
333
334         for (i = 0; i < WORKITEM_THREADS; i++) {
335                 kq = wq_queues + i;
336                 error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
337                     RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Workitem %d", i);
338                 if (error)
339                         panic("failed to launch workitem thread");
340         }
341
342         patch = ntoskrnl_functbl;
343         while (patch->ipt_func != NULL) {
344                 windrv_wrap((funcptr)patch->ipt_func,
345                     (funcptr *)&patch->ipt_wrap,
346                     patch->ipt_argcnt, patch->ipt_ftype);
347                 patch++;
348         }
349
350         for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
351                 e = ExAllocatePoolWithTag(NonPagedPool,
352                     sizeof(callout_entry), 0);
353                 if (e == NULL)
354                         panic("failed to allocate timeouts");
355                 mtx_lock_spin(&ntoskrnl_calllock);
356                 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
357                 mtx_unlock_spin(&ntoskrnl_calllock);
358         }
359
360         /*
361          * MDLs are supposed to be variable size (they describe
362          * buffers containing some number of pages, but we don't
363          * know ahead of time how many pages that will be). But
364          * always allocating them off the heap is very slow. As
365          * a compromise, we create an MDL UMA zone big enough to
366          * handle any buffer requiring up to 16 pages, and we
367          * use those for any MDLs for buffers of 16 pages or less
368          * in size. For buffers larger than that (which we assume
369          * will be few and far between), we allocate the MDLs off
370          * the heap. (A sketch of this policy follows this function.)
371          */
372
373         mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
374             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
375
376         iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
377             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
378
379         return (0);
380 }
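
/*
 * Illustrative sketch (editorial addition, not compiled): the MDL
 * allocation policy described in the comment in ntoskrnl_libinit()
 * above, roughly as an MDL allocator would apply it.  MDL_ZONE_SIZE,
 * MmSizeOfMdl() and mdl_zone are the names used in this file; "vaddr"
 * and "len" are placeholders.
 */
#if 0
        mdl                     *m;

        if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
                /* Larger than 16 pages: fall back to the heap. */
                m = ExAllocatePoolWithTag(NonPagedPool,
                    MmSizeOfMdl(vaddr, len), 0);
        else
                /* 16 pages or fewer: use the preallocated UMA zone. */
                m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
#endif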
381
382 int
383 ntoskrnl_libfini()
384 {
385         image_patch_table       *patch;
386         callout_entry           *e;
387         list_entry              *l;
388
389         patch = ntoskrnl_functbl;
390         while (patch->ipt_func != NULL) {
391                 windrv_unwrap(patch->ipt_wrap);
392                 patch++;
393         }
394
395         /* Stop the workitem queues. */
396         ntoskrnl_destroy_workitem_threads();
397         /* Stop the DPC queues. */
398         ntoskrnl_destroy_dpc_threads();
399
400         ExFreePool(kq_queues);
401         ExFreePool(wq_queues);
402
403         uma_zdestroy(mdl_zone);
404         uma_zdestroy(iw_zone);
405
406         mtx_lock_spin(&ntoskrnl_calllock);
407         while(!IsListEmpty(&ntoskrnl_calllist)) {
408                 l = RemoveHeadList(&ntoskrnl_calllist);
409                 e = CONTAINING_RECORD(l, callout_entry, ce_list);
410                 mtx_unlock_spin(&ntoskrnl_calllock);
411                 ExFreePool(e);
412                 mtx_lock_spin(&ntoskrnl_calllock);
413         }
414         mtx_unlock_spin(&ntoskrnl_calllock);
415
416         mtx_destroy(&ntoskrnl_dispatchlock);
417         mtx_destroy(&ntoskrnl_interlock);
418         mtx_destroy(&ntoskrnl_calllock);
419
420         return (0);
421 }
422
423 /*
424  * We need to be able to reference this externally from the wrapper;
425  * GCC only generates a local implementation of memset.
426  */
427 static void *
428 ntoskrnl_memset(buf, ch, size)
429         void                    *buf;
430         int                     ch;
431         size_t                  size;
432 {
433         return (memset(buf, ch, size));
434 }
435
436 static void *
437 ntoskrnl_memmove(dst, src, size)
438         void                    *src;
439         void                    *dst;
440         size_t                  size;
441 {
442         bcopy(src, dst, size);
443         return (dst);
444 }
445
446 static void *
447 ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
448 {
449         if (len != 0) {
450                 unsigned char *p = buf;
451
452                 do {
453                         if (*p++ == ch)
454                                 return (p - 1);
455                 } while (--len != 0);
456         }
457         return (NULL);
458 }
459
460 static char *
461 ntoskrnl_strstr(s, find)
462         char *s, *find;
463 {
464         char c, sc;
465         size_t len;
466
467         if ((c = *find++) != 0) {
468                 len = strlen(find);
469                 do {
470                         do {
471                                 if ((sc = *s++) == 0)
472                                         return (NULL);
473                         } while (sc != c);
474                 } while (strncmp(s, find, len) != 0);
475                 s--;
476         }
477         return ((char *)s);
478 }
479
480 /* Taken from libc */
481 static char *
482 ntoskrnl_strncat(dst, src, n)
483         char            *dst;
484         char            *src;
485         size_t          n;
486 {
487         if (n != 0) {
488                 char *d = dst;
489                 const char *s = src;
490
491                 while (*d != 0)
492                         d++;
493                 do {
494                         if ((*d = *s++) == 0)
495                                 break;
496                         d++;
497                 } while (--n != 0);
498                 *d = 0;
499         }
500         return (dst);
501 }
502
503 static int
504 ntoskrnl_toupper(c)
505         int                     c;
506 {
507         return (toupper(c));
508 }
509
510 static int
511 ntoskrnl_tolower(c)
512         int                     c;
513 {
514         return (tolower(c));
515 }
516
517 static uint8_t
518 RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
519         uint8_t caseinsensitive)
520 {
521         int                     i;
522
523         if (str1->us_len != str2->us_len)
524                 return (FALSE);
525
526         for (i = 0; i < str1->us_len; i++) {
527                 if (caseinsensitive == TRUE) {
528                         if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
529                             toupper((char)(str2->us_buf[i] & 0xFF)))
530                                 return (FALSE);
531                 } else {
532                         if (str1->us_buf[i] != str2->us_buf[i])
533                                 return (FALSE);
534                 }
535         }
536
537         return (TRUE);
538 }
539
540 static void
541 RtlCopyUnicodeString(dest, src)
542         unicode_string          *dest;
543         unicode_string          *src;
544 {
545
546         if (dest->us_maxlen >= src->us_len)
547                 dest->us_len = src->us_len;
548         else
549                 dest->us_len = dest->us_maxlen;
550         memcpy(dest->us_buf, src->us_buf, dest->us_len);
551 }
552
553 static void
554 ntoskrnl_ascii_to_unicode(ascii, unicode, len)
555         char                    *ascii;
556         uint16_t                *unicode;
557         int                     len;
558 {
559         int                     i;
560         uint16_t                *ustr;
561
562         ustr = unicode;
563         for (i = 0; i < len; i++) {
564                 *ustr = (uint16_t)ascii[i];
565                 ustr++;
566         }
567 }
568
569 static void
570 ntoskrnl_unicode_to_ascii(unicode, ascii, len)
571         uint16_t                *unicode;
572         char                    *ascii;
573         int                     len;
574 {
575         int                     i;
576         uint8_t                 *astr;
577
578         astr = ascii;
579         for (i = 0; i < len / 2; i++) {
580                 *astr = (uint8_t)unicode[i];
581                 astr++;
582         }
583 }
584
585 uint32_t
586 RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
587 {
588         if (dest == NULL || src == NULL)
589                 return (STATUS_INVALID_PARAMETER);
590
591         dest->as_len = src->us_len / 2;
592         if (dest->as_maxlen < dest->as_len)
593                 dest->as_len = dest->as_maxlen;
594
595         if (allocate == TRUE) {
596                 dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
597                     (src->us_len / 2) + 1, 0);
598                 if (dest->as_buf == NULL)
599                         return (STATUS_INSUFFICIENT_RESOURCES);
600                 dest->as_len = dest->as_maxlen = src->us_len / 2;
601         } else {
602                 dest->as_len = src->us_len / 2; /* XXX */
603                 if (dest->as_maxlen < dest->as_len)
604                         dest->as_len = dest->as_maxlen;
605         }
606
607         ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
608             dest->as_len * 2);
609
610         return (STATUS_SUCCESS);
611 }
612
613 uint32_t
614 RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
615         uint8_t allocate)
616 {
617         if (dest == NULL || src == NULL)
618                 return (STATUS_INVALID_PARAMETER);
619
620         if (allocate == TRUE) {
621                 dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
622                     src->as_len * 2, 0);
623                 if (dest->us_buf == NULL)
624                         return (STATUS_INSUFFICIENT_RESOURCES);
625                 dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
626         } else {
627                 dest->us_len = src->as_len * 2; /* XXX */
628                 if (dest->us_maxlen < dest->us_len)
629                         dest->us_len = dest->us_maxlen;
630         }
631
632         ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
633             dest->us_len / 2);
634
635         return (STATUS_SUCCESS);
636 }
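
/*
 * Hedged usage sketch (editorial addition, not compiled): converting a
 * counted ASCII string to a unicode_string with allocate == TRUE.  The
 * converted buffer comes from ExAllocatePoolWithTag(), so the caller
 * releases it with ExFreePool() when done.
 */
#if 0
        ansi_string             as;
        unicode_string          us;

        as.as_buf = "ndis0";
        as.as_len = as.as_maxlen = 5;
        if (RtlAnsiStringToUnicodeString(&us, &as, TRUE) == STATUS_SUCCESS) {
                /* ... use us.us_buf / us.us_len ... */
                ExFreePool(us.us_buf);
        }
#endif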
637
638 void *
639 ExAllocatePoolWithTag(pooltype, len, tag)
640         uint32_t                pooltype;
641         size_t                  len;
642         uint32_t                tag;
643 {
644         void                    *buf;
645
646         buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
647         if (buf == NULL)
648                 return (NULL);
649
650         return (buf);
651 }
652
653 void
654 ExFreePool(buf)
655         void                    *buf;
656 {
657         free(buf, M_DEVBUF);
658 }
659
660 uint32_t
661 IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
662         driver_object           *drv;
663         void                    *clid;
664         uint32_t                extlen;
665         void                    **ext;
666 {
667         custom_extension        *ce;
668
669         ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
670             + extlen, 0);
671
672         if (ce == NULL)
673                 return (STATUS_INSUFFICIENT_RESOURCES);
674
675         ce->ce_clid = clid;
676         InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
677
678         *ext = (void *)(ce + 1);
679
680         return (STATUS_SUCCESS);
681 }
682
683 void *
684 IoGetDriverObjectExtension(drv, clid)
685         driver_object           *drv;
686         void                    *clid;
687 {
688         list_entry              *e;
689         custom_extension        *ce;
690
691         /*
692          * Sanity check. Our dummy bus drivers don't have
693          * any driver extensions.
694          */
695
696         if (drv->dro_driverext == NULL)
697                 return (NULL);
698
699         e = drv->dro_driverext->dre_usrext.nle_flink;
700         while (e != &drv->dro_driverext->dre_usrext) {
701                 ce = (custom_extension *)e;
702                 if (ce->ce_clid == clid)
703                         return ((void *)(ce + 1));
704                 e = e->nle_flink;
705         }
706
707         return (NULL);
708 }
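
/*
 * Hedged usage sketch (editorial addition, not compiled): how a driver
 * pairs IoAllocateDriverObjectExtension() with
 * IoGetDriverObjectExtension().  The address of a file-local variable
 * serves as the client ID; "clid" and "struct my_ext" are illustrative
 * names only.
 */
#if 0
        static int              clid;
        struct my_ext           *ext;

        if (IoAllocateDriverObjectExtension(drv, &clid,
            sizeof(struct my_ext), (void **)&ext) != STATUS_SUCCESS)
                return (STATUS_INSUFFICIENT_RESOURCES);
        /* ... and later, to look the same extension up again ... */
        ext = IoGetDriverObjectExtension(drv, &clid);
#endif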
709
710
711 uint32_t
712 IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
713         uint32_t devtype, uint32_t devchars, uint8_t exclusive,
714         device_object **newdev)
715 {
716         device_object           *dev;
717
718         dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
719         if (dev == NULL)
720                 return (STATUS_INSUFFICIENT_RESOURCES);
721
722         dev->do_type = devtype;
723         dev->do_drvobj = drv;
724         dev->do_currirp = NULL;
725         dev->do_flags = 0;
726
727         if (devextlen) {
728                 dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
729                     devextlen, 0);
730
731                 if (dev->do_devext == NULL) {
732                         ExFreePool(dev);
733                         return (STATUS_INSUFFICIENT_RESOURCES);
734                 }
735
736                 bzero(dev->do_devext, devextlen);
737         } else
738                 dev->do_devext = NULL;
739
740         dev->do_size = sizeof(device_object) + devextlen;
741         dev->do_refcnt = 1;
742         dev->do_attacheddev = NULL;
743         dev->do_nextdev = NULL;
744         dev->do_devtype = devtype;
745         dev->do_stacksize = 1;
746         dev->do_alignreq = 1;
747         dev->do_characteristics = devchars;
748         dev->do_iotimer = NULL;
749         KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
750
751         /*
752          * Vpd is used for disk/tape devices,
753          * but we don't support those. (Yet.)
754          */
755         dev->do_vpb = NULL;
756
757         dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
758             sizeof(devobj_extension), 0);
759
760         if (dev->do_devobj_ext == NULL) {
761                 if (dev->do_devext != NULL)
762                         ExFreePool(dev->do_devext);
763                 ExFreePool(dev);
764                 return (STATUS_INSUFFICIENT_RESOURCES);
765         }
766
767         dev->do_devobj_ext->dve_type = 0;
768         dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
769         dev->do_devobj_ext->dve_devobj = dev;
770
771         /*
772          * Attach this device to the driver object's list
773          * of devices. Note: this is not the same as attaching
774          * the device to the device stack. The driver's AddDevice
775          * routine must explicitly call IoAddDeviceToDeviceStack()
776          * to do that.
777          */
778
779         if (drv->dro_devobj == NULL) {
780                 drv->dro_devobj = dev;
781                 dev->do_nextdev = NULL;
782         } else {
783                 dev->do_nextdev = drv->dro_devobj;
784                 drv->dro_devobj = dev;
785         }
786
787         *newdev = dev;
788
789         return (STATUS_SUCCESS);
790 }
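
/*
 * Hedged usage sketch (editorial addition, not compiled): an
 * AddDevice-style routine creating a function device object with a
 * private extension and then stacking it on top of the physical device
 * object, as the comment in IoCreateDevice() above notes.  "struct
 * my_ext", "pdo" and the zero devtype/devchars values are illustrative.
 */
#if 0
        device_object           *fdo;

        if (IoCreateDevice(drv, sizeof(struct my_ext), NULL, 0, 0,
            FALSE, &fdo) != STATUS_SUCCESS)
                return (STATUS_INSUFFICIENT_RESOURCES);
        fdo->do_flags |= DO_BUFFERED_IO;
        /* Creating the device does not stack it; that is a separate step. */
        IoAttachDeviceToDeviceStack(fdo, pdo);
#endif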
791
792 void
793 IoDeleteDevice(dev)
794         device_object           *dev;
795 {
796         device_object           *prev;
797
798         if (dev == NULL)
799                 return;
800
801         if (dev->do_devobj_ext != NULL)
802                 ExFreePool(dev->do_devobj_ext);
803
804         if (dev->do_devext != NULL)
805                 ExFreePool(dev->do_devext);
806
807         /* Unlink the device from the driver's device list. */
808
809         prev = dev->do_drvobj->dro_devobj;
810         if (prev == dev)
811                 dev->do_drvobj->dro_devobj = dev->do_nextdev;
812         else {
813                 while (prev->do_nextdev != dev)
814                         prev = prev->do_nextdev;
815                 prev->do_nextdev = dev->do_nextdev;
816         }
817
818         ExFreePool(dev);
819 }
820
821 device_object *
822 IoGetAttachedDevice(dev)
823         device_object           *dev;
824 {
825         device_object           *d;
826
827         if (dev == NULL)
828                 return (NULL);
829
830         d = dev;
831
832         while (d->do_attacheddev != NULL)
833                 d = d->do_attacheddev;
834
835         return (d);
836 }
837
838 static irp *
839 IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
840         uint32_t                func;
841         device_object           *dobj;
842         void                    *buf;
843         uint32_t                len;
844         uint64_t                *off;
845         nt_kevent               *event;
846         io_status_block         *status;
847 {
848         irp                     *ip;
849
850         ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
851         if (ip == NULL)
852                 return (NULL);
853         ip->irp_usrevent = event;
854
855         return (ip);
856 }
857
858 static irp *
859 IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
860         uint32_t                func;
861         device_object           *dobj;
862         void                    *buf;
863         uint32_t                len;
864         uint64_t                *off;
865         io_status_block         *status;
866 {
867         irp                     *ip;
868         io_stack_location       *sl;
869
870         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
871         if (ip == NULL)
872                 return (NULL);
873
874         ip->irp_usriostat = status;
875         ip->irp_tail.irp_overlay.irp_thread = NULL;
876
877         sl = IoGetNextIrpStackLocation(ip);
878         sl->isl_major = func;
879         sl->isl_minor = 0;
880         sl->isl_flags = 0;
881         sl->isl_ctl = 0;
882         sl->isl_devobj = dobj;
883         sl->isl_fileobj = NULL;
884         sl->isl_completionfunc = NULL;
885
886         ip->irp_userbuf = buf;
887
888         if (dobj->do_flags & DO_BUFFERED_IO) {
889                 ip->irp_assoc.irp_sysbuf =
890                     ExAllocatePoolWithTag(NonPagedPool, len, 0);
891                 if (ip->irp_assoc.irp_sysbuf == NULL) {
892                         IoFreeIrp(ip);
893                         return (NULL);
894                 }
895                 bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
896         }
897
898         if (dobj->do_flags & DO_DIRECT_IO) {
899                 ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
900                 if (ip->irp_mdl == NULL) {
901                         if (ip->irp_assoc.irp_sysbuf != NULL)
902                                 ExFreePool(ip->irp_assoc.irp_sysbuf);
903                         IoFreeIrp(ip);
904                         return (NULL);
905                 }
906                 ip->irp_userbuf = NULL;
907                 ip->irp_assoc.irp_sysbuf = NULL;
908         }
909
910         if (func == IRP_MJ_READ) {
911                 sl->isl_parameters.isl_read.isl_len = len;
912                 if (off != NULL)
913                         sl->isl_parameters.isl_read.isl_byteoff = *off;
914                 else
915                         sl->isl_parameters.isl_read.isl_byteoff = 0;
916         }
917
918         if (func == IRP_MJ_WRITE) {
919                 sl->isl_parameters.isl_write.isl_len = len;
920                 if (off != NULL)
921                         sl->isl_parameters.isl_write.isl_byteoff = *off;
922                 else
923                         sl->isl_parameters.isl_write.isl_byteoff = 0;
924         }
925
926         return (ip);
927 }
928
929 static irp *
930 IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
931         uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
932         nt_kevent *event, io_status_block *status)
933 {
934         irp                     *ip;
935         io_stack_location       *sl;
936         uint32_t                buflen;
937
938         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
939         if (ip == NULL)
940                 return (NULL);
941         ip->irp_usrevent = event;
942         ip->irp_usriostat = status;
943         ip->irp_tail.irp_overlay.irp_thread = NULL;
944
945         sl = IoGetNextIrpStackLocation(ip);
946         sl->isl_major = isinternal == TRUE ?
947             IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
948         sl->isl_minor = 0;
949         sl->isl_flags = 0;
950         sl->isl_ctl = 0;
951         sl->isl_devobj = dobj;
952         sl->isl_fileobj = NULL;
953         sl->isl_completionfunc = NULL;
954         sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
955         sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
956         sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
957
958         switch(IO_METHOD(iocode)) {
959         case METHOD_BUFFERED:
960                 if (ilen > olen)
961                         buflen = ilen;
962                 else
963                         buflen = olen;
964                 if (buflen) {
965                         ip->irp_assoc.irp_sysbuf =
966                             ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
967                         if (ip->irp_assoc.irp_sysbuf == NULL) {
968                                 IoFreeIrp(ip);
969                                 return (NULL);
970                         }
971                 }
972                 if (ilen && ibuf != NULL) {
973                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
974                         bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
975                             buflen - ilen);
976                 } else
977                         bzero(ip->irp_assoc.irp_sysbuf, ilen);
978                 ip->irp_userbuf = obuf;
979                 break;
980         case METHOD_IN_DIRECT:
981         case METHOD_OUT_DIRECT:
982                 if (ilen && ibuf != NULL) {
983                         ip->irp_assoc.irp_sysbuf =
984                             ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
985                         if (ip->irp_assoc.irp_sysbuf == NULL) {
986                                 IoFreeIrp(ip);
987                                 return (NULL);
988                         }
989                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
990                 }
991                 if (olen && obuf != NULL) {
992                         ip->irp_mdl = IoAllocateMdl(obuf, olen,
993                             FALSE, FALSE, ip);
994                         /*
995                          * Normally we would MmProbeAndLockPages()
996                          * here, but we don't have to in our
997                          * implementation.
998                          */
999                 }
1000                 break;
1001         case METHOD_NEITHER:
1002                 ip->irp_userbuf = obuf;
1003                 sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
1004                 break;
1005         default:
1006                 break;
1007         }
1008
1009         /*
1010          * Ideally, we should associate this IRP with the calling
1011          * thread here.
1012          */
1013
1014         return (ip);
1015 }
1016
1017 static irp *
1018 IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
1019 {
1020         irp                     *i;
1021
1022         i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
1023         if (i == NULL)
1024                 return (NULL);
1025
1026         IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
1027
1028         return (i);
1029 }
1030
1031 static irp *
1032 IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
1033 {
1034         irp                     *associrp;
1035
1036         associrp = IoAllocateIrp(stsize, FALSE);
1037         if (associrp == NULL)
1038                 return (NULL);
1039
1040         mtx_lock(&ntoskrnl_dispatchlock);
1041         associrp->irp_flags |= IRP_ASSOCIATED_IRP;
1042         associrp->irp_tail.irp_overlay.irp_thread =
1043             ip->irp_tail.irp_overlay.irp_thread;
1044         associrp->irp_assoc.irp_master = ip;
1045         mtx_unlock(&ntoskrnl_dispatchlock);
1046
1047         return (associrp);
1048 }
1049
1050 static void
1051 IoFreeIrp(ip)
1052         irp                     *ip;
1053 {
1054         ExFreePool(ip);
1055 }
1056
1057 static void
1058 IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
1059 {
1060         bzero((char *)io, IoSizeOfIrp(ssize));
1061         io->irp_size = psize;
1062         io->irp_stackcnt = ssize;
1063         io->irp_currentstackloc = ssize;
1064         InitializeListHead(&io->irp_thlist);
1065         io->irp_tail.irp_overlay.irp_csl =
1066             (io_stack_location *)(io + 1) + ssize;
1067 }
1068
1069 static void
1070 IoReuseIrp(ip, status)
1071         irp                     *ip;
1072         uint32_t                status;
1073 {
1074         uint8_t                 allocflags;
1075
1076         allocflags = ip->irp_allocflags;
1077         IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
1078         ip->irp_iostat.isb_status = status;
1079         ip->irp_allocflags = allocflags;
1080 }
1081
1082 void
1083 IoAcquireCancelSpinLock(uint8_t *irql)
1084 {
1085         KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
1086 }
1087
1088 void
1089 IoReleaseCancelSpinLock(uint8_t irql)
1090 {
1091         KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
1092 }
1093
1094 uint8_t
1095 IoCancelIrp(irp *ip)
1096 {
1097         cancel_func             cfunc;
1098         uint8_t                 cancelirql;
1099
1100         IoAcquireCancelSpinLock(&cancelirql);
1101         cfunc = IoSetCancelRoutine(ip, NULL);
1102         ip->irp_cancel = TRUE;
1103         if (cfunc == NULL) {
1104                 IoReleaseCancelSpinLock(cancelirql);
1105                 return (FALSE);
1106         }
1107         ip->irp_cancelirql = cancelirql;
1108         MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
1109         return (uint8_t)IoSetCancelValue(ip, TRUE);
1110 }
1111
1112 uint32_t
1113 IofCallDriver(dobj, ip)
1114         device_object           *dobj;
1115         irp                     *ip;
1116 {
1117         driver_object           *drvobj;
1118         io_stack_location       *sl;
1119         uint32_t                status;
1120         driver_dispatch         disp;
1121
1122         drvobj = dobj->do_drvobj;
1123
1124         if (ip->irp_currentstackloc <= 0)
1125                 panic("IoCallDriver(): out of stack locations");
1126
1127         IoSetNextIrpStackLocation(ip);
1128         sl = IoGetCurrentIrpStackLocation(ip);
1129
1130         sl->isl_devobj = dobj;
1131
1132         disp = drvobj->dro_dispatch[sl->isl_major];
1133         status = MSCALL2(disp, dobj, ip);
1134
1135         return (status);
1136 }
1137
1138 void
1139 IofCompleteRequest(irp *ip, uint8_t prioboost)
1140 {
1141         uint32_t                status;
1142         device_object           *dobj;
1143         io_stack_location       *sl;
1144         completion_func         cf;
1145
1146         KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
1147             ("incorrect IRP(%p) status (STATUS_PENDING)", ip));
1148
1149         sl = IoGetCurrentIrpStackLocation(ip);
1150         IoSkipCurrentIrpStackLocation(ip);
1151
1152         do {
1153                 if (sl->isl_ctl & SL_PENDING_RETURNED)
1154                         ip->irp_pendingreturned = TRUE;
1155
1156                 if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
1157                         dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
1158                 else
1159                         dobj = NULL;
1160
1161                 if (sl->isl_completionfunc != NULL &&
1162                     ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
1163                     sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
1164                     (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
1165                     sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
1166                     (ip->irp_cancel == TRUE &&
1167                     sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
1168                         cf = sl->isl_completionfunc;
1169                         status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
1170                         if (status == STATUS_MORE_PROCESSING_REQUIRED)
1171                                 return;
1172                 } else {
1173                         if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
1174                             (ip->irp_pendingreturned == TRUE))
1175                                 IoMarkIrpPending(ip);
1176                 }
1177
1178                 /* move to the next.  */
1179                 IoSkipCurrentIrpStackLocation(ip);
1180                 sl++;
1181         } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));
1182
1183         if (ip->irp_usriostat != NULL)
1184                 *ip->irp_usriostat = ip->irp_iostat;
1185         if (ip->irp_usrevent != NULL)
1186                 KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
1187
1188         /* Handle any associated IRPs. */
1189
1190         if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
1191                 uint32_t                masterirpcnt;
1192                 irp                     *masterirp;
1193                 mdl                     *m;
1194
1195                 masterirp = ip->irp_assoc.irp_master;
1196                 masterirpcnt =
1197                     InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);
1198
1199                 while ((m = ip->irp_mdl) != NULL) {
1200                         ip->irp_mdl = m->mdl_next;
1201                         IoFreeMdl(m);
1202                 }
1203                 IoFreeIrp(ip);
1204                 if (masterirpcnt == 0)
1205                         IoCompleteRequest(masterirp, IO_NO_INCREMENT);
1206                 return;
1207         }
1208
1209         /* With any luck, these conditions will never arise. */
1210
1211         if (ip->irp_flags & IRP_PAGING_IO) {
1212                 if (ip->irp_mdl != NULL)
1213                         IoFreeMdl(ip->irp_mdl);
1214                 IoFreeIrp(ip);
1215         }
1216 }
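
/*
 * Hedged usage sketch (editorial addition, not compiled): the canonical
 * synchronous device-control pattern built from the routines above.  If
 * the lower driver returns STATUS_PENDING, the caller sleeps on the
 * event until IofCompleteRequest() signals it.  "ioctl", "dobj" and the
 * buffer variables are placeholders.
 */
#if 0
        nt_kevent               event;
        io_status_block         iosb;
        irp                     *ip;
        uint32_t                status;

        KeInitializeEvent(&event, EVENT_TYPE_NOTIFY, FALSE);
        ip = IoBuildDeviceIoControlRequest(ioctl, dobj, inbuf, inlen,
            outbuf, outlen, FALSE, &event, &iosb);
        status = IofCallDriver(dobj, ip);
        if (status == STATUS_PENDING) {
                KeWaitForSingleObject(&event, 0, 0, FALSE, NULL);
                status = iosb.isb_status;
        }
#endif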
1217
1218 void
1219 ntoskrnl_intr(arg)
1220         void                    *arg;
1221 {
1222         kinterrupt              *iobj;
1223         uint8_t                 irql;
1224         uint8_t                 claimed;
1225         list_entry              *l;
1226
1227         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1228         l = ntoskrnl_intlist.nle_flink;
1229         while (l != &ntoskrnl_intlist) {
1230                 iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
1231                 claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
1232                 if (claimed == TRUE)
1233                         break;
1234                 l = l->nle_flink;
1235         }
1236         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1237 }
1238
1239 uint8_t
1240 KeAcquireInterruptSpinLock(iobj)
1241         kinterrupt              *iobj;
1242 {
1243         uint8_t                 irql;
1244         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1245         return (irql);
1246 }
1247
1248 void
1249 KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
1250 {
1251         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1252 }
1253
1254 uint8_t
1255 KeSynchronizeExecution(iobj, syncfunc, syncctx)
1256         kinterrupt              *iobj;
1257         void                    *syncfunc;
1258         void                    *syncctx;
1259 {
1260         uint8_t                 irql;
1261
1262         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1263         MSCALL1(syncfunc, syncctx);
1264         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1265
1266         return (TRUE);
1267 }
1268
1269 /*
1270  * IoConnectInterrupt() is passed only the interrupt vector and
1271  * irql that a device wants to use, but no device-specific tag
1272  * of any kind. This conflicts rather badly with FreeBSD's
1273  * bus_setup_intr(), which needs the device_t for the device
1274  * requesting interrupt delivery. In order to bypass this
1275  * inconsistency, we implement a second level of interrupt
1276  * dispatching on top of bus_setup_intr(). All devices use
1277  * ntoskrnl_intr() as their ISR, and any device requesting
1278  * interrupts will be registered with ntoskrnl_intr()'s interrupt
1279  * dispatch list. When an interrupt arrives, we walk the list
1280  * and invoke all the registered ISRs. This effectively makes all
1281  * interrupts shared, but it's the only way to duplicate the
1282  * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
1283  */
1284
1285 uint32_t
1286 IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
1287         kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
1288         uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
1289 {
1290         uint8_t                 curirql;
1291
1292         *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
1293         if (*iobj == NULL)
1294                 return (STATUS_INSUFFICIENT_RESOURCES);
1295
1296         (*iobj)->ki_svcfunc = svcfunc;
1297         (*iobj)->ki_svcctx = svcctx;
1298
1299         if (lock == NULL) {
1300                 KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
1301                 (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
1302         } else
1303                 (*iobj)->ki_lock = lock;
1304
1305         KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
1306         InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
1307         KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
1308
1309         return (STATUS_SUCCESS);
1310 }
1311
1312 void
1313 IoDisconnectInterrupt(iobj)
1314         kinterrupt              *iobj;
1315 {
1316         uint8_t                 irql;
1317
1318         if (iobj == NULL)
1319                 return;
1320
1321         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1322         RemoveEntryList((&iobj->ki_list));
1323         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1324
1325         ExFreePool(iobj);
1326 }
1327
1328 device_object *
1329 IoAttachDeviceToDeviceStack(src, dst)
1330         device_object           *src;
1331         device_object           *dst;
1332 {
1333         device_object           *attached;
1334
1335         mtx_lock(&ntoskrnl_dispatchlock);
1336         attached = IoGetAttachedDevice(dst);
1337         attached->do_attacheddev = src;
1338         src->do_attacheddev = NULL;
1339         src->do_stacksize = attached->do_stacksize + 1;
1340         mtx_unlock(&ntoskrnl_dispatchlock);
1341
1342         return (attached);
1343 }
1344
1345 void
1346 IoDetachDevice(topdev)
1347         device_object           *topdev;
1348 {
1349         device_object           *tail;
1350
1351         mtx_lock(&ntoskrnl_dispatchlock);
1352
1353         /* First, break the chain. */
1354         tail = topdev->do_attacheddev;
1355         if (tail == NULL) {
1356                 mtx_unlock(&ntoskrnl_dispatchlock);
1357                 return;
1358         }
1359         topdev->do_attacheddev = tail->do_attacheddev;
1360         topdev->do_refcnt--;
1361
1362         /* Now reduce the stacksize count for the tail objects. */
1363
1364         tail = topdev->do_attacheddev;
1365         while (tail != NULL) {
1366                 tail->do_stacksize--;
1367                 tail = tail->do_attacheddev;
1368         }
1369
1370         mtx_unlock(&ntoskrnl_dispatchlock);
1371 }
1372
1373 /*
1374  * For the most part, an object is considered signalled if
1375  * dh_sigstate == TRUE. The exception is for mutant objects
1376  * (mutexes), where the logic works like this:
1377  *
1378  * - If the thread already owns the object and sigstate is
1379  *   less than or equal to 0, then the object is considered
1380  *   signalled (recursive acquisition).
1381  * - If dh_sigstate == 1, the object is also considered
1382  *   signalled.
1383  */
1384
1385 static int
1386 ntoskrnl_is_signalled(obj, td)
1387         nt_dispatch_header      *obj;
1388         struct thread           *td;
1389 {
1390         kmutant                 *km;
1391
1392         if (obj->dh_type == DISP_TYPE_MUTANT) {
1393                 km = (kmutant *)obj;
1394                 if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
1395                     obj->dh_sigstate == 1)
1396                         return (TRUE);
1397                 return (FALSE);
1398         }
1399
1400         if (obj->dh_sigstate > 0)
1401                 return (TRUE);
1402         return (FALSE);
1403 }
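
/*
 * Worked example of the mutant rule above (editorial note):
 * KeInitializeMutex() starts dh_sigstate at 1.  A first acquisition
 * drops it to 0 and records km_ownerthread; a recursive acquisition by
 * the same thread still counts as signalled (sigstate <= 0 with
 * km_ownerthread == td) and drops it to -1.  Each KeReleaseMutex()
 * then increments it back toward 1.
 */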
1404
1405 static void
1406 ntoskrnl_satisfy_wait(obj, td)
1407         nt_dispatch_header      *obj;
1408         struct thread           *td;
1409 {
1410         kmutant                 *km;
1411
1412         switch (obj->dh_type) {
1413         case DISP_TYPE_MUTANT:
1414                 km = (struct kmutant *)obj;
1415                 obj->dh_sigstate--;
1416                 /*
1417                  * If sigstate reaches 0, the mutex is now
1418                  * non-signalled (the new thread owns it).
1419                  */
1420                 if (obj->dh_sigstate == 0) {
1421                         km->km_ownerthread = td;
1422                         if (km->km_abandoned == TRUE)
1423                                 km->km_abandoned = FALSE;
1424                 }
1425                 break;
1426         /* Synchronization objects get reset to unsignalled. */
1427         case DISP_TYPE_SYNCHRONIZATION_EVENT:
1428         case DISP_TYPE_SYNCHRONIZATION_TIMER:
1429                 obj->dh_sigstate = 0;
1430                 break;
1431         case DISP_TYPE_SEMAPHORE:
1432                 obj->dh_sigstate--;
1433                 break;
1434         default:
1435                 break;
1436         }
1437 }
1438
1439 static void
1440 ntoskrnl_satisfy_multiple_waits(wb)
1441         wait_block              *wb;
1442 {
1443         wait_block              *cur;
1444         struct thread           *td;
1445
1446         cur = wb;
1447         td = wb->wb_kthread;
1448
1449         do {
1450                 ntoskrnl_satisfy_wait(wb->wb_object, td);
1451                 cur->wb_awakened = TRUE;
1452                 cur = cur->wb_next;
1453         } while (cur != wb);
1454 }
1455
1456 /* Always called with dispatcher lock held. */
1457 static void
1458 ntoskrnl_waittest(obj, increment)
1459         nt_dispatch_header      *obj;
1460         uint32_t                increment;
1461 {
1462         wait_block              *w, *next;
1463         list_entry              *e;
1464         struct thread           *td;
1465         wb_ext                  *we;
1466         int                     satisfied;
1467
1468         /*
1469          * Once an object has been signalled, we walk its list of
1470          * wait blocks. If a wait block can be awakened, then satisfy
1471          * waits as necessary and wake the thread.
1472          *
1473          * The rules work like this:
1474          *
1475          * If a wait block is marked as WAITTYPE_ANY, then
1476          * we can satisfy the wait conditions on the current
1477          * object and wake the thread right away. Satisfying
1478          * the wait also has the effect of breaking us out
1479          * of the search loop.
1480          *
1481          * If the wait block is marked as WAITTYPE_ALL, then it
1482          * will be part of a circularly linked
1483          * list of wait blocks belonging to a waiting thread
1484          * that's sleeping in KeWaitForMultipleObjects(). In
1485          * order to wake the thread, all the objects in the
1486          * wait list must be in the signalled state. If they
1487          * are, we then satisfy all of them and wake the
1488          * thread.
1489          *
1490          */
1491
1492         e = obj->dh_waitlisthead.nle_flink;
1493
1494         while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
1495                 w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
1496                 we = w->wb_ext;
1497                 td = we->we_td;
1498                 satisfied = FALSE;
1499                 if (w->wb_waittype == WAITTYPE_ANY) {
1500                         /*
1501                          * Thread can be awakened if
1502                          * any wait is satisfied.
1503                          */
1504                         ntoskrnl_satisfy_wait(obj, td);
1505                         satisfied = TRUE;
1506                         w->wb_awakened = TRUE;
1507                 } else {
1508                         /*
1509                          * Thread can only be woken up
1510                          * if all waits are satisfied.
1511                          * If the thread is waiting on multiple
1512                          * objects, they should all be linked
1513                          * through the wb_next pointers in the
1514                          * wait blocks.
1515                          */
1516                         satisfied = TRUE;
1517                         next = w->wb_next;
1518                         while (next != w) {
1519                                 if (ntoskrnl_is_signalled(obj, td) == FALSE) {
1520                                         satisfied = FALSE;
1521                                         break;
1522                                 }
1523                                 next = next->wb_next;
1524                         }
1525                         ntoskrnl_satisfy_multiple_waits(w);
1526                 }
1527
1528                 if (satisfied == TRUE)
1529                         cv_broadcastpri(&we->we_cv,
1530                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
1531                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
1532
1533                 e = e->nle_flink;
1534         }
1535 }
1536
1537 /*
1538  * Return the number of 100 nanosecond intervals since
1539  * January 1, 1601. (?!?!)
1540  */
1541 void
1542 ntoskrnl_time(tval)
1543         uint64_t                *tval;
1544 {
1545         struct timespec         ts;
1546
1547         nanotime(&ts);
1548         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1549             11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
1550 }
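
/*
 * Editorial worked check of the constant above: 1601-01-01 to 1970-01-01
 * spans 369 years containing 89 leap days, i.e. 369 * 365 + 89 = 134,774
 * days = 11,644,473,600 seconds; multiplied by 10,000,000 this is the
 * 100ns offset that converts the Unix epoch to the Windows epoch.
 */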
1551
1552 static void
1553 KeQuerySystemTime(current_time)
1554         uint64_t                *current_time;
1555 {
1556         ntoskrnl_time(current_time);
1557 }
1558
1559 static uint32_t
1560 KeTickCount(void)
1561 {
1562         struct timeval tv;
1563         getmicrouptime(&tv);
1564         return tvtohz(&tv);
1565 }
1566
1567
1568 /*
1569  * KeWaitForSingleObject() is a tricky beast, because it can be used
1570  * with several different object types: semaphores, timers, events,
1571  * mutexes and threads. Semaphores don't appear very often, but the
1572  * other object types are quite common. KeWaitForSingleObject() is
1573  * what's normally used to acquire a mutex, and it can be used to
1574  * wait for a thread termination.
1575  *
1576  * The Windows NDIS API is implemented in terms of Windows kernel
1577  * primitives, and some of the object manipulation is duplicated in
1578  * NDIS. For example, NDIS has timers and events, which are actually
1579  * Windows kevents and ktimers. Now, you're supposed to only use the
1580  * NDIS variants of these objects within the confines of the NDIS API,
1581  * but there are some naughty developers out there who will use
1582  * KeWaitForSingleObject() on NDIS timer and event objects, so we
1583  * have to support that as well. Consequently, our NDIS timer and event
1584  * code has to be closely tied into our ntoskrnl timer and event code,
1585  * just as it is in Windows.
1586  *
1587  * KeWaitForSingleObject() may do different things for different kinds
1588  * of objects:
1589  *
1590  * - For events, we check if the event has been signalled. If the
1591  *   event is already in the signalled state, we just return immediately,
1592  *   otherwise we wait for it to be set to the signalled state by someone
1593  *   else calling KeSetEvent(). Events can be either synchronization or
1594  *   notification events.
1595  *
1596  * - For timers, if the timer has already fired and the timer is in
1597  *   the signalled state, we just return, otherwise we wait on the
1598  *   timer. Unlike an event, timers get signalled automatically when
1599  *   they expire rather than someone having to trip them manually.
1600  *   Timers initialized with KeInitializeTimer() are always notification
1601  *   events: KeInitializeTimerEx() lets you initialize a timer as
1602  *   either a notification or synchronization event.
1603  *
1604  * - For mutexes, we try to acquire the mutex and if we can't, we wait
1605  *   on the mutex until it's available and then grab it. When a mutex is
1606  *   released, it enters the signalled state, which wakes up one of the
1607  *   threads waiting to acquire it. Mutexes are always synchronization
1608  *   events.
1609  *
1610  * - For threads, the only thing we do is wait until the thread object
1611  *   enters a signalled state, which occurs when the thread terminates.
1612  *   Threads are always notification events.
1613  *
1614  * A notification event wakes up all threads waiting on an object. A
1615  * synchronization event wakes up just one. Also, a synchronization event
1616  * is auto-clearing, which means we automatically set the event back to
1617  * the non-signalled state once the wakeup is done.
1618  */
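
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * how a driver-side caller might use KeWaitForSingleObject() with a
 * notification event and a relative one second timeout. Names prefixed
 * with "example_" are hypothetical.
 */
#if 0
static void
example_wait_for_event(void)
{
        nt_kevent               ev;
        int64_t                 duetime;
        uint32_t                rval;

        /* Notification event, initially in the non-signalled state. */
        KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);

        /* Negative duetime: relative, in 100ns units (one second here). */
        duetime = -10000000;
        rval = KeWaitForSingleObject(&ev, 0, 0, FALSE, &duetime);

        if (rval == STATUS_TIMEOUT)
                printf("example: wait timed out\n");
        else if (rval == STATUS_SUCCESS)
                printf("example: event was signalled\n");
}
#endif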
1619
1620 uint32_t
1621 KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
1622     uint8_t alertable, int64_t *duetime)
1623 {
1624         wait_block              w;
1625         struct thread           *td = curthread;
1626         struct timeval          tv;
1627         int                     error = 0;
1628         uint64_t                curtime;
1629         wb_ext                  we;
1630         nt_dispatch_header      *obj;
1631
1632         obj = arg;
1633
1634         if (obj == NULL)
1635                 return (STATUS_INVALID_PARAMETER);
1636
1637         mtx_lock(&ntoskrnl_dispatchlock);
1638
1639         cv_init(&we.we_cv, "KeWFS");
1640         we.we_td = td;
1641
1642         /*
1643          * Check to see if this object is already signalled,
1644          * and just return without waiting if it is.
1645          */
1646         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1647                 /* Sanity check the signal state value. */
1648                 if (obj->dh_sigstate != INT32_MIN) {
1649                         ntoskrnl_satisfy_wait(obj, curthread);
1650                         mtx_unlock(&ntoskrnl_dispatchlock);
1651                         return (STATUS_SUCCESS);
1652                 } else {
1653                         /*
1654                          * There's a limit to how many times we can
1655                          * recursively acquire a mutant. If we hit
1656                          * the limit, something is very wrong.
1657                          */
1658                         if (obj->dh_type == DISP_TYPE_MUTANT) {
1659                                 mtx_unlock(&ntoskrnl_dispatchlock);
1660                                 panic("mutant limit exceeded");
1661                         }
1662                 }
1663         }
1664
1665         bzero((char *)&w, sizeof(wait_block));
1666         w.wb_object = obj;
1667         w.wb_ext = &we;
1668         w.wb_waittype = WAITTYPE_ANY;
1669         w.wb_next = &w;
1670         w.wb_waitkey = 0;
1671         w.wb_awakened = FALSE;
1672         w.wb_oldpri = td->td_priority;
1673
1674         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1675
1676         /*
1677          * The timeout value is specified in 100 nanosecond units
1678          * and can be a positive or negative number. If it's positive,
1679          * then the duetime is absolute, and we need to convert it
1680          * to an offset relative to now in order to use it.
1681          * If it's negative, then the duetime is relative and we
1682          * just have to convert the units.
1683          */
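        /*
         * Worked example (illustrative values): a duetime of -30000000
         * (three seconds, relative) converts to tv_sec = 3, tv_usec = 0,
         * while -5000000 (half a second) converts to tv_sec = 0,
         * tv_usec = 500000.
         */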
1684
1685         if (duetime != NULL) {
1686                 if (*duetime < 0) {
1687                         tv.tv_sec = - (*duetime) / 10000000;
1688                         tv.tv_usec = (- (*duetime) / 10) -
1689                             (tv.tv_sec * 1000000);
1690                 } else {
1691                         ntoskrnl_time(&curtime);
1692                         if (*duetime < curtime)
1693                                 tv.tv_sec = tv.tv_usec = 0;
1694                         else {
1695                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1696                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1697                                     (tv.tv_sec * 1000000);
1698                         }
1699                 }
1700         }
1701
1702         if (duetime == NULL)
1703                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1704         else
1705                 error = cv_timedwait(&we.we_cv,
1706                     &ntoskrnl_dispatchlock, tvtohz(&tv));
1707
1708         RemoveEntryList(&w.wb_waitlist);
1709
1710         cv_destroy(&we.we_cv);
1711
1712         /* We timed out. Leave the object alone and return status. */
1713
1714         if (error == EWOULDBLOCK) {
1715                 mtx_unlock(&ntoskrnl_dispatchlock);
1716                 return (STATUS_TIMEOUT);
1717         }
1718
1719         mtx_unlock(&ntoskrnl_dispatchlock);
1720
1721         return (STATUS_SUCCESS);
1722 /*
1723         return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1724             mode, alertable, duetime, &w));
1725 */
1726 }
1727
1728 static uint32_t
1729 KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
1730         uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
1731         wait_block *wb_array)
1732 {
1733         struct thread           *td = curthread;
1734         wait_block              *whead, *w;
1735         wait_block              _wb_array[MAX_WAIT_OBJECTS];
1736         nt_dispatch_header      *cur;
1737         struct timeval          tv;
1738         int                     i, wcnt = 0, error = 0;
1739         uint64_t                curtime;
1740         struct timespec         t1, t2;
1741         uint32_t                status = STATUS_SUCCESS;
1742         wb_ext                  we;
1743
1744         if (cnt > MAX_WAIT_OBJECTS)
1745                 return (STATUS_INVALID_PARAMETER);
1746         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1747                 return (STATUS_INVALID_PARAMETER);
1748
1749         mtx_lock(&ntoskrnl_dispatchlock);
1750
1751         cv_init(&we.we_cv, "KeWFM");
1752         we.we_td = td;
1753
1754         if (wb_array == NULL)
1755                 whead = _wb_array;
1756         else
1757                 whead = wb_array;
1758
1759         bzero((char *)whead, sizeof(wait_block) * cnt);
1760
1761         /* First pass: see if we can satisfy any waits immediately. */
1762
1763         wcnt = 0;
1764         w = whead;
1765
1766         for (i = 0; i < cnt; i++) {
1767                 InsertTailList((&obj[i]->dh_waitlisthead),
1768                     (&w->wb_waitlist));
1769                 w->wb_ext = &we;
1770                 w->wb_object = obj[i];
1771                 w->wb_waittype = wtype;
1772                 w->wb_waitkey = i;
1773                 w->wb_awakened = FALSE;
1774                 w->wb_oldpri = td->td_priority;
1775                 w->wb_next = w + 1;
1776                 w++;
1777                 wcnt++;
1778                 if (ntoskrnl_is_signalled(obj[i], td)) {
1779                         /*
1780                          * There's a limit to how many times
1781                          * we can recursively acquire a mutant.
1782                          * If we hit the limit, something
1783                          * is very wrong.
1784                          */
1785                         if (obj[i]->dh_sigstate == INT32_MIN &&
1786                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
1787                                 mtx_unlock(&ntoskrnl_dispatchlock);
1788                                 panic("mutant limit exceeded");
1789                         }
1790
1791                         /*
1792                          * If this is a WAITTYPE_ANY wait, then
1793                          * satisfy the waited object and exit
1794                          * right now.
1795                          */
1796
1797                         if (wtype == WAITTYPE_ANY) {
1798                                 ntoskrnl_satisfy_wait(obj[i], td);
1799                                 status = STATUS_WAIT_0 + i;
1800                                 goto wait_done;
1801                         } else {
1802                                 w--;
1803                                 wcnt--;
1804                                 w->wb_object = NULL;
1805                                 RemoveEntryList(&w->wb_waitlist);
1806                         }
1807                 }
1808         }
1809
1810         /*
1811          * If this is a WAITTYPE_ALL wait and all objects are
1812          * already signalled, satisfy the waits and exit now.
1813          */
1814
1815         if (wtype == WAITTYPE_ALL && wcnt == 0) {
1816                 for (i = 0; i < cnt; i++)
1817                         ntoskrnl_satisfy_wait(obj[i], td);
1818                 status = STATUS_SUCCESS;
1819                 goto wait_done;
1820         }
1821
1822         /*
1823          * Create a circular waitblock list. The waitcount
1824          * must always be non-zero when we get here.
1825          */
1826
1827         (w - 1)->wb_next = whead;
1828
1829         /* Wait on any objects that aren't yet signalled. */
1830
1831         /* Calculate timeout, if any. */
1832
1833         if (duetime != NULL) {
1834                 if (*duetime < 0) {
1835                         tv.tv_sec = - (*duetime) / 10000000;
1836                         tv.tv_usec = (- (*duetime) / 10) -
1837                             (tv.tv_sec * 1000000);
1838                 } else {
1839                         ntoskrnl_time(&curtime);
1840                         if (*duetime < curtime)
1841                                 tv.tv_sec = tv.tv_usec = 0;
1842                         else {
1843                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1844                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1845                                     (tv.tv_sec * 1000000);
1846                         }
1847                 }
1848         }
1849
1850         while (wcnt) {
1851                 nanotime(&t1);
1852
1853                 if (duetime == NULL)
1854                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1855                 else
1856                         error = cv_timedwait(&we.we_cv,
1857                             &ntoskrnl_dispatchlock, tvtohz(&tv));
1858
1859                 /* The wait timed out. */
1860
1861                 if (error) {
1862                         status = STATUS_TIMEOUT;
1863                         goto wait_done;
1864                 }
1865
1866                 nanotime(&t2);
1867
1868                 /* See what's been signalled. */
1869
1870                 w = whead;
1871                 do {
1872                         cur = w->wb_object;
1873                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1874                             w->wb_awakened == TRUE) {
1875                                 /* Sanity check the signal state value. */
1876                                 if (cur->dh_sigstate == INT32_MIN &&
1877                                     cur->dh_type == DISP_TYPE_MUTANT) {
1878                                         mtx_unlock(&ntoskrnl_dispatchlock);
1879                                         panic("mutant limit exceeded");
1880                                 }
1881                                 wcnt--;
1882                                 if (wtype == WAITTYPE_ANY) {
1883                                         status = STATUS_WAIT_0 +
1884                                             w->wb_waitkey;
1885                                         goto wait_done;
1886                                 }
1887                         }
1888                         w = w->wb_next;
1889                 } while (w != whead);
1890
1891                 /*
1892                  * If all objects have been signalled, or if this
1893  * is a WAITTYPE_ANY wait and we were woken up by
1894                  * someone, we can bail.
1895                  */
1896
1897                 if (wcnt == 0) {
1898                         status = STATUS_SUCCESS;
1899                         goto wait_done;
1900                 }
1901
1902                 /*
1903                  * If this is WAITTYPE_ALL wait, and there's still
1904                  * objects that haven't been signalled, deduct the
1905                  * time that's elapsed so far from the timeout and
1906                  * wait again (or continue waiting indefinitely if
1907                  * there's no timeout).
1908                  */
1909
1910                 if (duetime != NULL) {
1911                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1912                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1913                 }
1914         }
1915
1916
1917 wait_done:
1918
1919         cv_destroy(&we.we_cv);
1920
1921         for (i = 0; i < cnt; i++) {
1922                 if (whead[i].wb_object != NULL)
1923                         RemoveEntryList(&whead[i].wb_waitlist);
1924
1925         }
1926         mtx_unlock(&ntoskrnl_dispatchlock);
1927
1928         return (status);
1929 }
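
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * waiting for either of two events with KeWaitForMultipleObjects().
 * Since the count is small, no private wait block array is needed
 * (see the THREAD_WAIT_OBJECTS check above). Names prefixed with
 * "example_" are hypothetical.
 */
#if 0
static void
example_wait_for_either(nt_kevent *ev1, nt_kevent *ev2)
{
        nt_dispatch_header      *objs[2];
        uint32_t                rval;

        objs[0] = (nt_dispatch_header *)ev1;
        objs[1] = (nt_dispatch_header *)ev2;

        rval = KeWaitForMultipleObjects(2, objs, WAITTYPE_ANY, 0, 0,
            FALSE, NULL, NULL);

        if (rval == STATUS_WAIT_0)
                printf("example: first event signalled\n");
        else if (rval == STATUS_WAIT_0 + 1)
                printf("example: second event signalled\n");
}
#endif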
1930
1931 static void
1932 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
1933 {
1934         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1935 }
1936
1937 static uint16_t
1938 READ_REGISTER_USHORT(reg)
1939         uint16_t                *reg;
1940 {
1941         return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1942 }
1943
1944 static void
1945 WRITE_REGISTER_ULONG(reg, val)
1946         uint32_t                *reg;
1947         uint32_t                val;
1948 {
1949         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1950 }
1951
1952 static uint32_t
1953 READ_REGISTER_ULONG(reg)
1954         uint32_t                *reg;
1955 {
1956         return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1957 }
1958
1959 static uint8_t
1960 READ_REGISTER_UCHAR(uint8_t *reg)
1961 {
1962         return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1963 }
1964
1965 static void
1966 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
1967 {
1968         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1969 }
1970
1971 static int64_t
1972 _allmul(a, b)
1973         int64_t                 a;
1974         int64_t                 b;
1975 {
1976         return (a * b);
1977 }
1978
1979 static int64_t
1980 _alldiv(a, b)
1981         int64_t                 a;
1982         int64_t                 b;
1983 {
1984         return (a / b);
1985 }
1986
1987 static int64_t
1988 _allrem(a, b)
1989         int64_t                 a;
1990         int64_t                 b;
1991 {
1992         return (a % b);
1993 }
1994
1995 static uint64_t
1996 _aullmul(a, b)
1997         uint64_t                a;
1998         uint64_t                b;
1999 {
2000         return (a * b);
2001 }
2002
2003 static uint64_t
2004 _aulldiv(a, b)
2005         uint64_t                a;
2006         uint64_t                b;
2007 {
2008         return (a / b);
2009 }
2010
2011 static uint64_t
2012 _aullrem(a, b)
2013         uint64_t                a;
2014         uint64_t                b;
2015 {
2016         return (a % b);
2017 }
2018
2019 static int64_t
2020 _allshl(int64_t a, uint8_t b)
2021 {
2022         return (a << b);
2023 }
2024
2025 static uint64_t
2026 _aullshl(uint64_t a, uint8_t b)
2027 {
2028         return (a << b);
2029 }
2030
2031 static int64_t
2032 _allshr(int64_t a, uint8_t b)
2033 {
2034         return (a >> b);
2035 }
2036
2037 static uint64_t
2038 _aullshr(uint64_t a, uint8_t b)
2039 {
2040         return (a >> b);
2041 }
2042
2043 static slist_entry *
2044 ntoskrnl_pushsl(head, entry)
2045         slist_header            *head;
2046         slist_entry             *entry;
2047 {
2048         slist_entry             *oldhead;
2049
2050         oldhead = head->slh_list.slh_next;
2051         entry->sl_next = head->slh_list.slh_next;
2052         head->slh_list.slh_next = entry;
2053         head->slh_list.slh_depth++;
2054         head->slh_list.slh_seq++;
2055
2056         return (oldhead);
2057 }
2058
2059 static slist_entry *
2060 ntoskrnl_popsl(head)
2061         slist_header            *head;
2062 {
2063         slist_entry             *first;
2064
2065         first = head->slh_list.slh_next;
2066         if (first != NULL) {
2067                 head->slh_list.slh_next = first->sl_next;
2068                 head->slh_list.slh_depth--;
2069                 head->slh_list.slh_seq++;
2070         }
2071
2072         return (first);
2073 }
2074
2075 /*
2076  * We need this to make lookaside lists work for amd64.
2077  * We store a pointer to ExAllocatePoolWithTag() in the lookaside
2078  * list structure. For amd64 to work right, this has to be a
2079  * pointer to the wrapped version of the routine, not the
2080  * original. Letting the Windows driver invoke the original
2081  * function directly will result in a calling convention
2082  * mismatch and a crash. On x86, this effectively
2083  * becomes a no-op since ipt_func and ipt_wrap are the same.
2084  */
2085
2086 static funcptr
2087 ntoskrnl_findwrap(func)
2088         funcptr                 func;
2089 {
2090         image_patch_table       *patch;
2091
2092         patch = ntoskrnl_functbl;
2093         while (patch->ipt_func != NULL) {
2094                 if ((funcptr)patch->ipt_func == func)
2095                         return ((funcptr)patch->ipt_wrap);
2096                 patch++;
2097         }
2098
2099         return (NULL);
2100 }
2101
2102 static void
2103 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
2104         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2105         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2106 {
2107         bzero((char *)lookaside, sizeof(paged_lookaside_list));
2108
2109         if (size < sizeof(slist_entry))
2110                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2111         else
2112                 lookaside->nll_l.gl_size = size;
2113         lookaside->nll_l.gl_tag = tag;
2114         if (allocfunc == NULL)
2115                 lookaside->nll_l.gl_allocfunc =
2116                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2117         else
2118                 lookaside->nll_l.gl_allocfunc = allocfunc;
2119
2120         if (freefunc == NULL)
2121                 lookaside->nll_l.gl_freefunc =
2122                     ntoskrnl_findwrap((funcptr)ExFreePool);
2123         else
2124                 lookaside->nll_l.gl_freefunc = freefunc;
2125
2126 #ifdef __i386__
2127         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2128 #endif
2129
2130         lookaside->nll_l.gl_type = NonPagedPool;
2131         lookaside->nll_l.gl_depth = depth;
2132         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2133 }
2134
2135 static void
2136 ExDeletePagedLookasideList(lookaside)
2137         paged_lookaside_list   *lookaside;
2138 {
2139         void                    *buf;
2140         void            (*freefunc)(void *);
2141
2142         freefunc = lookaside->nll_l.gl_freefunc;
2143         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2144                 MSCALL1(freefunc, buf);
2145 }
2146
2147 static void
2148 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
2149         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2150         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2151 {
2152         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2153
2154         if (size < sizeof(slist_entry))
2155                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2156         else
2157                 lookaside->nll_l.gl_size = size;
2158         lookaside->nll_l.gl_tag = tag;
2159         if (allocfunc == NULL)
2160                 lookaside->nll_l.gl_allocfunc =
2161                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2162         else
2163                 lookaside->nll_l.gl_allocfunc = allocfunc;
2164
2165         if (freefunc == NULL)
2166                 lookaside->nll_l.gl_freefunc =
2167                     ntoskrnl_findwrap((funcptr)ExFreePool);
2168         else
2169                 lookaside->nll_l.gl_freefunc = freefunc;
2170
2171 #ifdef __i386__
2172         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2173 #endif
2174
2175         lookaside->nll_l.gl_type = NonPagedPool;
2176         lookaside->nll_l.gl_depth = depth;
2177         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2178 }
2179
2180 static void
2181 ExDeleteNPagedLookasideList(lookaside)
2182         npaged_lookaside_list   *lookaside;
2183 {
2184         void                    *buf;
2185         void            (*freefunc)(void *);
2186
2187         freefunc = lookaside->nll_l.gl_freefunc;
2188         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2189                 MSCALL1(freefunc, buf);
2190 }
2191
2192 slist_entry *
2193 InterlockedPushEntrySList(head, entry)
2194         slist_header            *head;
2195         slist_entry             *entry;
2196 {
2197         slist_entry             *oldhead;
2198
2199         mtx_lock_spin(&ntoskrnl_interlock);
2200         oldhead = ntoskrnl_pushsl(head, entry);
2201         mtx_unlock_spin(&ntoskrnl_interlock);
2202
2203         return (oldhead);
2204 }
2205
2206 slist_entry *
2207 InterlockedPopEntrySList(head)
2208         slist_header            *head;
2209 {
2210         slist_entry             *first;
2211
2212         mtx_lock_spin(&ntoskrnl_interlock);
2213         first = ntoskrnl_popsl(head);
2214         mtx_unlock_spin(&ntoskrnl_interlock);
2215
2216         return (first);
2217 }
2218
2219 static slist_entry *
2220 ExInterlockedPushEntrySList(head, entry, lock)
2221         slist_header            *head;
2222         slist_entry             *entry;
2223         kspin_lock              *lock;
2224 {
2225         return (InterlockedPushEntrySList(head, entry));
2226 }
2227
2228 static slist_entry *
2229 ExInterlockedPopEntrySList(head, lock)
2230         slist_header            *head;
2231         kspin_lock              *lock;
2232 {
2233         return (InterlockedPopEntrySList(head));
2234 }
2235
2236 uint16_t
2237 ExQueryDepthSList(head)
2238         slist_header            *head;
2239 {
2240         uint16_t                depth;
2241
2242         mtx_lock_spin(&ntoskrnl_interlock);
2243         depth = head->slh_list.slh_depth;
2244         mtx_unlock_spin(&ntoskrnl_interlock);
2245
2246         return (depth);
2247 }
2248
2249 void
2250 KeInitializeSpinLock(lock)
2251         kspin_lock              *lock;
2252 {
2253         *lock = 0;
2254 }
2255
2256 #ifdef __i386__
2257 void
2258 KefAcquireSpinLockAtDpcLevel(lock)
2259         kspin_lock              *lock;
2260 {
2261 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2262         int                     i = 0;
2263 #endif
2264
2265         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2266                 /* sit and spin */;
2267 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2268                 i++;
2269                 if (i > 200000000)
2270                         panic("DEADLOCK!");
2271 #endif
2272         }
2273 }
2274
2275 void
2276 KefReleaseSpinLockFromDpcLevel(lock)
2277         kspin_lock              *lock;
2278 {
2279         atomic_store_rel_int((volatile u_int *)lock, 0);
2280 }
2281
2282 uint8_t
2283 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2284 {
2285         uint8_t                 oldirql;
2286
2287         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2288                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2289
2290         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2291         KeAcquireSpinLockAtDpcLevel(lock);
2292
2293         return (oldirql);
2294 }
2295 #else
2296 void
2297 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2298 {
2299         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2300                 /* sit and spin */;
2301 }
2302
2303 void
2304 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2305 {
2306         atomic_store_rel_int((volatile u_int *)lock, 0);
2307 }
2308 #endif /* __i386__ */
2309
2310 uintptr_t
2311 InterlockedExchange(dst, val)
2312         volatile uint32_t       *dst;
2313         uintptr_t               val;
2314 {
2315         uintptr_t               r;
2316
2317         mtx_lock_spin(&ntoskrnl_interlock);
2318         r = *dst;
2319         *dst = val;
2320         mtx_unlock_spin(&ntoskrnl_interlock);
2321
2322         return (r);
2323 }
2324
2325 static uint32_t
2326 InterlockedIncrement(addend)
2327         volatile uint32_t       *addend;
2328 {
2329         atomic_add_long((volatile u_long *)addend, 1);
2330         return (*addend);
2331 }
2332
2333 static uint32_t
2334 InterlockedDecrement(addend)
2335         volatile uint32_t       *addend;
2336 {
2337         atomic_subtract_long((volatile u_long *)addend, 1);
2338         return (*addend);
2339 }
2340
2341 static void
2342 ExInterlockedAddLargeStatistic(addend, inc)
2343         uint64_t                *addend;
2344         uint32_t                inc;
2345 {
2346         mtx_lock_spin(&ntoskrnl_interlock);
2347         *addend += inc;
2348         mtx_unlock_spin(&ntoskrnl_interlock);
2349 }
2350
2351 mdl *
2352 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
2353         uint8_t chargequota, irp *iopkt)
2354 {
2355         mdl                     *m;
2356         int                     zone = 0;
2357
2358         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2359                 m = ExAllocatePoolWithTag(NonPagedPool,
2360                     MmSizeOfMdl(vaddr, len), 0);
2361         else {
2362                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
2363                 zone++;
2364         }
2365
2366         if (m == NULL)
2367                 return (NULL);
2368
2369         MmInitializeMdl(m, vaddr, len);
2370
2371         /*
2372          * MmInitializeMdl() clears the flags field, so we
2373          * have to set this here. If the MDL came from the
2374          * MDL UMA zone, tag it so we can release it to
2375          * the right place later.
2376          */
2377         if (zone)
2378                 m->mdl_flags = MDL_ZONE_ALLOCED;
2379
2380         if (iopkt != NULL) {
2381                 if (secondarybuf == TRUE) {
2382                         mdl                     *last;
2383                         last = iopkt->irp_mdl;
2384                         while (last->mdl_next != NULL)
2385                                 last = last->mdl_next;
2386                         last->mdl_next = m;
2387                 } else {
2388                         if (iopkt->irp_mdl != NULL)
2389                                 panic("leaking an MDL in IoAllocateMdl()");
2390                         iopkt->irp_mdl = m;
2391                 }
2392         }
2393
2394         return (m);
2395 }
2396
2397 void
2398 IoFreeMdl(m)
2399         mdl                     *m;
2400 {
2401         if (m == NULL)
2402                 return;
2403
2404         if (m->mdl_flags & MDL_ZONE_ALLOCED)
2405                 uma_zfree(mdl_zone, m);
2406         else
2407                 ExFreePool(m);
2408 }
2409
2410 static void *
2411 MmAllocateContiguousMemory(size, highest)
2412         uint32_t                size;
2413         uint64_t                highest;
2414 {
2415         void *addr;
2416         size_t pagelength = roundup(size, PAGE_SIZE);
2417
2418         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2419
2420         return (addr);
2421 }
2422
2423 static void *
2424 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
2425     boundary, cachetype)
2426         uint32_t                size;
2427         uint64_t                lowest;
2428         uint64_t                highest;
2429         uint64_t                boundary;
2430         enum nt_caching_type    cachetype;
2431 {
2432         vm_memattr_t            memattr;
2433         void                    *ret;
2434
2435         switch (cachetype) {
2436         case MmNonCached:
2437                 memattr = VM_MEMATTR_UNCACHEABLE;
2438                 break;
2439         case MmWriteCombined:
2440                 memattr = VM_MEMATTR_WRITE_COMBINING;
2441                 break;
2442         case MmNonCachedUnordered:
2443                 memattr = VM_MEMATTR_UNCACHEABLE;
2444                 break;
2445         case MmCached:
2446         case MmHardwareCoherentCached:
2447         case MmUSWCCached:
2448         default:
2449                 memattr = VM_MEMATTR_DEFAULT;
2450                 break;
2451         }
2452
2453         ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
2454             lowest, highest, PAGE_SIZE, boundary, memattr);
2455         if (ret != NULL)
2456                 malloc_type_allocated(M_DEVBUF, round_page(size));
2457         return (ret);
2458 }
2459
2460 static void
2461 MmFreeContiguousMemory(base)
2462         void                    *base;
2463 {
2464         ExFreePool(base);
2465 }
2466
2467 static void
2468 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
2469         void                    *base;
2470         uint32_t                size;
2471         enum nt_caching_type    cachetype;
2472 {
2473         contigfree(base, size, M_DEVBUF);
2474 }
2475
2476 static uint32_t
2477 MmSizeOfMdl(vaddr, len)
2478         void                    *vaddr;
2479         size_t                  len;
2480 {
2481         uint32_t                l;
2482
2483         l = sizeof(struct mdl) +
2484             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2485
2486         return (l);
2487 }
2488
2489 /*
2490  * The Microsoft documentation says this routine fills in the
2491  * page array of an MDL with the _physical_ page addresses that
2492  * comprise the buffer, but we don't really want to do that here.
2493  * Instead, we just fill in the page array with the kernel virtual
2494  * addresses of the buffers.
2495  */
2496 void
2497 MmBuildMdlForNonPagedPool(m)
2498         mdl                     *m;
2499 {
2500         vm_offset_t             *mdl_pages;
2501         int                     pagecnt, i;
2502
2503         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2504
2505         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2506                 panic("not enough pages in MDL to describe buffer");
2507
2508         mdl_pages = MmGetMdlPfnArray(m);
2509
2510         for (i = 0; i < pagecnt; i++)
2511                 mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2512
2513         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2514         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2515 }
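
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * describing a driver-owned buffer with an MDL using the routines
 * above; names prefixed with "example_" are hypothetical.
 */
#if 0
static void
example_describe_buffer(void *buf, uint32_t len)
{
        mdl                     *m;

        /* No secondary buffer, no quota charge, no IRP. */
        m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
        if (m == NULL)
                return;

        /* Fill in the page array for a non-paged (wired) buffer. */
        MmBuildMdlForNonPagedPool(m);

        IoFreeMdl(m);
}
#endif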
2516
2517 static void *
2518 MmMapLockedPages(mdl *buf, uint8_t accessmode)
2519 {
2520         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2521         return (MmGetMdlVirtualAddress(buf));
2522 }
2523
2524 static void *
2525 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
2526         void *vaddr, uint32_t bugcheck, uint32_t prio)
2527 {
2528         return (MmMapLockedPages(buf, accessmode));
2529 }
2530
2531 static void
2532 MmUnmapLockedPages(vaddr, buf)
2533         void                    *vaddr;
2534         mdl                     *buf;
2535 {
2536         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2537 }
2538
2539 /*
2540  * This function has a problem in that it will break if you
2541  * compile this module without PAE and try to use it on a PAE
2542  * kernel. Unfortunately, there's no way around this at the
2543  * moment. It's slightly less broken than using pmap_kextract().
2544  * You'd think the virtual memory subsystem would help us out
2545  * here, but it doesn't.
2546  */
2547
2548 static uint64_t
2549 MmGetPhysicalAddress(void *base)
2550 {
2551         return (pmap_extract(kernel_map->pmap, (vm_offset_t)base));
2552 }
2553
2554 uint8_t
2555 MmIsAddressValid(vaddr)
2556         void                    *vaddr;
2557 {
2558         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
2559                 return (TRUE);
2560
2561         return (FALSE);
2562 }
2563
2564 void *
2565 MmMapIoSpace(paddr, len, cachetype)
2566         uint64_t                paddr;
2567         uint32_t                len;
2568         uint32_t                cachetype;
2569 {
2570         devclass_t              nexus_class;
2571         device_t                *nexus_devs, devp;
2572         int                     nexus_count = 0;
2573         device_t                matching_dev = NULL;
2574         struct resource         *res;
2575         int                     i;
2576         vm_offset_t             v;
2577
2578         /* There will always be at least one nexus. */
2579
2580         nexus_class = devclass_find("nexus");
2581         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2582
2583         for (i = 0; i < nexus_count; i++) {
2584                 devp = nexus_devs[i];
2585                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2586                 if (matching_dev)
2587                         break;
2588         }
2589
2590         free(nexus_devs, M_TEMP);
2591
2592         if (matching_dev == NULL)
2593                 return (NULL);
2594
2595         v = (vm_offset_t)rman_get_virtual(res);
2596         if (paddr > rman_get_start(res))
2597                 v += paddr - rman_get_start(res);
2598
2599         return ((void *)v);
2600 }
2601
2602 void
2603 MmUnmapIoSpace(vaddr, len)
2604         void                    *vaddr;
2605         size_t                  len;
2606 {
2607 }
2608
2609
2610 static device_t
2611 ntoskrnl_finddev(dev, paddr, res)
2612         device_t                dev;
2613         uint64_t                paddr;
2614         struct resource         **res;
2615 {
2616         device_t                *children = NULL;
2617         device_t                matching_dev;
2618         int                     childcnt;
2619         struct resource         *r;
2620         struct resource_list    *rl;
2621         struct resource_list_entry      *rle;
2622         uint32_t                flags;
2623         int                     i;
2624
2625         /* We only want devices that have been successfully probed. */
2626
2627         if (device_is_alive(dev) == FALSE)
2628                 return (NULL);
2629
2630         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2631         if (rl != NULL) {
2632                 STAILQ_FOREACH(rle, rl, link) {
2633                         r = rle->res;
2634
2635                         if (r == NULL)
2636                                 continue;
2637
2638                         flags = rman_get_flags(r);
2639
2640                         if (rle->type == SYS_RES_MEMORY &&
2641                             paddr >= rman_get_start(r) &&
2642                             paddr <= rman_get_end(r)) {
2643                                 if (!(flags & RF_ACTIVE))
2644                                         bus_activate_resource(dev,
2645                                             SYS_RES_MEMORY, 0, r);
2646                                 *res = r;
2647                                 return (dev);
2648                         }
2649                 }
2650         }
2651
2652         /*
2653          * If this device has children, do another
2654          * level of recursion to inspect them.
2655          */
2656
2657         device_get_children(dev, &children, &childcnt);
2658
2659         for (i = 0; i < childcnt; i++) {
2660                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2661                 if (matching_dev != NULL) {
2662                         free(children, M_TEMP);
2663                         return (matching_dev);
2664                 }
2665         }
2666
2667
2668         /* Won't somebody please think of the children! */
2669
2670         if (children != NULL)
2671                 free(children, M_TEMP);
2672
2673         return (NULL);
2674 }
2675
2676 /*
2677  * Workitems are unlike DPCs, in that they run in a user-mode thread
2678  * context rather than at DISPATCH_LEVEL in kernel context. In our
2679  * case we run them in kernel context anyway.
2680  */
2681 static void
2682 ntoskrnl_workitem_thread(arg)
2683         void                    *arg;
2684 {
2685         kdpc_queue              *kq;
2686         list_entry              *l;
2687         io_workitem             *iw;
2688         uint8_t                 irql;
2689
2690         kq = arg;
2691
2692         InitializeListHead(&kq->kq_disp);
2693         kq->kq_td = curthread;
2694         kq->kq_exit = 0;
2695         KeInitializeSpinLock(&kq->kq_lock);
2696         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2697
2698         while (1) {
2699                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2700
2701                 KeAcquireSpinLock(&kq->kq_lock, &irql);
2702
2703                 if (kq->kq_exit) {
2704                         kq->kq_exit = 0;
2705                         KeReleaseSpinLock(&kq->kq_lock, irql);
2706                         break;
2707                 }
2708
2709                 while (!IsListEmpty(&kq->kq_disp)) {
2710                         l = RemoveHeadList(&kq->kq_disp);
2711                         iw = CONTAINING_RECORD(l,
2712                             io_workitem, iw_listentry);
2713                         InitializeListHead((&iw->iw_listentry));
2714                         if (iw->iw_func == NULL)
2715                                 continue;
2716                         KeReleaseSpinLock(&kq->kq_lock, irql);
2717                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2718                         KeAcquireSpinLock(&kq->kq_lock, &irql);
2719                 }
2720
2721                 KeReleaseSpinLock(&kq->kq_lock, irql);
2722         }
2723
2724         kproc_exit(0);
2725         return; /* notreached */
2726 }
2727
2728 static void
2729 ntoskrnl_destroy_workitem_threads(void)
2730 {
2731         kdpc_queue              *kq;
2732         int                     i;
2733
2734         for (i = 0; i < WORKITEM_THREADS; i++) {
2735                 kq = wq_queues + i;
2736                 kq->kq_exit = 1;
2737                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2738                 while (kq->kq_exit)
2739                         tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
2740         }
2741 }
2742
2743 io_workitem *
2744 IoAllocateWorkItem(dobj)
2745         device_object           *dobj;
2746 {
2747         io_workitem             *iw;
2748
2749         iw = uma_zalloc(iw_zone, M_NOWAIT);
2750         if (iw == NULL)
2751                 return (NULL);
2752
2753         InitializeListHead(&iw->iw_listentry);
2754         iw->iw_dobj = dobj;
2755
2756         mtx_lock(&ntoskrnl_dispatchlock);
2757         iw->iw_idx = wq_idx;
2758         WORKIDX_INC(wq_idx);
2759         mtx_unlock(&ntoskrnl_dispatchlock);
2760
2761         return (iw);
2762 }
2763
2764 void
2765 IoFreeWorkItem(iw)
2766         io_workitem             *iw;
2767 {
2768         uma_zfree(iw_zone, iw);
2769 }
2770
2771 void
2772 IoQueueWorkItem(iw, iw_func, qtype, ctx)
2773         io_workitem             *iw;
2774         io_workitem_func        iw_func;
2775         uint32_t                qtype;
2776         void                    *ctx;
2777 {
2778         kdpc_queue              *kq;
2779         list_entry              *l;
2780         io_workitem             *cur;
2781         uint8_t                 irql;
2782
2783         kq = wq_queues + iw->iw_idx;
2784
2785         KeAcquireSpinLock(&kq->kq_lock, &irql);
2786
2787         /*
2788          * Traverse the list and make sure this workitem hasn't
2789          * already been inserted. Queuing the same workitem
2790          * twice will hose the list but good.
2791          */
2792
2793         l = kq->kq_disp.nle_flink;
2794         while (l != &kq->kq_disp) {
2795                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2796                 if (cur == iw) {
2797                         /* Already queued -- do nothing. */
2798                         KeReleaseSpinLock(&kq->kq_lock, irql);
2799                         return;
2800                 }
2801                 l = l->nle_flink;
2802         }
2803
2804         iw->iw_func = iw_func;
2805         iw->iw_ctx = ctx;
2806
2807         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2808         KeReleaseSpinLock(&kq->kq_lock, irql);
2809
2810         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2811 }
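
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * the usual allocate/queue pattern for the workitem API above. The
 * queue type argument is unused by this implementation, and names
 * prefixed with "example_" are hypothetical; a real Windows driver's
 * callback would of course use the Windows calling convention.
 */
#if 0
static void
example_workitem_func(device_object *dobj, void *ctx)
{
        printf("example: workitem for %p ran\n", dobj);
}

static void
example_schedule_work(device_object *dobj)
{
        io_workitem             *iw;

        iw = IoAllocateWorkItem(dobj);
        if (iw == NULL)
                return;

        /* Runs later in one of the ntoskrnl_workitem_thread() threads. */
        IoQueueWorkItem(iw, (io_workitem_func)example_workitem_func, 0, NULL);

        /* The caller frees the workitem with IoFreeWorkItem() when done. */
}
#endif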
2812
2813 static void
2814 ntoskrnl_workitem(dobj, arg)
2815         device_object           *dobj;
2816         void                    *arg;
2817 {
2818         io_workitem             *iw;
2819         work_queue_item         *w;
2820         work_item_func          f;
2821
2822         iw = arg;
2823         w = (work_queue_item *)dobj;
2824         f = (work_item_func)w->wqi_func;
2825         uma_zfree(iw_zone, iw);
2826         MSCALL2(f, w, w->wqi_ctx);
2827 }
2828
2829 /*
2830  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2831  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2832  * problem with ExQueueWorkItem() is that it can't guard against
2833  * the condition where a driver submits a job to the work queue and
2834  * is then unloaded before the job is able to run. IoQueueWorkItem()
2835  * acquires a reference to the device's device_object via the
2836  * object manager and retains it until after the job has completed,
2837  * which prevents the driver from being unloaded before the job
2838  * runs. (We don't currently support this behavior, though hopefully
2839  * that will change once the object manager API is fleshed out a bit.)
2840  *
2841  * Having said all that, the ExQueueWorkItem() API remains, because
2842  * there are still other parts of Windows that use it, including
2843  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2844  * We fake up the ExQueueWorkItem() API on top of our implementation
2845  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2846  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2847  * queue item (provided by the caller) in to IoAllocateWorkItem()
2848  * instead of the device_object. We need to save this pointer so
2849  * we can apply a sanity check: as with the DPC queue and other
2850  * workitem queues, we can't allow the same work queue item to
2851  * be queued twice. If it's already pending, we silently return
2852  * be queued twice. If it's already pending, we silently return.
2853
2854 void
2855 ExQueueWorkItem(w, qtype)
2856         work_queue_item         *w;
2857         uint32_t                qtype;
2858 {
2859         io_workitem             *iw;
2860         io_workitem_func        iwf;
2861         kdpc_queue              *kq;
2862         list_entry              *l;
2863         io_workitem             *cur;
2864         uint8_t                 irql;
2865
2866
2867         /*
2868          * We need to do a special sanity test to make sure
2869          * the ExQueueWorkItem() API isn't used to queue
2870          * the same workitem twice. Rather than checking the
2871          * io_workitem pointer itself, we test the attached
2872          * device object, which is really a pointer to the
2873          * legacy work queue item structure.
2874          */
2875
2876         kq = wq_queues + WORKITEM_LEGACY_THREAD;
2877         KeAcquireSpinLock(&kq->kq_lock, &irql);
2878         l = kq->kq_disp.nle_flink;
2879         while (l != &kq->kq_disp) {
2880                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2881                 if (cur->iw_dobj == (device_object *)w) {
2882                         /* Already queued -- do nothing. */
2883                         KeReleaseSpinLock(&kq->kq_lock, irql);
2884                         return;
2885                 }
2886                 l = l->nle_flink;
2887         }
2888         KeReleaseSpinLock(&kq->kq_lock, irql);
2889
2890         iw = IoAllocateWorkItem((device_object *)w);
2891         if (iw == NULL)
2892                 return;
2893
2894         iw->iw_idx = WORKITEM_LEGACY_THREAD;
2895         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2896         IoQueueWorkItem(iw, iwf, qtype, iw);
2897 }
2898
2899 static void
2900 RtlZeroMemory(dst, len)
2901         void                    *dst;
2902         size_t                  len;
2903 {
2904         bzero(dst, len);
2905 }
2906
2907 static void
2908 RtlCopyMemory(dst, src, len)
2909         void                    *dst;
2910         const void              *src;
2911         size_t                  len;
2912 {
2913         bcopy(src, dst, len);
2914 }
2915
2916 static size_t
2917 RtlCompareMemory(s1, s2, len)
2918         const void              *s1;
2919         const void              *s2;
2920         size_t                  len;
2921 {
2922         size_t                  i, total = 0;
2923         uint8_t                 *m1, *m2;
2924
2925         m1 = __DECONST(char *, s1);
2926         m2 = __DECONST(char *, s2);
2927
2928         for (i = 0; i < len; i++) {
2929                 if (m1[i] == m2[i])
2930                         total++;
2931         }
2932         return (total);
2933 }
2934
2935 void
2936 RtlInitAnsiString(dst, src)
2937         ansi_string             *dst;
2938         char                    *src;
2939 {
2940         ansi_string             *a;
2941
2942         a = dst;
2943         if (a == NULL)
2944                 return;
2945         if (src == NULL) {
2946                 a->as_len = a->as_maxlen = 0;
2947                 a->as_buf = NULL;
2948         } else {
2949                 a->as_buf = src;
2950                 a->as_len = a->as_maxlen = strlen(src);
2951         }
2952 }
2953
2954 void
2955 RtlInitUnicodeString(dst, src)
2956         unicode_string          *dst;
2957         uint16_t                *src;
2958 {
2959         unicode_string          *u;
2960         int                     i;
2961
2962         u = dst;
2963         if (u == NULL)
2964                 return;
2965         if (src == NULL) {
2966                 u->us_len = u->us_maxlen = 0;
2967                 u->us_buf = NULL;
2968         } else {
2969                 i = 0;
2970                 while(src[i] != 0)
2971                         i++;
2972                 u->us_buf = src;
2973                 u->us_len = u->us_maxlen = i * 2;
2974         }
2975 }
2976
2977 ndis_status
2978 RtlUnicodeStringToInteger(ustr, base, val)
2979         unicode_string          *ustr;
2980         uint32_t                base;
2981         uint32_t                *val;
2982 {
2983         uint16_t                *uchr;
2984         int                     len, neg = 0;
2985         char                    abuf[64];
2986         char                    *astr;
2987
2988         uchr = ustr->us_buf;
2989         len = ustr->us_len;
2990         bzero(abuf, sizeof(abuf));
2991
2992         if ((char)((*uchr) & 0xFF) == '-') {
2993                 neg = 1;
2994                 uchr++;
2995                 len -= 2;
2996         } else if ((char)((*uchr) & 0xFF) == '+') {
2997                 neg = 0;
2998                 uchr++;
2999                 len -= 2;
3000         }
3001
3002         if (base == 0) {
3003                 if ((char)((*uchr) & 0xFF) == 'b') {
3004                         base = 2;
3005                         uchr++;
3006                         len -= 2;
3007                 } else if ((char)((*uchr) & 0xFF) == 'o') {
3008                         base = 8;
3009                         uchr++;
3010                         len -= 2;
3011                 } else if ((char)((*uchr) & 0xFF) == 'x') {
3012                         base = 16;
3013                         uchr++;
3014                         len -= 2;
3015                 } else
3016                         base = 10;
3017         }
3018
3019         astr = abuf;
3020         if (neg) {
3021                 strcpy(astr, "-");
3022                 astr++;
3023         }
3024
3025         ntoskrnl_unicode_to_ascii(uchr, astr, len);
3026         *val = strtoul(abuf, NULL, base);
3027
3028         return (STATUS_SUCCESS);
3029 }
3030
3031 void
3032 RtlFreeUnicodeString(ustr)
3033         unicode_string          *ustr;
3034 {
3035         if (ustr->us_buf == NULL)
3036                 return;
3037         ExFreePool(ustr->us_buf);
3038         ustr->us_buf = NULL;
3039 }
3040
3041 void
3042 RtlFreeAnsiString(astr)
3043         ansi_string             *astr;
3044 {
3045         if (astr->as_buf == NULL)
3046                 return;
3047         ExFreePool(astr->as_buf);
3048         astr->as_buf = NULL;
3049 }
3050
3051 static int
3052 atoi(str)
3053         const char              *str;
3054 {
3055         return (int)strtol(str, (char **)NULL, 10);
3056 }
3057
3058 static long
3059 atol(str)
3060         const char              *str;
3061 {
3062         return strtol(str, (char **)NULL, 10);
3063 }
3064
3065 static int
3066 rand(void)
3067 {
3068         struct timeval          tv;
3069
3070         microtime(&tv);
3071         srandom(tv.tv_usec);
3072         return ((int)random());
3073 }
3074
3075 static void
3076 srand(seed)
3077         unsigned int            seed;
3078 {
3079         srandom(seed);
3080 }
3081
3082 static uint8_t
3083 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
3084 {
3085         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3086                 return (TRUE);
3087         return (FALSE);
3088 }
3089
3090 static ndis_status
3091 IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
3092         unicode_string          *name;
3093         uint32_t                reqaccess;
3094         void                    *fileobj;
3095         device_object           *devobj;
3096 {
3097         return (STATUS_SUCCESS);
3098 }
3099
3100 static ndis_status
3101 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3102         device_object           *devobj;
3103         uint32_t                regprop;
3104         uint32_t                buflen;
3105         void                    *prop;
3106         uint32_t                *reslen;
3107 {
3108         driver_object           *drv;
3109         uint16_t                **name;
3110
3111         drv = devobj->do_drvobj;
3112
3113         switch (regprop) {
3114         case DEVPROP_DRIVER_KEYNAME:
3115                 name = prop;
3116                 *name = drv->dro_drivername.us_buf;
3117                 *reslen = drv->dro_drivername.us_len;
3118                 break;
3119         default:
3120                 return (STATUS_INVALID_PARAMETER_2);
3121                 break;
3122         }
3123
3124         return (STATUS_SUCCESS);
3125 }
3126
3127 static void
3128 KeInitializeMutex(kmutex, level)
3129         kmutant                 *kmutex;
3130         uint32_t                level;
3131 {
3132         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3133         kmutex->km_abandoned = FALSE;
3134         kmutex->km_apcdisable = 1;
3135         kmutex->km_header.dh_sigstate = 1;
3136         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3137         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3138         kmutex->km_ownerthread = NULL;
3139 }
3140
3141 static uint32_t
3142 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
3143 {
3144         uint32_t                prevstate;
3145
3146         mtx_lock(&ntoskrnl_dispatchlock);
3147         prevstate = kmutex->km_header.dh_sigstate;
3148         if (kmutex->km_ownerthread != curthread) {
3149                 mtx_unlock(&ntoskrnl_dispatchlock);
3150                 return (STATUS_MUTANT_NOT_OWNED);
3151         }
3152
3153         kmutex->km_header.dh_sigstate++;
3154         kmutex->km_abandoned = FALSE;
3155
3156         if (kmutex->km_header.dh_sigstate == 1) {
3157                 kmutex->km_ownerthread = NULL;
3158                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3159         }
3160
3161         mtx_unlock(&ntoskrnl_dispatchlock);
3162
3163         return (prevstate);
3164 }
3165
3166 static uint32_t
3167 KeReadStateMutex(kmutex)
3168         kmutant                 *kmutex;
3169 {
3170         return (kmutex->km_header.dh_sigstate);
3171 }
3172
3173 void
3174 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
3175 {
3176         InitializeListHead((&kevent->k_header.dh_waitlisthead));
3177         kevent->k_header.dh_sigstate = state;
3178         if (type == EVENT_TYPE_NOTIFY)
3179                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3180         else
3181                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3182         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3183 }
3184
3185 uint32_t
3186 KeResetEvent(kevent)
3187         nt_kevent               *kevent;
3188 {
3189         uint32_t                prevstate;
3190
3191         mtx_lock(&ntoskrnl_dispatchlock);
3192         prevstate = kevent->k_header.dh_sigstate;
3193         kevent->k_header.dh_sigstate = FALSE;
3194         mtx_unlock(&ntoskrnl_dispatchlock);
3195
3196         return (prevstate);
3197 }
3198
3199 uint32_t
3200 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
3201 {
3202         uint32_t                prevstate;
3203         wait_block              *w;
3204         nt_dispatch_header      *dh;
3205         struct thread           *td;
3206         wb_ext                  *we;
3207
3208         mtx_lock(&ntoskrnl_dispatchlock);
3209         prevstate = kevent->k_header.dh_sigstate;
3210         dh = &kevent->k_header;
3211
3212         if (IsListEmpty(&dh->dh_waitlisthead))
3213                 /*
3214                  * If there's nobody in the waitlist, just set
3215                  * the state to signalled.
3216                  */
3217                 dh->dh_sigstate = 1;
3218         else {
3219                 /*
3220                  * Get the first waiter. If this is a synchronization
3221                  * event, just wake up that one thread (don't bother
3222                  * setting the state to signalled since we're supposed
3223                  * to automatically clear synchronization events anyway).
3224                  *
3225                  * If it's a notification event, or the first
3226                  * waiter is doing a WAITTYPE_ALL wait, go through
3227                  * the full wait satisfaction process.
3228                  */
3229                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3230                     wait_block, wb_waitlist);
3231                 we = w->wb_ext;
3232                 td = we->we_td;
3233                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3234                     w->wb_waittype == WAITTYPE_ALL) {
3235                         if (prevstate == 0) {
3236                                 dh->dh_sigstate = 1;
3237                                 ntoskrnl_waittest(dh, increment);
3238                         }
3239                 } else {
3240                         w->wb_awakened |= TRUE;
3241                         cv_broadcastpri(&we->we_cv,
3242                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3243                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3244                 }
3245         }
3246
3247         mtx_unlock(&ntoskrnl_dispatchlock);
3248
3249         return (prevstate);
3250 }
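
/*
 * Illustrative sketch (not part of this module): the difference between
 * the two event types as a driver sees it. A notification event stays
 * signalled and releases every waiter until it is reset or cleared; a
 * synchronization event wakes a single waiter and clears itself. The
 * variable names are hypothetical.
 *
 *      nt_kevent       done;
 *
 *      KeInitializeEvent(&done, EVENT_TYPE_NOTIFY, FALSE);
 *      ...
 *      KeWaitForSingleObject(&done, 0, 0, FALSE, NULL);     consumer
 *      ...
 *      KeSetEvent(&done, IO_NO_INCREMENT, FALSE);           producer
 */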
3251
3252 void
3253 KeClearEvent(kevent)
3254         nt_kevent               *kevent;
3255 {
3256         kevent->k_header.dh_sigstate = FALSE;
3257 }
3258
3259 uint32_t
3260 KeReadStateEvent(kevent)
3261         nt_kevent               *kevent;
3262 {
3263         return (kevent->k_header.dh_sigstate);
3264 }
3265
3266 /*
3267  * The object manager in Windows is responsible for managing
3268  * references and access to various types of objects, including
3269  * device_objects, events, threads, timers and so on. However,
3270  * there's a difference in the way objects are handled in user
3271  * mode versus kernel mode.
3272  *
3273  * In user mode (i.e. Win32 applications), all objects are
3274  * managed by the object manager. For example, when you create
3275  * a timer or event object, you actually end up with an 
3276  * object_header (for the object manager's bookkeeping
3277  * purposes) and an object body (which contains the actual object
3278  * structure, e.g. ktimer, kevent, etc...). This allows Windows
3279  * to manage resource quotas and to enforce access restrictions
3280  * on basically every kind of system object handled by the kernel.
3281  *
3282  * However, in kernel mode, you only end up using the object
3283  * manager some of the time. For example, in a driver, you create
3284  * a timer object by simply allocating the memory for a ktimer
3285  * structure and initializing it with KeInitializeTimer(). Hence,
3286  * the timer has no object_header and no reference counting or
3287  * security/resource checks are done on it. The assumption in
3288  * this case is that if you're running in kernel mode, you know
3289  * what you're doing, and you're already at an elevated privilege
3290  * anyway.
3291  *
3292  * There are some exceptions to this. The two most important ones
3293  * for our purposes are device_objects and threads. We need to use
3294  * the object manager to do reference counting on device_objects,
3295  * and for threads, you can only get a pointer to a thread's
3296  * dispatch header by using ObReferenceObjectByHandle() on the
3297  * handle returned by PsCreateSystemThread().
3298  */
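
/*
 * Illustrative sketch (not part of this module): a kernel-mode driver
 * normally just embeds the object body in its own softc and initializes
 * it in place, with no object_header involved. The structure and field
 * names below are hypothetical.
 *
 *      struct my_softc {
 *              ktimer          sc_timer;
 *              nt_kevent       sc_event;
 *      };
 *
 *      KeInitializeTimer(&sc->sc_timer);
 *      KeInitializeEvent(&sc->sc_event, EVENT_TYPE_NOTIFY, FALSE);
 *
 * Device objects and threads are the exceptions: those go through
 * ObReferenceObjectByHandle()/ObfDereferenceObject() below.
 */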
3299
3300 static ndis_status
3301 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
3302         uint8_t accessmode, void **object, void **handleinfo)
3303 {
3304         nt_objref               *nr;
3305
3306         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3307         if (nr == NULL)
3308                 return (STATUS_INSUFFICIENT_RESOURCES);
3309
3310         InitializeListHead((&nr->no_dh.dh_waitlisthead));
3311         nr->no_obj = handle;
3312         nr->no_dh.dh_type = DISP_TYPE_THREAD;
3313         nr->no_dh.dh_sigstate = 0;
3314         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3315             sizeof(uint32_t));
3316         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3317         *object = nr;
3318
3319         return (STATUS_SUCCESS);
3320 }
3321
3322 static void
3323 ObfDereferenceObject(object)
3324         void                    *object;
3325 {
3326         nt_objref               *nr;
3327
3328         nr = object;
3329         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3330         free(nr, M_DEVBUF);
3331 }
3332
3333 static uint32_t
3334 ZwClose(handle)
3335         ndis_handle             handle;
3336 {
3337         return (STATUS_SUCCESS);
3338 }
3339
3340 static uint32_t
3341 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3342         uint32_t                traceclass;
3343         void                    *traceinfo;
3344         uint32_t                infolen;
3345         uint32_t                reqlen;
3346         void                    *buf;
3347 {
3348         return (STATUS_NOT_FOUND);
3349 }
3350
3351 static uint32_t
3352 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3353         void *guid, uint16_t messagenum, ...)
3354 {
3355         return (STATUS_SUCCESS);
3356 }
3357
3358 static uint32_t
3359 IoWMIRegistrationControl(dobj, action)
3360         device_object           *dobj;
3361         uint32_t                action;
3362 {
3363         return (STATUS_SUCCESS);
3364 }
3365
3366 /*
3367  * This is here just in case the thread returns without calling
3368  * PsTerminateSystemThread().
3369  */
3370 static void
3371 ntoskrnl_thrfunc(arg)
3372         void                    *arg;
3373 {
3374         thread_context          *thrctx;
3375         uint32_t (*tfunc)(void *);
3376         void                    *tctx;
3377         uint32_t                rval;
3378
3379         thrctx = arg;
3380         tfunc = thrctx->tc_thrfunc;
3381         tctx = thrctx->tc_thrctx;
3382         free(thrctx, M_TEMP);
3383
3384         rval = MSCALL1(tfunc, tctx);
3385
3386         PsTerminateSystemThread(rval);
3387         return; /* notreached */
3388 }
3389
3390 static ndis_status
3391 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3392         clientid, thrfunc, thrctx)
3393         ndis_handle             *handle;
3394         uint32_t                reqaccess;
3395         void                    *objattrs;
3396         ndis_handle             phandle;
3397         void                    *clientid;
3398         void                    *thrfunc;
3399         void                    *thrctx;
3400 {
3401         int                     error;
3402         thread_context          *tc;
3403         struct proc             *p;
3404
3405         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3406         if (tc == NULL)
3407                 return (STATUS_INSUFFICIENT_RESOURCES);
3408
3409         tc->tc_thrctx = thrctx;
3410         tc->tc_thrfunc = thrfunc;
3411
3412         error = kproc_create(ntoskrnl_thrfunc, tc, &p,
3413             RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Kthread %d", ntoskrnl_kth);
3414
3415         if (error) {
3416                 free(tc, M_TEMP);
3417                 return (STATUS_INSUFFICIENT_RESOURCES);
3418         }
3419
3420         *handle = p;
3421         ntoskrnl_kth++;
3422
3423         return (STATUS_SUCCESS);
3424 }
3425
3426 /*
3427  * In Windows, the exit of a thread is an event that you're allowed
3428  * to wait on, assuming you've obtained a reference to the thread using
3429  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3430  * simulate this behavior is to register each thread we create in a
3431  * reference list, and if someone holds a reference to us, we poke
3432  * them.
3433  */
3434 static ndis_status
3435 PsTerminateSystemThread(status)
3436         ndis_status             status;
3437 {
3438         struct nt_objref        *nr;
3439
3440         mtx_lock(&ntoskrnl_dispatchlock);
3441         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3442                 if (nr->no_obj != curthread->td_proc)
3443                         continue;
3444                 nr->no_dh.dh_sigstate = 1;
3445                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3446                 break;
3447         }
3448         mtx_unlock(&ntoskrnl_dispatchlock);
3449
3450         ntoskrnl_kth--;
3451
3452         kproc_exit(0);
3453         return (0);     /* notreached */
3454 }
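
/*
 * Illustrative sketch (not part of this module): the pattern the
 * reference list above is meant to support. A driver creates a worker
 * thread, turns the handle into a waitable object and later waits for
 * the thread to call PsTerminateSystemThread(). The function and
 * variable names are hypothetical; note that this emulation ignores
 * the access/attribute arguments and that ZwClose() is a no-op here.
 *
 *      ndis_handle     handle;
 *      void            *thread;
 *
 *      PsCreateSystemThread(&handle, 0, NULL, NULL, NULL,
 *          my_thread_func, sc);
 *      ObReferenceObjectByHandle(handle, 0, NULL, 0, &thread, NULL);
 *      ZwClose(handle);
 *      ...
 *      KeWaitForSingleObject(thread, 0, 0, FALSE, NULL);
 *      ObfDereferenceObject(thread);
 */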
3455
3456 static uint32_t
3457 DbgPrint(char *fmt, ...)
3458 {
3459         va_list                 ap;
3460
3461         if (bootverbose) {
3462                 va_start(ap, fmt);
3463                 vprintf(fmt, ap);
3464         }
3465
3466         return (STATUS_SUCCESS);
3467 }
3468
3469 static void
3470 DbgBreakPoint(void)
3471 {
3472
3473         kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
3474 }
3475
3476 static void
3477 KeBugCheckEx(code, param1, param2, param3, param4)
3478     uint32_t                    code;
3479     u_long                      param1;
3480     u_long                      param2;
3481     u_long                      param3;
3482     u_long                      param4;
3483 {
3484         panic("KeBugCheckEx: STOP 0x%X", code);
3485 }
3486
3487 static void
3488 ntoskrnl_timercall(arg)
3489         void                    *arg;
3490 {
3491         ktimer                  *timer;
3492         struct timeval          tv;
3493         kdpc                    *dpc;
3494
3495         mtx_lock(&ntoskrnl_dispatchlock);
3496
3497         timer = arg;
3498
3499 #ifdef NTOSKRNL_DEBUG_TIMERS
3500         ntoskrnl_timer_fires++;
3501 #endif
3502         ntoskrnl_remove_timer(timer);
3503
3504         /*
3505          * This should never happen, but complain
3506          * if it does.
3507          */
3508
3509         if (timer->k_header.dh_inserted == FALSE) {
3510                 mtx_unlock(&ntoskrnl_dispatchlock);
3511                 printf("NTOS: timer %p fired even though "
3512                     "it was canceled\n", timer);
3513                 return;
3514         }
3515
3516         /* Mark the timer as no longer being on the timer queue. */
3517
3518         timer->k_header.dh_inserted = FALSE;
3519
3520         /* Now signal the object and satisfy any waits on it. */
3521
3522         timer->k_header.dh_sigstate = 1;
3523         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3524
3525         /*
3526          * If this is a periodic timer, re-arm it
3527          * so it will fire again. We do this before
3528          * calling any deferred procedure calls because
3529          * it's possible the DPC might cancel the timer,
3530          * in which case it would be wrong for us to
3531          * re-arm it again afterwards.
3532          */
3533
3534         if (timer->k_period) {
3535                 tv.tv_sec = 0;
3536                 tv.tv_usec = timer->k_period * 1000;
3537                 timer->k_header.dh_inserted = TRUE;
3538                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3539 #ifdef NTOSKRNL_DEBUG_TIMERS
3540                 ntoskrnl_timer_reloads++;
3541 #endif
3542         }
3543
3544         dpc = timer->k_dpc;
3545
3546         mtx_unlock(&ntoskrnl_dispatchlock);
3547
3548         /* If there's a DPC associated with the timer, queue it up. */
3549
3550         if (dpc != NULL)
3551                 KeInsertQueueDpc(dpc, NULL, NULL);
3552 }
3553
3554 #ifdef NTOSKRNL_DEBUG_TIMERS
3555 static int
3556 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3557 {
3558         int                     ret;
3559
3560         ret = 0;
3561         ntoskrnl_show_timers();
3562         return (sysctl_handle_int(oidp, &ret, 0, req));
3563 }
3564
3565 static void
3566 ntoskrnl_show_timers()
3567 {
3568         int                     i = 0;
3569         list_entry              *l;
3570
3571         mtx_lock_spin(&ntoskrnl_calllock);
3572         l = ntoskrnl_calllist.nle_flink;
3573         while(l != &ntoskrnl_calllist) {
3574                 i++;
3575                 l = l->nle_flink;
3576         }
3577         mtx_unlock_spin(&ntoskrnl_calllock);
3578
3579         printf("\n");
3580         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3581         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3582         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3583         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3584         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3585         printf("\n");
3586 }
3587 #endif
3588
3589 /*
3590  * Must be called with dispatcher lock held.
3591  */
3592
3593 static void
3594 ntoskrnl_insert_timer(timer, ticks)
3595         ktimer                  *timer;
3596         int                     ticks;
3597 {
3598         callout_entry           *e;
3599         list_entry              *l;
3600         struct callout          *c;
3601
3602         /*
3603          * Try and allocate a timer.
3604          */
3605         mtx_lock_spin(&ntoskrnl_calllock);
3606         if (IsListEmpty(&ntoskrnl_calllist)) {
3607                 mtx_unlock_spin(&ntoskrnl_calllock);
3608 #ifdef NTOSKRNL_DEBUG_TIMERS
3609                 ntoskrnl_show_timers();
3610 #endif
3611                 panic("out of timers!");
3612         }
3613         l = RemoveHeadList(&ntoskrnl_calllist);
3614         mtx_unlock_spin(&ntoskrnl_calllock);
3615
3616         e = CONTAINING_RECORD(l, callout_entry, ce_list);
3617         c = &e->ce_callout;
3618
3619         timer->k_callout = c;
3620
3621         callout_init(c, CALLOUT_MPSAFE);
3622         callout_reset(c, ticks, ntoskrnl_timercall, timer);
3623 }
3624
3625 static void
3626 ntoskrnl_remove_timer(timer)
3627         ktimer                  *timer;
3628 {
3629         callout_entry           *e;
3630
3631         e = (callout_entry *)timer->k_callout;
3632         callout_stop(timer->k_callout);
3633
3634         mtx_lock_spin(&ntoskrnl_calllock);
3635         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3636         mtx_unlock_spin(&ntoskrnl_calllock);
3637 }
3638
3639 void
3640 KeInitializeTimer(timer)
3641         ktimer                  *timer;
3642 {
3643         if (timer == NULL)
3644                 return;
3645
3646         KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3647 }
3648
3649 void
3650 KeInitializeTimerEx(timer, type)
3651         ktimer                  *timer;
3652         uint32_t                type;
3653 {
3654         if (timer == NULL)
3655                 return;
3656
3657         bzero((char *)timer, sizeof(ktimer));
3658         InitializeListHead((&timer->k_header.dh_waitlisthead));
3659         timer->k_header.dh_sigstate = FALSE;
3660         timer->k_header.dh_inserted = FALSE;
3661         if (type == EVENT_TYPE_NOTIFY)
3662                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3663         else
3664                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3665         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3666 }
3667
3668 /*
3669  * DPC subsystem. A Windows Deferred Procedure Call has the following
3670  * properties:
3671  * - It runs at DISPATCH_LEVEL.
3672  * - It can have one of 3 importance values that control when it
3673  *   runs relative to other DPCs in the queue.
3674  * - On SMP systems, it can be set to run on a specific processor.
3675  * In order to satisfy the last property, we create a DPC thread for
3676  * each CPU in the system and bind it to that CPU. Each thread
3677  * maintains a dispatch queue; DPCs are inserted at its head or tail
3678  * according to their importance, so more important DPCs run first.
3679  *
3680  * In Windows, interrupt handlers run as DPCs. (Not to be confused
3681  * with ISRs, which run in interrupt context and can preempt DPCs.)
3682  * The DPCs queued by ISRs are given the highest importance so that
3683  * they'll take precedence over timers and other things.
3684  */
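
/*
 * Illustrative sketch (not part of this module): how a driver would
 * typically queue work through this DPC emulation. The callback,
 * context and argument names are hypothetical.
 *
 *      kdpc            dpc;
 *
 *      KeInitializeDpc(&dpc, my_dpc_func, sc);
 *      KeSetImportanceDpc(&dpc, KDPC_IMPORTANCE_HIGH);
 *      KeSetTargetProcessorDpc(&dpc, 0);
 *      ...
 *      KeInsertQueueDpc(&dpc, arg1, arg2);
 *
 * KeInsertQueueDpc() returns FALSE if the DPC is already queued; the
 * callback eventually runs in one of the DPC threads created below.
 */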
3685
3686 static void
3687 ntoskrnl_dpc_thread(arg)
3688         void                    *arg;
3689 {
3690         kdpc_queue              *kq;
3691         kdpc                    *d;
3692         list_entry              *l;
3693         uint8_t                 irql;
3694
3695         kq = arg;
3696
3697         InitializeListHead(&kq->kq_disp);
3698         kq->kq_td = curthread;
3699         kq->kq_exit = 0;
3700         kq->kq_running = FALSE;
3701         KeInitializeSpinLock(&kq->kq_lock);
3702         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3703         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3704
3705         /*
3706          * Elevate our priority. DPCs are used to run interrupt
3707          * handlers, and they should trigger as soon as possible
3708          * once scheduled by an ISR.
3709          */
3710
3711         thread_lock(curthread);
3712 #ifdef NTOSKRNL_MULTIPLE_DPCS
3713         sched_bind(curthread, kq->kq_cpu);
3714 #endif
3715         sched_prio(curthread, PRI_MIN_KERN);
3716         thread_unlock(curthread);
3717
3718         while (1) {
3719                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3720
3721                 KeAcquireSpinLock(&kq->kq_lock, &irql);
3722
3723                 if (kq->kq_exit) {
3724                         kq->kq_exit = 0;
3725                         KeReleaseSpinLock(&kq->kq_lock, irql);
3726                         break;
3727                 }
3728
3729                 kq->kq_running = TRUE;
3730
3731                 while (!IsListEmpty(&kq->kq_disp)) {
3732                         l = RemoveHeadList((&kq->kq_disp));
3733                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3734                         InitializeListHead((&d->k_dpclistentry));
3735                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3736                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3737                             d->k_sysarg1, d->k_sysarg2);
3738                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3739                 }
3740
3741                 kq->kq_running = FALSE;
3742
3743                 KeReleaseSpinLock(&kq->kq_lock, irql);
3744
3745                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3746         }
3747
3748         kproc_exit(0);
3749         return; /* notreached */
3750 }
3751
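/*
 * Tear down the DPC worker thread(s): set kq_exit and queue a dummy
 * DPC so the thread wakes up, notices the flag, clears it and exits.
 * We poll until the flag is cleared before moving to the next queue.
 */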
3752 static void
3753 ntoskrnl_destroy_dpc_threads(void)
3754 {
3755         kdpc_queue              *kq;
3756         kdpc                    dpc;
3757         int                     i;
3758
3759         kq = kq_queues;
3760 #ifdef NTOSKRNL_MULTIPLE_DPCS
3761         for (i = 0; i < mp_ncpus; i++) {
3762 #else
3763         for (i = 0; i < 1; i++) {
3764 #endif
3765                 kq += i;
3766
3767                 kq->kq_exit = 1;
3768                 KeInitializeDpc(&dpc, NULL, NULL);
3769                 KeSetTargetProcessorDpc(&dpc, i);
3770                 KeInsertQueueDpc(&dpc, NULL, NULL);
3771                 while (kq->kq_exit)
3772                         tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
3773         }
3774 }
3775
3776 static uint8_t
3777 ntoskrnl_insert_dpc(head, dpc)
3778         list_entry              *head;
3779         kdpc                    *dpc;
3780 {
3781         list_entry              *l;
3782         kdpc                    *d;
3783
3784         l = head->nle_flink;
3785         while (l != head) {
3786                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3787                 if (d == dpc)
3788                         return (FALSE);
3789                 l = l->nle_flink;
3790         }
3791
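        /*
         * Low importance DPCs go to the tail of the queue; medium and
         * high importance DPCs are inserted at the head so they are
         * dispatched first.
         */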
3792         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3793                 InsertTailList((head), (&dpc->k_dpclistentry));
3794         else
3795                 InsertHeadList((head), (&dpc->k_dpclistentry));
3796
3797         return (TRUE);
3798 }
3799
3800 void
3801 KeInitializeDpc(dpc, dpcfunc, dpcctx)
3802         kdpc                    *dpc;
3803         void                    *dpcfunc;
3804         void                    *dpcctx;
3805 {
3806
3807         if (dpc == NULL)
3808                 return;
3809
3810         dpc->k_deferedfunc = dpcfunc;
3811         dpc->k_deferredctx = dpcctx;
3812         dpc->k_num = KDPC_CPU_DEFAULT;
3813         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3814         InitializeListHead((&dpc->k_dpclistentry));
3815 }
3816
3817 uint8_t
3818 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
3819         kdpc                    *dpc;
3820         void                    *sysarg1;
3821         void                    *sysarg2;
3822 {
3823         kdpc_queue              *kq;
3824         uint8_t                 r;
3825         uint8_t                 irql;
3826
3827         if (dpc == NULL)
3828                 return (FALSE);
3829
3830         kq = kq_queues;
3831
3832 #ifdef NTOSKRNL_MULTIPLE_DPCS
3833         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3834
3835         /*
3836          * By default, the DPC is queued to run on the same CPU
3837          * that scheduled it.
3838          */
3839
3840         if (dpc->k_num == KDPC_CPU_DEFAULT)
3841                 kq += curthread->td_oncpu;
3842         else
3843                 kq += dpc->k_num;
3844         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3845 #else
3846         KeAcquireSpinLock(&kq->kq_lock, &irql);
3847 #endif
3848
3849         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3850         if (r == TRUE) {
3851                 dpc->k_sysarg1 = sysarg1;
3852                 dpc->k_sysarg2 = sysarg2;
3853         }
3854         KeReleaseSpinLock(&kq->kq_lock, irql);
3855
3856         if (r == FALSE)
3857                 return (r);
3858
3859         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3860
3861         return (r);
3862 }
3863
3864 uint8_t
3865 KeRemoveQueueDpc(dpc)
3866         kdpc                    *dpc;
3867 {
3868         kdpc_queue              *kq;
3869         uint8_t                 irql;
3870
3871         if (dpc == NULL)
3872                 return (FALSE);
3873
3874 #ifdef NTOSKRNL_MULTIPLE_DPCS
3875         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3876
3877         kq = kq_queues + dpc->k_num;
3878
3879         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3880 #else
3881         kq = kq_queues;
3882         KeAcquireSpinLock(&kq->kq_lock, &irql);
3883 #endif
3884
3885         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
3886                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3887                 KeLowerIrql(irql);
3888                 return (FALSE);
3889         }
3890
3891         RemoveEntryList((&dpc->k_dpclistentry));
3892         InitializeListHead((&dpc->k_dpclistentry));
3893
3894         KeReleaseSpinLock(&kq->kq_lock, irql);
3895
3896         return (TRUE);
3897 }
3898
3899 void
3900 KeSetImportanceDpc(dpc, imp)
3901         kdpc                    *dpc;
3902         uint32_t                imp;
3903 {
3904         if (imp != KDPC_IMPORTANCE_LOW &&
3905             imp != KDPC_IMPORTANCE_MEDIUM &&
3906             imp != KDPC_IMPORTANCE_HIGH)
3907                 return;
3908
3909         dpc->k_importance = (uint8_t)imp;
3910 }
3911
3912 void
3913 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
3914 {
3915         if (cpu >= mp_ncpus)
3916                 return;
3917
3918         dpc->k_num = cpu;
3919 }
3920
3921 void
3922 KeFlushQueuedDpcs(void)
3923 {
3924         kdpc_queue              *kq;
3925         int                     i;
3926
3927         /*
3928          * Poke each DPC queue and wait
3929          * for them to drain.
3930          */
3931
3932 #ifdef NTOSKRNL_MULTIPLE_DPCS
3933         for (i = 0; i < mp_ncpus; i++) {
3934 #else
3935         for (i = 0; i < 1; i++) {
3936 #endif
3937                 kq = kq_queues + i;
3938                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3939                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
3940         }
3941 }
3942
3943 uint32_t
3944 KeGetCurrentProcessorNumber(void)
3945 {
3946         return ((uint32_t)curthread->td_oncpu);
3947 }
3948
3949 uint8_t
3950 KeSetTimerEx(timer, duetime, period, dpc)
3951         ktimer                  *timer;
3952         int64_t                 duetime;
3953         uint32_t                period;
3954         kdpc                    *dpc;
3955 {
3956         struct timeval          tv;
3957         uint64_t                curtime;
3958         uint8_t                 pending;
3959
3960         if (timer == NULL)
3961                 return (FALSE);
3962
3963         mtx_lock(&ntoskrnl_dispatchlock);
3964
3965         if (timer->k_header.dh_inserted == TRUE) {
3966                 ntoskrnl_remove_timer(timer);
3967 #ifdef NTOSKRNL_DEBUG_TIMERS
3968                 ntoskrnl_timer_cancels++;
3969 #endif
3970                 timer->k_header.dh_inserted = FALSE;
3971                 pending = TRUE;
3972         } else
3973                 pending = FALSE;
3974
3975         timer->k_duetime = duetime;
3976         timer->k_period = period;
3977         timer->k_header.dh_sigstate = FALSE;
3978         timer->k_dpc = dpc;
3979
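        /*
         * Windows expresses the due time in 100-nanosecond units: a
         * negative value is an interval relative to now, while a
         * non-negative value is an absolute time on the same scale
         * that ntoskrnl_time() reports. Convert either form into a
         * timeval for the callout below.
         */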
3980         if (duetime < 0) {
3981                 tv.tv_sec = - (duetime) / 10000000;
3982                 tv.tv_usec = (- (duetime) / 10) -
3983                     (tv.tv_sec * 1000000);
3984         } else {
3985                 ntoskrnl_time(&curtime);
3986                 if (duetime < curtime)
3987                         tv.tv_sec = tv.tv_usec = 0;
3988                 else {
3989                         tv.tv_sec = ((duetime) - curtime) / 10000000;
3990                         tv.tv_usec = ((duetime) - curtime) / 10 -
3991                             (tv.tv_sec * 1000000);
3992                 }
3993         }
3994
3995         timer->k_header.dh_inserted = TRUE;
3996         ntoskrnl_insert_timer(timer, tvtohz(&tv));
3997 #ifdef NTOSKRNL_DEBUG_TIMERS
3998         ntoskrnl_timer_sets++;
3999 #endif
4000
4001         mtx_unlock(&ntoskrnl_dispatchlock);
4002
4003         return (pending);
4004 }
4005
4006 uint8_t
4007 KeSetTimer(timer, duetime, dpc)
4008         ktimer                  *timer;
4009         int64_t                 duetime;
4010         kdpc                    *dpc;
4011 {
4012         return (KeSetTimerEx(timer, duetime, 0, dpc));
4013 }
4014
4015 /*
4016  * The Windows DDK documentation seems to say that cancelling
4017  * a timer that has a DPC will result in the DPC also being
4018  * cancelled, but this isn't really the case.
4019  */
4020
4021 uint8_t
4022 KeCancelTimer(timer)
4023         ktimer                  *timer;
4024 {
4025         uint8_t                 pending;
4026
4027         if (timer == NULL)
4028                 return (FALSE);
4029
4030         mtx_lock(&ntoskrnl_dispatchlock);
4031
4032         pending = timer->k_header.dh_inserted;
4033
4034         if (timer->k_header.dh_inserted == TRUE) {
4035                 timer->k_header.dh_inserted = FALSE;
4036                 ntoskrnl_remove_timer(timer);
4037 #ifdef NTOSKRNL_DEBUG_TIMERS
4038                 ntoskrnl_timer_cancels++;
4039 #endif
4040         }
4041
4042         mtx_unlock(&ntoskrnl_dispatchlock);
4043
4044         return (pending);
4045 }
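
/*
 * Illustrative sketch (not part of this module): since cancelling a
 * timer does not cancel its DPC, a driver that also wants any queued
 * DPC gone typically pairs the two calls. The softc fields below are
 * hypothetical.
 *
 *      KeCancelTimer(&sc->sc_timer);
 *      KeRemoveQueueDpc(&sc->sc_dpc);
 */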
4046
4047 uint8_t
4048 KeReadStateTimer(timer)
4049         ktimer                  *timer;
4050 {
4051         return (timer->k_header.dh_sigstate);
4052 }
4053
4054 static int32_t
4055 KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
4056 {
4057         ktimer                  timer;
4058
4059         if (wait_mode != 0)
4060                 panic("invalid wait_mode %d", wait_mode);
4061
4062         KeInitializeTimer(&timer);
4063         KeSetTimer(&timer, *interval, NULL);
4064         KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);
4065
4066         return STATUS_SUCCESS;
4067 }
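
/*
 * Illustrative note (not part of this module): callers normally pass a
 * negative, relative interval in 100-nanosecond units. For example, a
 * 50 millisecond sleep:
 *
 *      int64_t         interval = -500000;
 *
 *      KeDelayExecutionThread(0, FALSE, &interval);
 */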
4068
4069 static uint64_t
4070 KeQueryInterruptTime(void)
4071 {
4072         int ticks;
4073         struct timeval tv;
4074
4075         getmicrouptime(&tv);
4076
4077         ticks = tvtohz(&tv);
4078
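        /*
         * Interrupt time is expressed in 100-nanosecond units since
         * boot; (10000000 + hz - 1) / hz is the per-tick increment,
         * rounded up.
         */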
4079         return ticks * ((10000000 + hz - 1) / hz);
4080 }
4081
4082 static struct thread *
4083 KeGetCurrentThread(void)
4084 {
4085
4086         return curthread;
4087 }
4088
4089 static int32_t
4090 KeSetPriorityThread(td, pri)
4091         struct thread   *td;
4092         int32_t         pri;
4093 {
4094         int32_t old;
4095
4096         if (td == NULL)
4097                 return LOW_REALTIME_PRIORITY;
4098
4099         if (td->td_priority <= PRI_MIN_KERN)
4100                 old = HIGH_PRIORITY;
4101         else if (td->td_priority >= PRI_MAX_KERN)
4102                 old = LOW_PRIORITY;
4103         else
4104                 old = LOW_REALTIME_PRIORITY;
4105
4106         thread_lock(td);
4107         if (pri == HIGH_PRIORITY)
4108                 sched_prio(td, PRI_MIN_KERN);
4109         if (pri == LOW_REALTIME_PRIORITY)
4110                 sched_prio(td, PRI_MIN_KERN + (PRI_MAX_KERN - PRI_MIN_KERN) / 2);
4111         if (pri == LOW_PRIORITY)
4112                 sched_prio(td, PRI_MAX_KERN);
4113         thread_unlock(td);
4114
4115         return old;
4116 }
4117
4118 static void
4119 dummy()
4120 {
4121         printf("ntoskrnl dummy called...\n");
4122 }
4123
4124
4125 image_patch_table ntoskrnl_functbl[] = {
4126         IMPORT_SFUNC(RtlZeroMemory, 2),
4127         IMPORT_SFUNC(RtlCopyMemory, 3),
4128         IMPORT_SFUNC(RtlCompareMemory, 3),
4129         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4130         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4131         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4132         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4133         IMPORT_SFUNC(RtlInitAnsiString, 2),
4134         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4135         IMPORT_SFUNC(RtlInitUnicodeString, 2),
4136         IMPORT_SFUNC(RtlFreeAnsiString, 1),
4137         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4138         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4139         IMPORT_CFUNC(sprintf, 0),
4140         IMPORT_CFUNC(vsprintf, 0),
4141         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4142         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4143         IMPORT_CFUNC(DbgPrint, 0),
4144         IMPORT_SFUNC(DbgBreakPoint, 0),
4145         IMPORT_SFUNC(KeBugCheckEx, 5),
4146         IMPORT_CFUNC(strncmp, 0),
4147         IMPORT_CFUNC(strcmp, 0),
4148         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4149         IMPORT_CFUNC(strncpy, 0),
4150         IMPORT_CFUNC(strcpy, 0),
4151         IMPORT_CFUNC(strlen, 0),
4152         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4153         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4154         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4155         IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
4156         IMPORT_CFUNC_MAP(strchr, index, 0),
4157         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4158         IMPORT_CFUNC(memcpy, 0),
4159         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4160         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4161         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4162         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4163         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4164         IMPORT_FFUNC(IofCallDriver, 2),
4165         IMPORT_FFUNC(IofCompleteRequest, 2),
4166         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4167         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4168         IMPORT_SFUNC(IoCancelIrp, 1),
4169         IMPORT_SFUNC(IoConnectInterrupt, 11),
4170         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4171         IMPORT_SFUNC(IoCreateDevice, 7),
4172         IMPORT_SFUNC(IoDeleteDevice, 1),
4173         IMPORT_SFUNC(IoGetAttachedDevice, 1),
4174         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4175         IMPORT_SFUNC(IoDetachDevice, 1),
4176         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4177         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4178         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4179         IMPORT_SFUNC(IoAllocateIrp, 2),
4180         IMPORT_SFUNC(IoReuseIrp, 2),
4181         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4182         IMPORT_SFUNC(IoFreeIrp, 1),
4183         IMPORT_SFUNC(IoInitializeIrp, 3),
4184         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4185         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4186         IMPORT_SFUNC(KeSynchronizeExecution, 3),
4187         IMPORT_SFUNC(KeWaitForSingleObject, 5),
4188         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4189         IMPORT_SFUNC(_allmul, 4),
4190         IMPORT_SFUNC(_alldiv, 4),
4191         IMPORT_SFUNC(_allrem, 4),
4192         IMPORT_RFUNC(_allshr, 0),
4193         IMPORT_RFUNC(_allshl, 0),
4194         IMPORT_SFUNC(_aullmul, 4),
4195         IMPORT_SFUNC(_aulldiv, 4),
4196         IMPORT_SFUNC(_aullrem, 4),
4197         IMPORT_RFUNC(_aullshr, 0),
4198         IMPORT_RFUNC(_aullshl, 0),
4199         IMPORT_CFUNC(atoi, 0),
4200         IMPORT_CFUNC(atol, 0),
4201         IMPORT_CFUNC(rand, 0),
4202         IMPORT_CFUNC(srand, 0),
4203         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4204         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4205         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4206         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4207         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4208         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4209         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4210         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4211         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4212         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4213         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4214         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4215         IMPORT_SFUNC(ExQueryDepthSList, 1),
4216         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4217                 InterlockedPopEntrySList, 1),
4218         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4219                 InterlockedPushEntrySList, 2),
4220         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4221         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4222         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4223         IMPORT_SFUNC(ExFreePool, 1),
4224 #ifdef __i386__
4225         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4226         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
4227         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4228 #else
4229         /*
4230          * For AMD64, we can get away with just mapping
4231          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4232          * because the calling conventions end up being the same.
4233          * On i386, we have to be careful because KfAcquireSpinLock()
4234          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4235          */
4236         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4237         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4238         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4239 #endif
4240         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4241         IMPORT_FFUNC(InterlockedIncrement, 1),
4242         IMPORT_FFUNC(InterlockedDecrement, 1),
4243         IMPORT_FFUNC(InterlockedExchange, 2),
4244         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4245         IMPORT_SFUNC(IoAllocateMdl, 5),
4246         IMPORT_SFUNC(IoFreeMdl, 1),
4247         IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
4248         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
4249         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4250         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4251         IMPORT_SFUNC(MmSizeOfMdl, 1),
4252         IMPORT_SFUNC(MmMapLockedPages, 2),
4253         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4254         IMPORT_SFUNC(MmUnmapLockedPages, 2),
4255         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4256         IMPORT_SFUNC(MmGetPhysicalAddress, 1),
4257         IMPORT_SFUNC(MmIsAddressValid, 1),
4258         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4259         IMPORT_SFUNC(MmUnmapIoSpace, 2),
4260         IMPORT_SFUNC(KeInitializeSpinLock, 1),
4261         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4262         IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
4263         IMPORT_SFUNC(IoGetDeviceProperty, 5),
4264         IMPORT_SFUNC(IoAllocateWorkItem, 1),
4265         IMPORT_SFUNC(IoFreeWorkItem, 1),
4266         IMPORT_SFUNC(IoQueueWorkItem, 4),
4267         IMPORT_SFUNC(ExQueueWorkItem, 2),
4268         IMPORT_SFUNC(ntoskrnl_workitem, 2),
4269         IMPORT_SFUNC(KeInitializeMutex, 2),
4270         IMPORT_SFUNC(KeReleaseMutex, 2),
4271         IMPORT_SFUNC(KeReadStateMutex, 1),
4272         IMPORT_SFUNC(KeInitializeEvent, 3),
4273         IMPORT_SFUNC(KeSetEvent, 3),
4274         IMPORT_SFUNC(KeResetEvent, 1),
4275         IMPORT_SFUNC(KeClearEvent, 1),
4276         IMPORT_SFUNC(KeReadStateEvent, 1),
4277         IMPORT_SFUNC(KeInitializeTimer, 1),
4278         IMPORT_SFUNC(KeInitializeTimerEx, 2),
4279         IMPORT_SFUNC(KeSetTimer, 3),
4280         IMPORT_SFUNC(KeSetTimerEx, 4),
4281         IMPORT_SFUNC(KeCancelTimer, 1),
4282         IMPORT_SFUNC(KeReadStateTimer, 1),
4283         IMPORT_SFUNC(KeInitializeDpc, 3),
4284         IMPORT_SFUNC(KeInsertQueueDpc, 3),
4285         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4286         IMPORT_SFUNC(KeSetImportanceDpc, 2),
4287         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4288         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4289         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4290         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4291         IMPORT_FFUNC(ObfDereferenceObject, 1),
4292         IMPORT_SFUNC(ZwClose, 1),
4293         IMPORT_SFUNC(PsCreateSystemThread, 7),
4294         IMPORT_SFUNC(PsTerminateSystemThread, 1),
4295         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4296         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4297         IMPORT_CFUNC(WmiTraceMessage, 0),
4298         IMPORT_SFUNC(KeQuerySystemTime, 1),
4299         IMPORT_CFUNC(KeTickCount, 0),
4300         IMPORT_SFUNC(KeDelayExecutionThread, 3),
4301         IMPORT_SFUNC(KeQueryInterruptTime, 0),
4302         IMPORT_SFUNC(KeGetCurrentThread, 0),
4303         IMPORT_SFUNC(KeSetPriorityThread, 2),
4304
4305         /*
4306          * This last entry is a catch-all for any function we haven't
4307          * implemented yet. The PE import list patching routine will
4308          * use it for any function that doesn't have an explicit match
4309          * in this table.
4310          */
4311
4312         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4313
4314         /* End of list. */
4315
4316         { NULL, NULL, NULL }
4317 };