/*-
 * Copyright (c) 2003
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>

#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
        sysctl_show_timers, "I", "Show ntoskrnl timer stats");
#endif

struct kdpc_queue {
        list_entry              kq_disp;
        struct thread           *kq_td;
        int                     kq_cpu;
        int                     kq_exit;
        int                     kq_running;
        kspin_lock              kq_lock;
        nt_kevent               kq_proc;
        nt_kevent               kq_done;
};

typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
        struct cv               we_cv;
        struct thread           *we_td;
};

typedef struct wb_ext wb_ext;

#define NTOSKRNL_TIMEOUTS       256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
        struct callout          ce_callout;
        list_entry              ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;

static uint8_t RtlEqualUnicodeString(unicode_string *,
        unicode_string *, uint8_t);
static void RtlCopyUnicodeString(unicode_string *,
        unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
        void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
        device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
        device_object *, void *, uint32_t, void *, uint32_t,
        uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
        nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
        int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
        lookaside_alloc_func *, lookaside_free_func *,
        uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
        lookaside_alloc_func *, lookaside_free_func *,
        uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
        *ExInterlockedPushEntrySList(slist_header *,
        slist_entry *, kspin_lock *);
static slist_entry
        *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
        uint64_t, uint64_t, uint64_t, enum nt_caching_type);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
        enum nt_caching_type);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
        uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
        uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void KeQuerySystemTime(uint64_t *);
static uint32_t KeTickCount(void);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
        uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceObjectPointer(unicode_string *,
        uint32_t, void *, device_object *);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
        uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
        uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
        uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static char *ntoskrnl_strncat(char *, char *, size_t);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
static int32_t KeSetPriorityThread(struct thread *, int32_t);
static void dummy(void);

static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;

int
ntoskrnl_libinit()
{
        image_patch_table       *patch;
        int                     error;
        struct proc             *p;
        kdpc_queue              *kq;
        callout_entry           *e;
        int                     i;

        mtx_init(&ntoskrnl_dispatchlock,
            "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
        mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
        KeInitializeSpinLock(&ntoskrnl_cancellock);
        KeInitializeSpinLock(&ntoskrnl_intlock);
        TAILQ_INIT(&ntoskrnl_reflist);

        InitializeListHead(&ntoskrnl_calllist);
        InitializeListHead(&ntoskrnl_intlist);
        mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

        kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
            sizeof(kdpc_queue) * mp_ncpus, 0);
#else
            sizeof(kdpc_queue), 0);
#endif

        if (kq_queues == NULL)
                return (ENOMEM);

        wq_queues = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

        if (wq_queues == NULL)
                return (ENOMEM);

#ifdef NTOSKRNL_MULTIPLE_DPCS
        bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
        bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
        bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

        /*
         * Launch the DPC threads.
         */

#ifdef NTOSKRNL_MULTIPLE_DPCS
        for (i = 0; i < mp_ncpus; i++) {
#else
        for (i = 0; i < 1; i++) {
#endif
                kq = kq_queues + i;
                kq->kq_cpu = i;
                error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
                    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows DPC %d", i);
                if (error)
                        panic("failed to launch DPC thread");
        }

        /*
         * Launch the workitem threads.
         */

        for (i = 0; i < WORKITEM_THREADS; i++) {
                kq = wq_queues + i;
                error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
                    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Workitem %d", i);
                if (error)
                        panic("failed to launch workitem thread");
        }

        patch = ntoskrnl_functbl;
        while (patch->ipt_func != NULL) {
                windrv_wrap((funcptr)patch->ipt_func,
                    (funcptr *)&patch->ipt_wrap,
                    patch->ipt_argcnt, patch->ipt_ftype);
                patch++;
        }

        for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
                e = ExAllocatePoolWithTag(NonPagedPool,
                    sizeof(callout_entry), 0);
                if (e == NULL)
                        panic("failed to allocate timeouts");
                mtx_lock_spin(&ntoskrnl_calllock);
                InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
                mtx_unlock_spin(&ntoskrnl_calllock);
        }

        /*
         * MDLs are supposed to be variable size (they describe
         * buffers containing some number of pages, but we don't
         * know ahead of time how many pages that will be). But
         * always allocating them off the heap is very slow. As
         * a compromise, we create an MDL UMA zone big enough to
         * handle any buffer requiring up to 16 pages, and we
         * use those for any MDLs for buffers of 16 pages or less
         * in size. For buffers larger than that (which we assume
         * will be few and far between), we allocate the MDLs off
         * the heap.
         */

        mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        return (0);
}

int
ntoskrnl_libfini()
{
        image_patch_table       *patch;
        callout_entry           *e;
        list_entry              *l;

        patch = ntoskrnl_functbl;
        while (patch->ipt_func != NULL) {
                windrv_unwrap(patch->ipt_wrap);
                patch++;
        }

        /* Stop the workitem queues. */
        ntoskrnl_destroy_workitem_threads();
        /* Stop the DPC queues. */
        ntoskrnl_destroy_dpc_threads();

        ExFreePool(kq_queues);
        ExFreePool(wq_queues);

        uma_zdestroy(mdl_zone);
        uma_zdestroy(iw_zone);

        mtx_lock_spin(&ntoskrnl_calllock);
        while (!IsListEmpty(&ntoskrnl_calllist)) {
                l = RemoveHeadList(&ntoskrnl_calllist);
                e = CONTAINING_RECORD(l, callout_entry, ce_list);
                mtx_unlock_spin(&ntoskrnl_calllock);
                ExFreePool(e);
                mtx_lock_spin(&ntoskrnl_calllock);
        }
        mtx_unlock_spin(&ntoskrnl_calllock);

        mtx_destroy(&ntoskrnl_dispatchlock);
        mtx_destroy(&ntoskrnl_interlock);
        mtx_destroy(&ntoskrnl_calllock);

        return (0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
        void                    *buf;
        int                     ch;
        size_t                  size;
{
        return (memset(buf, ch, size));
}

static void *
ntoskrnl_memmove(dst, src, size)
        void                    *dst;
        void                    *src;
        size_t                  size;
{
        bcopy(src, dst, size);
        return (dst);
}

static void *
ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
{
        if (len != 0) {
                unsigned char *p = buf;

                do {
                        if (*p++ == ch)
                                return (p - 1);
                } while (--len != 0);
        }
        return (NULL);
}

static char *
ntoskrnl_strstr(s, find)
        char *s, *find;
{
        char c, sc;
        size_t len;

        if ((c = *find++) != 0) {
                len = strlen(find);
                do {
                        do {
                                if ((sc = *s++) == 0)
                                        return (NULL);
                        } while (sc != c);
                } while (strncmp(s, find, len) != 0);
                s--;
        }
        return ((char *)s);
}

/* Taken from libc */
static char *
ntoskrnl_strncat(dst, src, n)
        char            *dst;
        char            *src;
        size_t          n;
{
        if (n != 0) {
                char *d = dst;
                const char *s = src;

                while (*d != 0)
                        d++;
                do {
                        if ((*d = *s++) == 0)
                                break;
                        d++;
                } while (--n != 0);
                *d = 0;
        }
        return (dst);
}

static int
ntoskrnl_toupper(c)
        int                     c;
{
        return (toupper(c));
}

static int
ntoskrnl_tolower(c)
        int                     c;
{
        return (tolower(c));
}

static uint8_t
RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
        uint8_t caseinsensitive)
{
        int                     i;

        if (str1->us_len != str2->us_len)
                return (FALSE);

        for (i = 0; i < str1->us_len; i++) {
                if (caseinsensitive == TRUE) {
                        if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
                            toupper((char)(str2->us_buf[i] & 0xFF)))
                                return (FALSE);
                } else {
                        if (str1->us_buf[i] != str2->us_buf[i])
                                return (FALSE);
                }
        }

        return (TRUE);
}

static void
RtlCopyUnicodeString(dest, src)
        unicode_string          *dest;
        unicode_string          *src;
{

        if (dest->us_maxlen >= src->us_len)
                dest->us_len = src->us_len;
        else
                dest->us_len = dest->us_maxlen;
        memcpy(dest->us_buf, src->us_buf, dest->us_len);
}

static void
ntoskrnl_ascii_to_unicode(ascii, unicode, len)
        char                    *ascii;
        uint16_t                *unicode;
        int                     len;
{
        int                     i;
        uint16_t                *ustr;

        ustr = unicode;
        for (i = 0; i < len; i++) {
                *ustr = (uint16_t)ascii[i];
                ustr++;
        }
}

static void
ntoskrnl_unicode_to_ascii(unicode, ascii, len)
        uint16_t                *unicode;
        char                    *ascii;
        int                     len;
{
        int                     i;
        uint8_t                 *astr;

        astr = ascii;
        for (i = 0; i < len / 2; i++) {
                *astr = (uint8_t)unicode[i];
                astr++;
        }
}

uint32_t
RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
{
        if (dest == NULL || src == NULL)
                return (STATUS_INVALID_PARAMETER);

        dest->as_len = src->us_len / 2;
        if (dest->as_maxlen < dest->as_len)
                dest->as_len = dest->as_maxlen;

        if (allocate == TRUE) {
                dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
                    (src->us_len / 2) + 1, 0);
                if (dest->as_buf == NULL)
                        return (STATUS_INSUFFICIENT_RESOURCES);
                dest->as_len = dest->as_maxlen = src->us_len / 2;
        } else {
                dest->as_len = src->us_len / 2; /* XXX */
                if (dest->as_maxlen < dest->as_len)
                        dest->as_len = dest->as_maxlen;
        }

        ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
            dest->as_len * 2);

        return (STATUS_SUCCESS);
}

uint32_t
RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
        uint8_t allocate)
{
        if (dest == NULL || src == NULL)
                return (STATUS_INVALID_PARAMETER);

        if (allocate == TRUE) {
                dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
                    src->as_len * 2, 0);
                if (dest->us_buf == NULL)
                        return (STATUS_INSUFFICIENT_RESOURCES);
                dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
        } else {
                dest->us_len = src->as_len * 2; /* XXX */
                if (dest->us_maxlen < dest->us_len)
                        dest->us_len = dest->us_maxlen;
        }

        ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
            dest->us_len / 2);

        return (STATUS_SUCCESS);
}

void *
ExAllocatePoolWithTag(pooltype, len, tag)
        uint32_t                pooltype;
        size_t                  len;
        uint32_t                tag;
{
        void                    *buf;

        buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (buf == NULL)
                return (NULL);

        return (buf);
}

void
ExFreePool(buf)
        void                    *buf;
{
        free(buf, M_DEVBUF);
}

uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
        driver_object           *drv;
        void                    *clid;
        uint32_t                extlen;
        void                    **ext;
{
        custom_extension        *ce;

        ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
            + extlen, 0);

        if (ce == NULL)
                return (STATUS_INSUFFICIENT_RESOURCES);

        ce->ce_clid = clid;
        InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

        *ext = (void *)(ce + 1);

        return (STATUS_SUCCESS);
}

void *
IoGetDriverObjectExtension(drv, clid)
        driver_object           *drv;
        void                    *clid;
{
        list_entry              *e;
        custom_extension        *ce;

        /*
         * Sanity check. Our dummy bus drivers don't have
         * any driver extensions.
         */

        if (drv->dro_driverext == NULL)
                return (NULL);

        e = drv->dro_driverext->dre_usrext.nle_flink;
        while (e != &drv->dro_driverext->dre_usrext) {
                ce = (custom_extension *)e;
                if (ce->ce_clid == clid)
                        return ((void *)(ce + 1));
                e = e->nle_flink;
        }

        return (NULL);
}


uint32_t
IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
        uint32_t devtype, uint32_t devchars, uint8_t exclusive,
        device_object **newdev)
{
        device_object           *dev;

        dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
        if (dev == NULL)
                return (STATUS_INSUFFICIENT_RESOURCES);

        dev->do_type = devtype;
        dev->do_drvobj = drv;
        dev->do_currirp = NULL;
        dev->do_flags = 0;

        if (devextlen) {
                dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
                    devextlen, 0);

                if (dev->do_devext == NULL) {
                        ExFreePool(dev);
                        return (STATUS_INSUFFICIENT_RESOURCES);
                }

                bzero(dev->do_devext, devextlen);
        } else
                dev->do_devext = NULL;

        dev->do_size = sizeof(device_object) + devextlen;
        dev->do_refcnt = 1;
        dev->do_attacheddev = NULL;
        dev->do_nextdev = NULL;
        dev->do_devtype = devtype;
        dev->do_stacksize = 1;
        dev->do_alignreq = 1;
        dev->do_characteristics = devchars;
        dev->do_iotimer = NULL;
        KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

        /*
         * Vpd is used for disk/tape devices,
         * but we don't support those. (Yet.)
         */
        dev->do_vpb = NULL;

        dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(devobj_extension), 0);

        if (dev->do_devobj_ext == NULL) {
                if (dev->do_devext != NULL)
                        ExFreePool(dev->do_devext);
                ExFreePool(dev);
                return (STATUS_INSUFFICIENT_RESOURCES);
        }

        dev->do_devobj_ext->dve_type = 0;
        dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
        dev->do_devobj_ext->dve_devobj = dev;

        /*
         * Attach this device to the driver object's list
         * of devices. Note: this is not the same as attaching
         * the device to the device stack. The driver's AddDevice
         * routine must explicitly call IoAttachDeviceToDeviceStack()
         * to do that; a sketch of that pattern follows this function.
         */

        if (drv->dro_devobj == NULL) {
                drv->dro_devobj = dev;
                dev->do_nextdev = NULL;
        } else {
                dev->do_nextdev = drv->dro_devobj;
                drv->dro_devobj = dev;
        }

        *newdev = dev;

        return (STATUS_SUCCESS);
}
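
/*
 * Illustrative sketch only (hypothetical driver code, excluded from
 * compilation): roughly how a driver's AddDevice routine would pair
 * IoCreateDevice() with IoAttachDeviceToDeviceStack(). The routine
 * name, device extension size and device type are invented for
 * illustration, not part of this module.
 */
#if 0
static uint32_t
example_add_device(driver_object *drv, device_object *pdo)
{
        device_object           *fdo;
        uint32_t                status;

        /* Create the new device; 64 is a hypothetical extension size. */
        status = IoCreateDevice(drv, 64, NULL, 0 /* devtype */,
            0, FALSE, &fdo);
        if (status != STATUS_SUCCESS)
                return (status);

        /* Only now does the new device join the PDO's device stack. */
        IoAttachDeviceToDeviceStack(fdo, pdo);

        return (STATUS_SUCCESS);
}
#endif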

void
IoDeleteDevice(dev)
        device_object           *dev;
{
        device_object           *prev;

        if (dev == NULL)
                return;

        if (dev->do_devobj_ext != NULL)
                ExFreePool(dev->do_devobj_ext);

        if (dev->do_devext != NULL)
                ExFreePool(dev->do_devext);

        /* Unlink the device from the driver's device list. */

        prev = dev->do_drvobj->dro_devobj;
        if (prev == dev)
                dev->do_drvobj->dro_devobj = dev->do_nextdev;
        else {
                while (prev->do_nextdev != dev)
                        prev = prev->do_nextdev;
                prev->do_nextdev = dev->do_nextdev;
        }

        ExFreePool(dev);
}

device_object *
IoGetAttachedDevice(dev)
        device_object           *dev;
{
        device_object           *d;

        if (dev == NULL)
                return (NULL);

        d = dev;

        while (d->do_attacheddev != NULL)
                d = d->do_attacheddev;

        return (d);
}

static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
        uint32_t                func;
        device_object           *dobj;
        void                    *buf;
        uint32_t                len;
        uint64_t                *off;
        nt_kevent               *event;
        io_status_block         *status;
{
        irp                     *ip;

        ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
        if (ip == NULL)
                return (NULL);
        ip->irp_usrevent = event;

        return (ip);
}

static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
        uint32_t                func;
        device_object           *dobj;
        void                    *buf;
        uint32_t                len;
        uint64_t                *off;
        io_status_block         *status;
{
        irp                     *ip;
        io_stack_location       *sl;

        ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
        if (ip == NULL)
                return (NULL);

        ip->irp_usriostat = status;
        ip->irp_tail.irp_overlay.irp_thread = NULL;

        sl = IoGetNextIrpStackLocation(ip);
        sl->isl_major = func;
        sl->isl_minor = 0;
        sl->isl_flags = 0;
        sl->isl_ctl = 0;
        sl->isl_devobj = dobj;
        sl->isl_fileobj = NULL;
        sl->isl_completionfunc = NULL;

        ip->irp_userbuf = buf;

        if (dobj->do_flags & DO_BUFFERED_IO) {
                ip->irp_assoc.irp_sysbuf =
                    ExAllocatePoolWithTag(NonPagedPool, len, 0);
                if (ip->irp_assoc.irp_sysbuf == NULL) {
                        IoFreeIrp(ip);
                        return (NULL);
                }
                bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
        }

        if (dobj->do_flags & DO_DIRECT_IO) {
                ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
                if (ip->irp_mdl == NULL) {
                        if (ip->irp_assoc.irp_sysbuf != NULL)
                                ExFreePool(ip->irp_assoc.irp_sysbuf);
                        IoFreeIrp(ip);
                        return (NULL);
                }
                ip->irp_userbuf = NULL;
                ip->irp_assoc.irp_sysbuf = NULL;
        }

        if (func == IRP_MJ_READ) {
                sl->isl_parameters.isl_read.isl_len = len;
                if (off != NULL)
                        sl->isl_parameters.isl_read.isl_byteoff = *off;
                else
                        sl->isl_parameters.isl_read.isl_byteoff = 0;
        }

        if (func == IRP_MJ_WRITE) {
                sl->isl_parameters.isl_write.isl_len = len;
                if (off != NULL)
                        sl->isl_parameters.isl_write.isl_byteoff = *off;
                else
                        sl->isl_parameters.isl_write.isl_byteoff = 0;
        }

        return (ip);
}

static irp *
IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
        uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
        nt_kevent *event, io_status_block *status)
{
        irp                     *ip;
        io_stack_location       *sl;
        uint32_t                buflen;

        ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
        if (ip == NULL)
                return (NULL);
        ip->irp_usrevent = event;
        ip->irp_usriostat = status;
        ip->irp_tail.irp_overlay.irp_thread = NULL;

        sl = IoGetNextIrpStackLocation(ip);
        sl->isl_major = isinternal == TRUE ?
            IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
        sl->isl_minor = 0;
        sl->isl_flags = 0;
        sl->isl_ctl = 0;
        sl->isl_devobj = dobj;
        sl->isl_fileobj = NULL;
        sl->isl_completionfunc = NULL;
        sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
        sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
        sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

        switch (IO_METHOD(iocode)) {
        case METHOD_BUFFERED:
                if (ilen > olen)
                        buflen = ilen;
                else
                        buflen = olen;
                if (buflen) {
                        ip->irp_assoc.irp_sysbuf =
                            ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
                        if (ip->irp_assoc.irp_sysbuf == NULL) {
                                IoFreeIrp(ip);
                                return (NULL);
                        }
                }
                if (ilen && ibuf != NULL) {
                        bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
                        bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
                            buflen - ilen);
                } else
                        bzero(ip->irp_assoc.irp_sysbuf, ilen);
                ip->irp_userbuf = obuf;
                break;
        case METHOD_IN_DIRECT:
        case METHOD_OUT_DIRECT:
                if (ilen && ibuf != NULL) {
                        ip->irp_assoc.irp_sysbuf =
                            ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
                        if (ip->irp_assoc.irp_sysbuf == NULL) {
                                IoFreeIrp(ip);
                                return (NULL);
                        }
                        bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
                }
                if (olen && obuf != NULL) {
                        ip->irp_mdl = IoAllocateMdl(obuf, olen,
                            FALSE, FALSE, ip);
                        /*
                         * Normally we would MmProbeAndLockPages()
                         * here, but we don't have to in our
                         * implementation.
                         */
                }
                break;
        case METHOD_NEITHER:
                ip->irp_userbuf = obuf;
                sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
                break;
        default:
                break;
        }

        /*
         * Ideally, we should associate this IRP with the calling
         * thread here.
         */

        return (ip);
}

static irp *
IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
{
        irp                     *i;

        i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
        if (i == NULL)
                return (NULL);

        IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

        return (i);
}

static irp *
IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
{
        irp                     *associrp;

        associrp = IoAllocateIrp(stsize, FALSE);
        if (associrp == NULL)
                return (NULL);

        mtx_lock(&ntoskrnl_dispatchlock);
        associrp->irp_flags |= IRP_ASSOCIATED_IRP;
        associrp->irp_tail.irp_overlay.irp_thread =
            ip->irp_tail.irp_overlay.irp_thread;
        associrp->irp_assoc.irp_master = ip;
        mtx_unlock(&ntoskrnl_dispatchlock);

        return (associrp);
}

static void
IoFreeIrp(ip)
        irp                     *ip;
{
        ExFreePool(ip);
}

static void
IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
{
        bzero((char *)io, IoSizeOfIrp(ssize));
        io->irp_size = psize;
        io->irp_stackcnt = ssize;
        io->irp_currentstackloc = ssize;
        InitializeListHead(&io->irp_thlist);
        io->irp_tail.irp_overlay.irp_csl =
            (io_stack_location *)(io + 1) + ssize;
}

static void
IoReuseIrp(ip, status)
        irp                     *ip;
        uint32_t                status;
{
        uint8_t                 allocflags;

        allocflags = ip->irp_allocflags;
        IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
        ip->irp_iostat.isb_status = status;
        ip->irp_allocflags = allocflags;
}

void
IoAcquireCancelSpinLock(uint8_t *irql)
{
        KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
}

void
IoReleaseCancelSpinLock(uint8_t irql)
{
        KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
}

uint8_t
IoCancelIrp(irp *ip)
{
        cancel_func             cfunc;
        uint8_t                 cancelirql;

        IoAcquireCancelSpinLock(&cancelirql);
        cfunc = IoSetCancelRoutine(ip, NULL);
        ip->irp_cancel = TRUE;
        if (cfunc == NULL) {
                IoReleaseCancelSpinLock(cancelirql);
                return (FALSE);
        }
        ip->irp_cancelirql = cancelirql;
        MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
        return (uint8_t)IoSetCancelValue(ip, TRUE);
}

uint32_t
IofCallDriver(dobj, ip)
        device_object           *dobj;
        irp                     *ip;
{
        driver_object           *drvobj;
        io_stack_location       *sl;
        uint32_t                status;
        driver_dispatch         disp;

        drvobj = dobj->do_drvobj;

        if (ip->irp_currentstackloc <= 0)
                panic("IoCallDriver(): out of stack locations");

        IoSetNextIrpStackLocation(ip);
        sl = IoGetCurrentIrpStackLocation(ip);

        sl->isl_devobj = dobj;

        disp = drvobj->dro_dispatch[sl->isl_major];
        status = MSCALL2(disp, dobj, ip);

        return (status);
}

void
IofCompleteRequest(irp *ip, uint8_t prioboost)
{
        uint32_t                status;
        device_object           *dobj;
        io_stack_location       *sl;
        completion_func         cf;

        KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
            ("incorrect IRP(%p) status (STATUS_PENDING)", ip));

        sl = IoGetCurrentIrpStackLocation(ip);
        IoSkipCurrentIrpStackLocation(ip);

        do {
                if (sl->isl_ctl & SL_PENDING_RETURNED)
                        ip->irp_pendingreturned = TRUE;

                if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
                        dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
                else
                        dobj = NULL;

                if (sl->isl_completionfunc != NULL &&
                    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
                    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
                    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
                    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
                    (ip->irp_cancel == TRUE &&
                    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
                        cf = sl->isl_completionfunc;
                        status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
                        if (status == STATUS_MORE_PROCESSING_REQUIRED)
                                return;
                } else {
                        if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
                            (ip->irp_pendingreturned == TRUE))
                                IoMarkIrpPending(ip);
                }

                /* Move to the next stack location. */
                IoSkipCurrentIrpStackLocation(ip);
                sl++;
        } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));

        if (ip->irp_usriostat != NULL)
                *ip->irp_usriostat = ip->irp_iostat;
        if (ip->irp_usrevent != NULL)
                KeSetEvent(ip->irp_usrevent, prioboost, FALSE);

        /* Handle any associated IRPs. */

        if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
                uint32_t                masterirpcnt;
                irp                     *masterirp;
                mdl                     *m;

                masterirp = ip->irp_assoc.irp_master;
                masterirpcnt =
                    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

                while ((m = ip->irp_mdl) != NULL) {
                        ip->irp_mdl = m->mdl_next;
                        IoFreeMdl(m);
                }
                IoFreeIrp(ip);
                if (masterirpcnt == 0)
                        IoCompleteRequest(masterirp, IO_NO_INCREMENT);
                return;
        }

        /* With any luck, these conditions will never arise. */

        if (ip->irp_flags & IRP_PAGING_IO) {
                if (ip->irp_mdl != NULL)
                        IoFreeMdl(ip->irp_mdl);
                IoFreeIrp(ip);
        }
}

void
ntoskrnl_intr(arg)
        void                    *arg;
{
        kinterrupt              *iobj;
        uint8_t                 irql;
        uint8_t                 claimed;
        list_entry              *l;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        l = ntoskrnl_intlist.nle_flink;
        while (l != &ntoskrnl_intlist) {
                iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
                claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
                if (claimed == TRUE)
                        break;
                l = l->nle_flink;
        }
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);
}

uint8_t
KeAcquireInterruptSpinLock(iobj)
        kinterrupt              *iobj;
{
        uint8_t                 irql;
        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        return (irql);
}

void
KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
{
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);
}

uint8_t
KeSynchronizeExecution(iobj, syncfunc, syncctx)
        kinterrupt              *iobj;
        void                    *syncfunc;
        void                    *syncctx;
{
        uint8_t                 irql;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        MSCALL1(syncfunc, syncctx);
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        return (TRUE);
}

/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke all the registered ISRs. This effectively makes all
 * interrupts shared, but it's the only way to duplicate the
 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */
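
/*
 * Illustrative sketch only (hypothetical driver code, excluded from
 * compilation): registering an ISR through this dispatch layer. The
 * "example_isr" routine and its softc argument are invented names.
 * Once registered, the handler is walked by ntoskrnl_intr() along
 * with every other registered ISR until one of them claims the
 * interrupt.
 */
#if 0
static uint8_t example_isr(kinterrupt *, void *);

static int
example_register_isr(void *sc, uint32_t vector, uint8_t irql)
{
        kinterrupt              *intr;

        if (IoConnectInterrupt(&intr, example_isr, sc, NULL, vector,
            irql, irql, 0 /* imode */, TRUE /* shared */, 0, FALSE) !=
            STATUS_SUCCESS)
                return (ENXIO);

        /* ... and later, on detach: */
        IoDisconnectInterrupt(intr);

        return (0);
}
#endif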

uint32_t
IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
        kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
        uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
{
        uint8_t                 curirql;

        *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
        if (*iobj == NULL)
                return (STATUS_INSUFFICIENT_RESOURCES);

        (*iobj)->ki_svcfunc = svcfunc;
        (*iobj)->ki_svcctx = svcctx;

        if (lock == NULL) {
                KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
                (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
        } else
                (*iobj)->ki_lock = lock;

        KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
        InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
        KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

        return (STATUS_SUCCESS);
}

void
IoDisconnectInterrupt(iobj)
        kinterrupt              *iobj;
{
        uint8_t                 irql;

        if (iobj == NULL)
                return;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        RemoveEntryList((&iobj->ki_list));
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        ExFreePool(iobj);
}

device_object *
IoAttachDeviceToDeviceStack(src, dst)
        device_object           *src;
        device_object           *dst;
{
        device_object           *attached;

        mtx_lock(&ntoskrnl_dispatchlock);
        attached = IoGetAttachedDevice(dst);
        attached->do_attacheddev = src;
        src->do_attacheddev = NULL;
        src->do_stacksize = attached->do_stacksize + 1;
        mtx_unlock(&ntoskrnl_dispatchlock);

        return (attached);
}

void
IoDetachDevice(topdev)
        device_object           *topdev;
{
        device_object           *tail;

        mtx_lock(&ntoskrnl_dispatchlock);

        /* First, break the chain. */
        tail = topdev->do_attacheddev;
        if (tail == NULL) {
                mtx_unlock(&ntoskrnl_dispatchlock);
                return;
        }
        topdev->do_attacheddev = tail->do_attacheddev;
        topdev->do_refcnt--;

        /* Now reduce the stacksize count for the remaining objects. */

        tail = topdev->do_attacheddev;
        while (tail != NULL) {
                tail->do_stacksize--;
                tail = tail->do_attacheddev;
        }

        mtx_unlock(&ntoskrnl_dispatchlock);
}

/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */

static int
ntoskrnl_is_signalled(obj, td)
        nt_dispatch_header      *obj;
        struct thread           *td;
{
        kmutant                 *km;

        if (obj->dh_type == DISP_TYPE_MUTANT) {
                km = (kmutant *)obj;
                if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
                    obj->dh_sigstate == 1)
                        return (TRUE);
                return (FALSE);
        }

        if (obj->dh_sigstate > 0)
                return (TRUE);
        return (FALSE);
}

static void
ntoskrnl_satisfy_wait(obj, td)
        nt_dispatch_header      *obj;
        struct thread           *td;
{
        kmutant                 *km;

        switch (obj->dh_type) {
        case DISP_TYPE_MUTANT:
                km = (struct kmutant *)obj;
                obj->dh_sigstate--;
                /*
                 * If sigstate reaches 0, the mutex is now
                 * non-signalled (the new thread owns it).
                 */
                if (obj->dh_sigstate == 0) {
                        km->km_ownerthread = td;
                        if (km->km_abandoned == TRUE)
                                km->km_abandoned = FALSE;
                }
                break;
        /* Synchronization objects get reset to unsignalled. */
        case DISP_TYPE_SYNCHRONIZATION_EVENT:
        case DISP_TYPE_SYNCHRONIZATION_TIMER:
                obj->dh_sigstate = 0;
                break;
        case DISP_TYPE_SEMAPHORE:
                obj->dh_sigstate--;
                break;
        default:
                break;
        }
}

static void
ntoskrnl_satisfy_multiple_waits(wb)
        wait_block              *wb;
{
        wait_block              *cur;
        struct thread           *td;

        cur = wb;
        td = wb->wb_kthread;

        do {
                ntoskrnl_satisfy_wait(wb->wb_object, td);
                cur->wb_awakened = TRUE;
                cur = cur->wb_next;
        } while (cur != wb);
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(obj, increment)
        nt_dispatch_header      *obj;
        uint32_t                increment;
{
        wait_block              *w, *next;
        list_entry              *e;
        struct thread           *td;
        wb_ext                  *we;
        int                     satisfied;

        /*
         * Once an object has been signalled, we walk its list of
         * wait blocks. If a wait block can be awakened, then satisfy
         * waits as necessary and wake the thread.
         *
         * The rules work like this:
         *
         * If a wait block is marked as WAITTYPE_ANY, then
         * we can satisfy the wait conditions on the current
         * object and wake the thread right away. Satisfying
         * the wait also has the effect of breaking us out
         * of the search loop.
         *
         * If the object is marked as WAITTYPE_ALL, then the
         * wait block will be part of a circularly linked
         * list of wait blocks belonging to a waiting thread
         * that's sleeping in KeWaitForMultipleObjects(). In
         * order to wake the thread, all the objects in the
         * wait list must be in the signalled state. If they
         * are, we then satisfy all of them and wake the
         * thread.
         */

        e = obj->dh_waitlisthead.nle_flink;

        while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
                w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
                we = w->wb_ext;
                td = we->we_td;
                satisfied = FALSE;
                if (w->wb_waittype == WAITTYPE_ANY) {
                        /*
                         * Thread can be awakened if
                         * any wait is satisfied.
                         */
                        ntoskrnl_satisfy_wait(obj, td);
                        satisfied = TRUE;
                        w->wb_awakened = TRUE;
                } else {
                        /*
                         * Thread can only be woken up
                         * if all waits are satisfied.
                         * If the thread is waiting on multiple
                         * objects, they should all be linked
                         * through the wb_next pointers in the
                         * wait blocks.
                         */
                        satisfied = TRUE;
                        next = w->wb_next;
                        while (next != w) {
                                if (ntoskrnl_is_signalled(obj, td) == FALSE) {
                                        satisfied = FALSE;
                                        break;
                                }
                                next = next->wb_next;
                        }
                        ntoskrnl_satisfy_multiple_waits(w);
                }

                if (satisfied == TRUE)
                        cv_broadcastpri(&we->we_cv,
                            (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
                            w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);

                e = e->nle_flink;
        }
}

/*
 * Return the number of 100 nanosecond intervals since
 * January 1, 1601. (?!?!)
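 * The 11644473600-second offset used below covers 1601 to 1970:
 * 369 years * 365 days + 89 leap days = 134774 days.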
1539  */
1540 void
1541 ntoskrnl_time(tval)
1542         uint64_t                *tval;
1543 {
1544         struct timespec         ts;
1545
1546         nanotime(&ts);
1547         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1548             11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
1549 }
1550
1551 static void
1552 KeQuerySystemTime(current_time)
1553         uint64_t                *current_time;
1554 {
1555         ntoskrnl_time(current_time);
1556 }
1557
1558 static uint32_t
1559 KeTickCount(void)
1560 {
1561         struct timeval tv;
1562         getmicrouptime(&tv);
1563         return tvtohz(&tv);
1564 }
1565
1566
1567 /*
1568  * KeWaitForSingleObject() is a tricky beast, because it can be used
1569  * with several different object types: semaphores, timers, events,
1570  * mutexes and threads. Semaphores don't appear very often, but the
1571  * other object types are quite common. KeWaitForSingleObject() is
1572  * what's normally used to acquire a mutex, and it can be used to
1573  * wait for a thread termination.
1574  *
1575  * The Windows NDIS API is implemented in terms of Windows kernel
1576  * primitives, and some of the object manipulation is duplicated in
1577  * NDIS. For example, NDIS has timers and events, which are actually
1578  * Windows kevents and ktimers. Now, you're supposed to only use the
1579  * NDIS variants of these objects within the confines of the NDIS API,
1580  * but there are some naughty developers out there who will use
1581  * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
1583  * code has to be closely tied into our ntoskrnl timer and event code,
1584  * just as it is in Windows.
1585  *
1586  * KeWaitForSingleObject() may do different things for different kinds
1587  * of objects:
1588  *
1589  * - For events, we check if the event has been signalled. If the
1590  *   event is already in the signalled state, we just return immediately,
1591  *   otherwise we wait for it to be set to the signalled state by someone
1592  *   else calling KeSetEvent(). Events can be either synchronization or
1593  *   notification events.
1594  *
1595  * - For timers, if the timer has already fired and the timer is in
1596  *   the signalled state, we just return, otherwise we wait on the
1597  *   timer. Unlike an event, timers get signalled automatically when
1598  *   they expire rather than someone having to trip them manually.
1599  *   Timers initialized with KeInitializeTimer() are always notification
1600  *   events: KeInitializeTimerEx() lets you initialize a timer as
1601  *   either a notification or synchronization event.
1602  *
1603  * - For mutexes, we try to acquire the mutex and if we can't, we wait
1604  *   on the mutex until it's available and then grab it. When a mutex is
1605  *   released, it enters the signalled state, which wakes up one of the
1606  *   threads waiting to acquire it. Mutexes are always synchronization
1607  *   events.
1608  *
1609  * - For threads, the only thing we do is wait until the thread object
1610  *   enters a signalled state, which occurs when the thread terminates.
1611  *   Threads are always notification events.
1612  *
1613  * A notification event wakes up all threads waiting on an object. A
1614  * synchronization event wakes up just one. Also, a synchronization event
1615  * is auto-clearing, which means we automatically set the event back to
1616  * the non-signalled state once the wakeup is done.
1617  */
1618
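/*
 * Illustrative sketch (editor's addition, not in the original source):
 * how a driver-side caller might exercise the semantics described
 * above. The event, timeout and message below are hypothetical.
 */
#if 0
	nt_kevent		ev;
	int64_t			duetime;
	uint32_t		rval;

	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
	duetime = -10000000;	/* relative wait: 1 second in 100ns units */
	rval = KeWaitForSingleObject(&ev, 0, 0, FALSE, &duetime);
	if (rval == STATUS_TIMEOUT)
		printf("event not signalled within 1 second\n");
#endif
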
1619 uint32_t
1620 KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
1621     uint8_t alertable, int64_t *duetime)
1622 {
1623         wait_block              w;
1624         struct thread           *td = curthread;
1625         struct timeval          tv;
1626         int                     error = 0;
1627         uint64_t                curtime;
1628         wb_ext                  we;
1629         nt_dispatch_header      *obj;
1630
1631         obj = arg;
1632
1633         if (obj == NULL)
1634                 return (STATUS_INVALID_PARAMETER);
1635
1636         mtx_lock(&ntoskrnl_dispatchlock);
1637
1638         cv_init(&we.we_cv, "KeWFS");
1639         we.we_td = td;
1640
1641         /*
1642          * Check to see if this object is already signalled,
1643          * and just return without waiting if it is.
1644          */
1645         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1646                 /* Sanity check the signal state value. */
1647                 if (obj->dh_sigstate != INT32_MIN) {
			ntoskrnl_satisfy_wait(obj, curthread);
			cv_destroy(&we.we_cv);
			mtx_unlock(&ntoskrnl_dispatchlock);
			return (STATUS_SUCCESS);
1651                 } else {
1652                         /*
1653                          * There's a limit to how many times we can
1654                          * recursively acquire a mutant. If we hit
1655                          * the limit, something is very wrong.
1656                          */
1657                         if (obj->dh_type == DISP_TYPE_MUTANT) {
1658                                 mtx_unlock(&ntoskrnl_dispatchlock);
1659                                 panic("mutant limit exceeded");
1660                         }
1661                 }
1662         }
1663
1664         bzero((char *)&w, sizeof(wait_block));
1665         w.wb_object = obj;
1666         w.wb_ext = &we;
1667         w.wb_waittype = WAITTYPE_ANY;
1668         w.wb_next = &w;
1669         w.wb_waitkey = 0;
1670         w.wb_awakened = FALSE;
1671         w.wb_oldpri = td->td_priority;
1672
1673         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1674
1675         /*
1676          * The timeout value is specified in 100 nanosecond units
1677          * and can be a positive or negative number. If it's positive,
1678          * then the duetime is absolute, and we need to convert it
	 * to an offset relative to now in order to use it.
1680          * If it's negative, then the duetime is relative and we
1681          * just have to convert the units.
1682          */
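	/*
	 * Editor's example: a relative duetime of -5000000 (half a
	 * second in 100ns units) yields tv_sec = 0, tv_usec = 500000
	 * below; an absolute duetime 2500000 ticks past the current
	 * time yields tv_sec = 0, tv_usec = 250000.
	 */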
1683
1684         if (duetime != NULL) {
1685                 if (*duetime < 0) {
1686                         tv.tv_sec = - (*duetime) / 10000000;
1687                         tv.tv_usec = (- (*duetime) / 10) -
1688                             (tv.tv_sec * 1000000);
1689                 } else {
1690                         ntoskrnl_time(&curtime);
1691                         if (*duetime < curtime)
1692                                 tv.tv_sec = tv.tv_usec = 0;
1693                         else {
1694                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1695                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1696                                     (tv.tv_sec * 1000000);
1697                         }
1698                 }
1699         }
1700
1701         if (duetime == NULL)
1702                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1703         else
1704                 error = cv_timedwait(&we.we_cv,
1705                     &ntoskrnl_dispatchlock, tvtohz(&tv));
1706
1707         RemoveEntryList(&w.wb_waitlist);
1708
1709         cv_destroy(&we.we_cv);
1710
1711         /* We timed out. Leave the object alone and return status. */
1712
1713         if (error == EWOULDBLOCK) {
1714                 mtx_unlock(&ntoskrnl_dispatchlock);
1715                 return (STATUS_TIMEOUT);
1716         }
1717
1718         mtx_unlock(&ntoskrnl_dispatchlock);
1719
1720         return (STATUS_SUCCESS);
1721 /*
1722         return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1723             mode, alertable, duetime, &w));
1724 */
1725 }
1726
1727 static uint32_t
1728 KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
1729         uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
1730         wait_block *wb_array)
1731 {
1732         struct thread           *td = curthread;
1733         wait_block              *whead, *w;
1734         wait_block              _wb_array[MAX_WAIT_OBJECTS];
1735         nt_dispatch_header      *cur;
1736         struct timeval          tv;
1737         int                     i, wcnt = 0, error = 0;
1738         uint64_t                curtime;
1739         struct timespec         t1, t2;
1740         uint32_t                status = STATUS_SUCCESS;
1741         wb_ext                  we;
1742
1743         if (cnt > MAX_WAIT_OBJECTS)
1744                 return (STATUS_INVALID_PARAMETER);
1745         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1746                 return (STATUS_INVALID_PARAMETER);
1747
1748         mtx_lock(&ntoskrnl_dispatchlock);
1749
1750         cv_init(&we.we_cv, "KeWFM");
1751         we.we_td = td;
1752
1753         if (wb_array == NULL)
1754                 whead = _wb_array;
1755         else
1756                 whead = wb_array;
1757
1758         bzero((char *)whead, sizeof(wait_block) * cnt);
1759
1760         /* First pass: see if we can satisfy any waits immediately. */
1761
1762         wcnt = 0;
1763         w = whead;
1764
1765         for (i = 0; i < cnt; i++) {
1766                 InsertTailList((&obj[i]->dh_waitlisthead),
1767                     (&w->wb_waitlist));
1768                 w->wb_ext = &we;
1769                 w->wb_object = obj[i];
1770                 w->wb_waittype = wtype;
1771                 w->wb_waitkey = i;
1772                 w->wb_awakened = FALSE;
1773                 w->wb_oldpri = td->td_priority;
1774                 w->wb_next = w + 1;
1775                 w++;
1776                 wcnt++;
1777                 if (ntoskrnl_is_signalled(obj[i], td)) {
1778                         /*
1779                          * There's a limit to how many times
1780                          * we can recursively acquire a mutant.
1781                          * If we hit the limit, something
1782                          * is very wrong.
1783                          */
1784                         if (obj[i]->dh_sigstate == INT32_MIN &&
1785                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
1786                                 mtx_unlock(&ntoskrnl_dispatchlock);
1787                                 panic("mutant limit exceeded");
1788                         }
1789
1790                         /*
1791                          * If this is a WAITTYPE_ANY wait, then
1792                          * satisfy the waited object and exit
1793                          * right now.
1794                          */
1795
1796                         if (wtype == WAITTYPE_ANY) {
1797                                 ntoskrnl_satisfy_wait(obj[i], td);
1798                                 status = STATUS_WAIT_0 + i;
1799                                 goto wait_done;
1800                         } else {
1801                                 w--;
1802                                 wcnt--;
1803                                 w->wb_object = NULL;
1804                                 RemoveEntryList(&w->wb_waitlist);
1805                         }
1806                 }
1807         }
1808
1809         /*
1810          * If this is a WAITTYPE_ALL wait and all objects are
1811          * already signalled, satisfy the waits and exit now.
1812          */
1813
1814         if (wtype == WAITTYPE_ALL && wcnt == 0) {
1815                 for (i = 0; i < cnt; i++)
1816                         ntoskrnl_satisfy_wait(obj[i], td);
1817                 status = STATUS_SUCCESS;
1818                 goto wait_done;
1819         }
1820
1821         /*
1822          * Create a circular waitblock list. The waitcount
1823          * must always be non-zero when we get here.
1824          */
1825
1826         (w - 1)->wb_next = whead;
1827
1828         /* Wait on any objects that aren't yet signalled. */
1829
1830         /* Calculate timeout, if any. */
1831
1832         if (duetime != NULL) {
1833                 if (*duetime < 0) {
1834                         tv.tv_sec = - (*duetime) / 10000000;
1835                         tv.tv_usec = (- (*duetime) / 10) -
1836                             (tv.tv_sec * 1000000);
1837                 } else {
1838                         ntoskrnl_time(&curtime);
1839                         if (*duetime < curtime)
1840                                 tv.tv_sec = tv.tv_usec = 0;
1841                         else {
1842                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1843                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
1844                                     (tv.tv_sec * 1000000);
1845                         }
1846                 }
1847         }
1848
1849         while (wcnt) {
1850                 nanotime(&t1);
1851
1852                 if (duetime == NULL)
1853                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1854                 else
1855                         error = cv_timedwait(&we.we_cv,
1856                             &ntoskrnl_dispatchlock, tvtohz(&tv));
1857
1858                 /* Wait with timeout expired. */
1859
1860                 if (error) {
1861                         status = STATUS_TIMEOUT;
1862                         goto wait_done;
1863                 }
1864
1865                 nanotime(&t2);
1866
1867                 /* See what's been signalled. */
1868
1869                 w = whead;
1870                 do {
1871                         cur = w->wb_object;
1872                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1873                             w->wb_awakened == TRUE) {
1874                                 /* Sanity check the signal state value. */
1875                                 if (cur->dh_sigstate == INT32_MIN &&
1876                                     cur->dh_type == DISP_TYPE_MUTANT) {
1877                                         mtx_unlock(&ntoskrnl_dispatchlock);
1878                                         panic("mutant limit exceeded");
1879                                 }
1880                                 wcnt--;
1881                                 if (wtype == WAITTYPE_ANY) {
					status = STATUS_WAIT_0 +
					    w->wb_waitkey;
1884                                         goto wait_done;
1885                                 }
1886                         }
1887                         w = w->wb_next;
1888                 } while (w != whead);
1889
1890                 /*
1891                  * If all objects have been signalled, or if this
		 * is a WAITTYPE_ANY wait and we were woken up by
1893                  * someone, we can bail.
1894                  */
1895
1896                 if (wcnt == 0) {
1897                         status = STATUS_SUCCESS;
1898                         goto wait_done;
1899                 }
1900
1901                 /*
		 * If this is a WAITTYPE_ALL wait and there are still
		 * objects that haven't been signalled, deduct the
1904                  * time that's elapsed so far from the timeout and
1905                  * wait again (or continue waiting indefinitely if
1906                  * there's no timeout).
1907                  */
1908
1909                 if (duetime != NULL) {
1910                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1911                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1912                 }
1913         }
1914
1915
1916 wait_done:
1917
1918         cv_destroy(&we.we_cv);
1919
1920         for (i = 0; i < cnt; i++) {
1921                 if (whead[i].wb_object != NULL)
1922                         RemoveEntryList(&whead[i].wb_waitlist);
1923
1924         }
1925         mtx_unlock(&ntoskrnl_dispatchlock);
1926
1927         return (status);
1928 }
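
/*
 * Illustrative sketch (editor's addition): waiting on two events at
 * once. With WAITTYPE_ALL the call returns only when both events are
 * signalled; with WAITTYPE_ANY the returned status encodes which
 * object satisfied the wait. All names below are hypothetical.
 */
#if 0
	nt_kevent		ev[2];
	nt_dispatch_header	*objs[2];
	uint32_t		rval;

	KeInitializeEvent(&ev[0], EVENT_TYPE_NOTIFY, FALSE);
	KeInitializeEvent(&ev[1], EVENT_TYPE_NOTIFY, FALSE);
	objs[0] = (nt_dispatch_header *)&ev[0];
	objs[1] = (nt_dispatch_header *)&ev[1];
	rval = KeWaitForMultipleObjects(2, objs, WAITTYPE_ALL, 0, 0,
	    FALSE, NULL, NULL);
#endif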
1929
1930 static void
1931 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
1932 {
1933         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1934 }
1935
1936 static uint16_t
1937 READ_REGISTER_USHORT(reg)
1938         uint16_t                *reg;
1939 {
1940         return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1941 }
1942
1943 static void
1944 WRITE_REGISTER_ULONG(reg, val)
1945         uint32_t                *reg;
1946         uint32_t                val;
1947 {
1948         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1949 }
1950
1951 static uint32_t
1952 READ_REGISTER_ULONG(reg)
1953         uint32_t                *reg;
1954 {
1955         return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1956 }
1957
1958 static uint8_t
1959 READ_REGISTER_UCHAR(uint8_t *reg)
1960 {
1961         return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1962 }
1963
1964 static void
1965 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
1966 {
1967         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1968 }
1969
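/*
 * Editor's note: the _all*() and _aull*() routines below correspond to
 * the MSVC helper intrinsics that 32-bit Windows binaries call to
 * perform 64-bit integer arithmetic; drivers import them by name.
 */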
1970 static int64_t
1971 _allmul(a, b)
1972         int64_t                 a;
1973         int64_t                 b;
1974 {
1975         return (a * b);
1976 }
1977
1978 static int64_t
1979 _alldiv(a, b)
1980         int64_t                 a;
1981         int64_t                 b;
1982 {
1983         return (a / b);
1984 }
1985
1986 static int64_t
1987 _allrem(a, b)
1988         int64_t                 a;
1989         int64_t                 b;
1990 {
1991         return (a % b);
1992 }
1993
1994 static uint64_t
1995 _aullmul(a, b)
1996         uint64_t                a;
1997         uint64_t                b;
1998 {
1999         return (a * b);
2000 }
2001
2002 static uint64_t
2003 _aulldiv(a, b)
2004         uint64_t                a;
2005         uint64_t                b;
2006 {
2007         return (a / b);
2008 }
2009
2010 static uint64_t
2011 _aullrem(a, b)
2012         uint64_t                a;
2013         uint64_t                b;
2014 {
2015         return (a % b);
2016 }
2017
2018 static int64_t
2019 _allshl(int64_t a, uint8_t b)
2020 {
2021         return (a << b);
2022 }
2023
2024 static uint64_t
2025 _aullshl(uint64_t a, uint8_t b)
2026 {
2027         return (a << b);
2028 }
2029
2030 static int64_t
2031 _allshr(int64_t a, uint8_t b)
2032 {
2033         return (a >> b);
2034 }
2035
2036 static uint64_t
2037 _aullshr(uint64_t a, uint8_t b)
2038 {
2039         return (a >> b);
2040 }
2041
2042 static slist_entry *
2043 ntoskrnl_pushsl(head, entry)
2044         slist_header            *head;
2045         slist_entry             *entry;
2046 {
2047         slist_entry             *oldhead;
2048
2049         oldhead = head->slh_list.slh_next;
2050         entry->sl_next = head->slh_list.slh_next;
2051         head->slh_list.slh_next = entry;
2052         head->slh_list.slh_depth++;
2053         head->slh_list.slh_seq++;
2054
2055         return (oldhead);
2056 }
2057
2058 static slist_entry *
2059 ntoskrnl_popsl(head)
2060         slist_header            *head;
2061 {
2062         slist_entry             *first;
2063
2064         first = head->slh_list.slh_next;
2065         if (first != NULL) {
2066                 head->slh_list.slh_next = first->sl_next;
2067                 head->slh_list.slh_depth--;
2068                 head->slh_list.slh_seq++;
2069         }
2070
2071         return (first);
2072 }
2073
2074 /*
2075  * We need this to make lookaside lists work for amd64.
 * We store a pointer to ExAllocatePoolWithTag() in the lookaside
 * list structure. For amd64 to work right, this has to be a
2078  * pointer to the wrapped version of the routine, not the
2079  * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a crash. On x86, this effectively
2082  * becomes a no-op since ipt_func and ipt_wrap are the same.
2083  */
2084
2085 static funcptr
2086 ntoskrnl_findwrap(func)
2087         funcptr                 func;
2088 {
2089         image_patch_table       *patch;
2090
2091         patch = ntoskrnl_functbl;
2092         while (patch->ipt_func != NULL) {
2093                 if ((funcptr)patch->ipt_func == func)
2094                         return ((funcptr)patch->ipt_wrap);
2095                 patch++;
2096         }
2097
2098         return (NULL);
2099 }
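
/*
 * Illustrative sketch (editor's addition): how the wrap lookup is
 * meant to be used; the cast and target routine mirror the lookaside
 * initializers below.
 */
#if 0
	lookaside_alloc_func	*af;

	af = (lookaside_alloc_func *)
	    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
#endif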
2100
2101 static void
2102 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
2103         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2104         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2105 {
2106         bzero((char *)lookaside, sizeof(paged_lookaside_list));
2107
2108         if (size < sizeof(slist_entry))
2109                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2110         else
2111                 lookaside->nll_l.gl_size = size;
2112         lookaside->nll_l.gl_tag = tag;
2113         if (allocfunc == NULL)
2114                 lookaside->nll_l.gl_allocfunc =
2115                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2116         else
2117                 lookaside->nll_l.gl_allocfunc = allocfunc;
2118
2119         if (freefunc == NULL)
2120                 lookaside->nll_l.gl_freefunc =
2121                     ntoskrnl_findwrap((funcptr)ExFreePool);
2122         else
2123                 lookaside->nll_l.gl_freefunc = freefunc;
2124
2125 #ifdef __i386__
2126         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2127 #endif
2128
2129         lookaside->nll_l.gl_type = NonPagedPool;
2130         lookaside->nll_l.gl_depth = depth;
2131         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2132 }
2133
2134 static void
2135 ExDeletePagedLookasideList(lookaside)
2136         paged_lookaside_list   *lookaside;
2137 {
2138         void                    *buf;
2139         void            (*freefunc)(void *);
2140
2141         freefunc = lookaside->nll_l.gl_freefunc;
	while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2143                 MSCALL1(freefunc, buf);
2144 }
2145
2146 static void
2147 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
2148         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2149         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2150 {
2151         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2152
2153         if (size < sizeof(slist_entry))
2154                 lookaside->nll_l.gl_size = sizeof(slist_entry);
2155         else
2156                 lookaside->nll_l.gl_size = size;
2157         lookaside->nll_l.gl_tag = tag;
2158         if (allocfunc == NULL)
2159                 lookaside->nll_l.gl_allocfunc =
2160                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2161         else
2162                 lookaside->nll_l.gl_allocfunc = allocfunc;
2163
2164         if (freefunc == NULL)
2165                 lookaside->nll_l.gl_freefunc =
2166                     ntoskrnl_findwrap((funcptr)ExFreePool);
2167         else
2168                 lookaside->nll_l.gl_freefunc = freefunc;
2169
2170 #ifdef __i386__
2171         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2172 #endif
2173
2174         lookaside->nll_l.gl_type = NonPagedPool;
2175         lookaside->nll_l.gl_depth = depth;
2176         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2177 }
2178
2179 static void
2180 ExDeleteNPagedLookasideList(lookaside)
2181         npaged_lookaside_list   *lookaside;
2182 {
2183         void                    *buf;
2184         void            (*freefunc)(void *);
2185
2186         freefunc = lookaside->nll_l.gl_freefunc;
	while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2188                 MSCALL1(freefunc, buf);
2189 }
2190
2191 slist_entry *
2192 InterlockedPushEntrySList(head, entry)
2193         slist_header            *head;
2194         slist_entry             *entry;
2195 {
2196         slist_entry             *oldhead;
2197
2198         mtx_lock_spin(&ntoskrnl_interlock);
2199         oldhead = ntoskrnl_pushsl(head, entry);
2200         mtx_unlock_spin(&ntoskrnl_interlock);
2201
2202         return (oldhead);
2203 }
2204
2205 slist_entry *
2206 InterlockedPopEntrySList(head)
2207         slist_header            *head;
2208 {
2209         slist_entry             *first;
2210
2211         mtx_lock_spin(&ntoskrnl_interlock);
2212         first = ntoskrnl_popsl(head);
2213         mtx_unlock_spin(&ntoskrnl_interlock);
2214
2215         return (first);
2216 }
2217
2218 static slist_entry *
2219 ExInterlockedPushEntrySList(head, entry, lock)
2220         slist_header            *head;
2221         slist_entry             *entry;
2222         kspin_lock              *lock;
2223 {
2224         return (InterlockedPushEntrySList(head, entry));
2225 }
2226
2227 static slist_entry *
2228 ExInterlockedPopEntrySList(head, lock)
2229         slist_header            *head;
2230         kspin_lock              *lock;
2231 {
2232         return (InterlockedPopEntrySList(head));
2233 }
2234
2235 uint16_t
2236 ExQueryDepthSList(head)
2237         slist_header            *head;
2238 {
2239         uint16_t                depth;
2240
2241         mtx_lock_spin(&ntoskrnl_interlock);
2242         depth = head->slh_list.slh_depth;
2243         mtx_unlock_spin(&ntoskrnl_interlock);
2244
2245         return (depth);
2246 }
2247
2248 void
2249 KeInitializeSpinLock(lock)
2250         kspin_lock              *lock;
2251 {
2252         *lock = 0;
2253 }
2254
2255 #ifdef __i386__
2256 void
2257 KefAcquireSpinLockAtDpcLevel(lock)
2258         kspin_lock              *lock;
2259 {
2260 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2261         int                     i = 0;
2262 #endif
2263
2264         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2265                 /* sit and spin */;
2266 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2267                 i++;
2268                 if (i > 200000000)
2269                         panic("DEADLOCK!");
2270 #endif
2271         }
2272 }
2273
2274 void
2275 KefReleaseSpinLockFromDpcLevel(lock)
2276         kspin_lock              *lock;
2277 {
2278         atomic_store_rel_int((volatile u_int *)lock, 0);
2279 }
2280
2281 uint8_t
2282 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2283 {
2284         uint8_t                 oldirql;
2285
2286         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2287                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2288
2289         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2290         KeAcquireSpinLockAtDpcLevel(lock);
2291
2292         return (oldirql);
2293 }
2294 #else
2295 void
2296 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2297 {
2298         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2299                 /* sit and spin */;
2300 }
2301
2302 void
2303 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2304 {
2305         atomic_store_rel_int((volatile u_int *)lock, 0);
2306 }
2307 #endif /* __i386__ */
2308
2309 uintptr_t
2310 InterlockedExchange(dst, val)
2311         volatile uint32_t       *dst;
2312         uintptr_t               val;
2313 {
2314         uintptr_t               r;
2315
2316         mtx_lock_spin(&ntoskrnl_interlock);
2317         r = *dst;
2318         *dst = val;
2319         mtx_unlock_spin(&ntoskrnl_interlock);
2320
2321         return (r);
2322 }
2323
2324 static uint32_t
2325 InterlockedIncrement(addend)
2326         volatile uint32_t       *addend;
2327 {
	/* Atomically add and return the new value in one step. */
	return (atomic_fetchadd_int((volatile u_int *)addend, 1) + 1);
2330 }
2331
2332 static uint32_t
2333 InterlockedDecrement(addend)
2334         volatile uint32_t       *addend;
2335 {
	/* Atomically subtract and return the new value in one step. */
	return (atomic_fetchadd_int((volatile u_int *)addend, -1) - 1);
2338 }
2339
2340 static void
2341 ExInterlockedAddLargeStatistic(addend, inc)
2342         uint64_t                *addend;
2343         uint32_t                inc;
2344 {
2345         mtx_lock_spin(&ntoskrnl_interlock);
2346         *addend += inc;
2347         mtx_unlock_spin(&ntoskrnl_interlock);
}
2349
2350 mdl *
2351 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
2352         uint8_t chargequota, irp *iopkt)
2353 {
2354         mdl                     *m;
2355         int                     zone = 0;
2356
2357         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2358                 m = ExAllocatePoolWithTag(NonPagedPool,
2359                     MmSizeOfMdl(vaddr, len), 0);
2360         else {
2361                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
2362                 zone++;
2363         }
2364
2365         if (m == NULL)
2366                 return (NULL);
2367
2368         MmInitializeMdl(m, vaddr, len);
2369
2370         /*
	 * MmInitializeMdl() clears the flags field, so we
2372          * have to set this here. If the MDL came from the
2373          * MDL UMA zone, tag it so we can release it to
2374          * the right place later.
2375          */
2376         if (zone)
2377                 m->mdl_flags = MDL_ZONE_ALLOCED;
2378
2379         if (iopkt != NULL) {
2380                 if (secondarybuf == TRUE) {
2381                         mdl                     *last;
2382                         last = iopkt->irp_mdl;
2383                         while (last->mdl_next != NULL)
2384                                 last = last->mdl_next;
2385                         last->mdl_next = m;
2386                 } else {
2387                         if (iopkt->irp_mdl != NULL)
2388                                 panic("leaking an MDL in IoAllocateMdl()");
2389                         iopkt->irp_mdl = m;
2390                 }
2391         }
2392
2393         return (m);
2394 }
2395
2396 void
2397 IoFreeMdl(m)
2398         mdl                     *m;
2399 {
2400         if (m == NULL)
2401                 return;
2402
2403         if (m->mdl_flags & MDL_ZONE_ALLOCED)
2404                 uma_zfree(mdl_zone, m);
2405         else
2406                 ExFreePool(m);
2407 }
2408
2409 static void *
2410 MmAllocateContiguousMemory(size, highest)
2411         uint32_t                size;
2412         uint64_t                highest;
2413 {
2414         void *addr;
2415         size_t pagelength = roundup(size, PAGE_SIZE);
2416
2417         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2418
2419         return (addr);
2420 }
2421
2422 static void *
2423 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
2424     boundary, cachetype)
2425         uint32_t                size;
2426         uint64_t                lowest;
2427         uint64_t                highest;
2428         uint64_t                boundary;
2429         enum nt_caching_type    cachetype;
2430 {
2431         vm_memattr_t            memattr;
2432         void                    *ret;
2433
2434         switch (cachetype) {
2435         case MmNonCached:
2436                 memattr = VM_MEMATTR_UNCACHEABLE;
2437                 break;
2438         case MmWriteCombined:
2439                 memattr = VM_MEMATTR_WRITE_COMBINING;
2440                 break;
2441         case MmNonCachedUnordered:
2442                 memattr = VM_MEMATTR_UNCACHEABLE;
2443                 break;
2444         case MmCached:
2445         case MmHardwareCoherentCached:
2446         case MmUSWCCached:
2447         default:
2448                 memattr = VM_MEMATTR_DEFAULT;
2449                 break;
2450         }
2451
2452         ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
2453             lowest, highest, PAGE_SIZE, boundary, memattr);
2454         if (ret != NULL)
2455                 malloc_type_allocated(M_DEVBUF, round_page(size));
2456         return (ret);
2457 }
2458
2459 static void
2460 MmFreeContiguousMemory(base)
2461         void                    *base;
2462 {
2463         ExFreePool(base);
2464 }
2465
2466 static void
2467 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
2468         void                    *base;
2469         uint32_t                size;
2470         enum nt_caching_type    cachetype;
2471 {
2472         contigfree(base, size, M_DEVBUF);
2473 }
2474
2475 static uint32_t
2476 MmSizeOfMdl(vaddr, len)
2477         void                    *vaddr;
2478         size_t                  len;
2479 {
2480         uint32_t                l;
2481
2482         l = sizeof(struct mdl) +
2483             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2484
2485         return (l);
2486 }
2487
2488 /*
2489  * The Microsoft documentation says this routine fills in the
2490  * page array of an MDL with the _physical_ page addresses that
2491  * comprise the buffer, but we don't really want to do that here.
2492  * Instead, we just fill in the page array with the kernel virtual
2493  * addresses of the buffers.
2494  */
2495 void
2496 MmBuildMdlForNonPagedPool(m)
2497         mdl                     *m;
2498 {
2499         vm_offset_t             *mdl_pages;
2500         int                     pagecnt, i;
2501
2502         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2503
2504         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2505                 panic("not enough pages in MDL to describe buffer");
2506
2507         mdl_pages = MmGetMdlPfnArray(m);
2508
2509         for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2511
2512         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2513         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2514 }
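
/*
 * Illustrative sketch (editor's addition): a typical MDL round trip
 * for a driver-owned buffer; `buf' and `len' are hypothetical.
 */
#if 0
	mdl	*m;

	m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
	if (m != NULL) {
		MmBuildMdlForNonPagedPool(m);
		/* The page array now holds KVAs spanning buf. */
		IoFreeMdl(m);
	}
#endif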
2515
2516 static void *
2517 MmMapLockedPages(mdl *buf, uint8_t accessmode)
2518 {
2519         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2520         return (MmGetMdlVirtualAddress(buf));
2521 }
2522
2523 static void *
2524 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
2525         void *vaddr, uint32_t bugcheck, uint32_t prio)
2526 {
2527         return (MmMapLockedPages(buf, accessmode));
2528 }
2529
2530 static void
2531 MmUnmapLockedPages(vaddr, buf)
2532         void                    *vaddr;
2533         mdl                     *buf;
2534 {
2535         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2536 }
2537
2538 /*
2539  * This function has a problem in that it will break if you
2540  * compile this module without PAE and try to use it on a PAE
2541  * kernel. Unfortunately, there's no way around this at the
 * moment. It's slightly less broken than using pmap_kextract().
2543  * You'd think the virtual memory subsystem would help us out
2544  * here, but it doesn't.
2545  */
2546
2547 static uint64_t
2548 MmGetPhysicalAddress(void *base)
2549 {
2550         return (pmap_extract(kernel_map->pmap, (vm_offset_t)base));
2551 }
2552
2553 uint8_t
2554 MmIsAddressValid(vaddr)
2555         void                    *vaddr;
2556 {
2557         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
2558                 return (TRUE);
2559
2560         return (FALSE);
2561 }
2562
2563 void *
2564 MmMapIoSpace(paddr, len, cachetype)
2565         uint64_t                paddr;
2566         uint32_t                len;
2567         uint32_t                cachetype;
2568 {
2569         devclass_t              nexus_class;
2570         device_t                *nexus_devs, devp;
2571         int                     nexus_count = 0;
2572         device_t                matching_dev = NULL;
2573         struct resource         *res;
2574         int                     i;
2575         vm_offset_t             v;
2576
2577         /* There will always be at least one nexus. */
2578
2579         nexus_class = devclass_find("nexus");
2580         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2581
2582         for (i = 0; i < nexus_count; i++) {
2583                 devp = nexus_devs[i];
2584                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2585                 if (matching_dev)
2586                         break;
2587         }
2588
2589         free(nexus_devs, M_TEMP);
2590
2591         if (matching_dev == NULL)
2592                 return (NULL);
2593
2594         v = (vm_offset_t)rman_get_virtual(res);
2595         if (paddr > rman_get_start(res))
2596                 v += paddr - rman_get_start(res);
2597
2598         return ((void *)v);
2599 }
2600
2601 void
2602 MmUnmapIoSpace(vaddr, len)
2603         void                    *vaddr;
2604         size_t                  len;
2605 {
2606 }
2607
2608
2609 static device_t
2610 ntoskrnl_finddev(dev, paddr, res)
2611         device_t                dev;
2612         uint64_t                paddr;
2613         struct resource         **res;
2614 {
2615         device_t                *children = NULL;
2616         device_t                matching_dev;
2617         int                     childcnt;
2618         struct resource         *r;
2619         struct resource_list    *rl;
2620         struct resource_list_entry      *rle;
2621         uint32_t                flags;
2622         int                     i;
2623
2624         /* We only want devices that have been successfully probed. */
2625
2626         if (device_is_alive(dev) == FALSE)
2627                 return (NULL);
2628
2629         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2630         if (rl != NULL) {
2631                 STAILQ_FOREACH(rle, rl, link) {
2632                         r = rle->res;
2633
2634                         if (r == NULL)
2635                                 continue;
2636
2637                         flags = rman_get_flags(r);
2638
2639                         if (rle->type == SYS_RES_MEMORY &&
2640                             paddr >= rman_get_start(r) &&
2641                             paddr <= rman_get_end(r)) {
2642                                 if (!(flags & RF_ACTIVE))
2643                                         bus_activate_resource(dev,
2644                                             SYS_RES_MEMORY, 0, r);
2645                                 *res = r;
2646                                 return (dev);
2647                         }
2648                 }
2649         }
2650
2651         /*
2652          * If this device has children, do another
2653          * level of recursion to inspect them.
2654          */
2655
2656         device_get_children(dev, &children, &childcnt);
2657
2658         for (i = 0; i < childcnt; i++) {
2659                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2660                 if (matching_dev != NULL) {
2661                         free(children, M_TEMP);
2662                         return (matching_dev);
2663                 }
2664         }
2665
2666
2667         /* Won't somebody please think of the children! */
2668
2669         if (children != NULL)
2670                 free(children, M_TEMP);
2671
2672         return (NULL);
2673 }
2674
2675 /*
2676  * Workitems are unlike DPCs, in that they run in a user-mode thread
2677  * context rather than at DISPATCH_LEVEL in kernel context. In our
2678  * case we run them in kernel context anyway.
2679  */
2680 static void
2681 ntoskrnl_workitem_thread(arg)
2682         void                    *arg;
2683 {
2684         kdpc_queue              *kq;
2685         list_entry              *l;
2686         io_workitem             *iw;
2687         uint8_t                 irql;
2688
2689         kq = arg;
2690
2691         InitializeListHead(&kq->kq_disp);
2692         kq->kq_td = curthread;
2693         kq->kq_exit = 0;
2694         KeInitializeSpinLock(&kq->kq_lock);
2695         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2696
2697         while (1) {
2698                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2699
2700                 KeAcquireSpinLock(&kq->kq_lock, &irql);
2701
2702                 if (kq->kq_exit) {
2703                         kq->kq_exit = 0;
2704                         KeReleaseSpinLock(&kq->kq_lock, irql);
2705                         break;
2706                 }
2707
2708                 while (!IsListEmpty(&kq->kq_disp)) {
2709                         l = RemoveHeadList(&kq->kq_disp);
2710                         iw = CONTAINING_RECORD(l,
2711                             io_workitem, iw_listentry);
2712                         InitializeListHead((&iw->iw_listentry));
2713                         if (iw->iw_func == NULL)
2714                                 continue;
2715                         KeReleaseSpinLock(&kq->kq_lock, irql);
2716                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2717                         KeAcquireSpinLock(&kq->kq_lock, &irql);
2718                 }
2719
2720                 KeReleaseSpinLock(&kq->kq_lock, irql);
2721         }
2722
2723         kproc_exit(0);
2724         return; /* notreached */
2725 }
2726
2727 static void
2728 ntoskrnl_destroy_workitem_threads(void)
2729 {
2730         kdpc_queue              *kq;
2731         int                     i;
2732
2733         for (i = 0; i < WORKITEM_THREADS; i++) {
2734                 kq = wq_queues + i;
2735                 kq->kq_exit = 1;
2736                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2737                 while (kq->kq_exit)
2738                         tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
2739         }
2740 }
2741
2742 io_workitem *
2743 IoAllocateWorkItem(dobj)
2744         device_object           *dobj;
2745 {
2746         io_workitem             *iw;
2747
2748         iw = uma_zalloc(iw_zone, M_NOWAIT);
2749         if (iw == NULL)
2750                 return (NULL);
2751
2752         InitializeListHead(&iw->iw_listentry);
2753         iw->iw_dobj = dobj;
2754
2755         mtx_lock(&ntoskrnl_dispatchlock);
2756         iw->iw_idx = wq_idx;
2757         WORKIDX_INC(wq_idx);
2758         mtx_unlock(&ntoskrnl_dispatchlock);
2759
2760         return (iw);
2761 }
2762
2763 void
2764 IoFreeWorkItem(iw)
2765         io_workitem             *iw;
2766 {
2767         uma_zfree(iw_zone, iw);
2768 }
2769
2770 void
2771 IoQueueWorkItem(iw, iw_func, qtype, ctx)
2772         io_workitem             *iw;
2773         io_workitem_func        iw_func;
2774         uint32_t                qtype;
2775         void                    *ctx;
2776 {
2777         kdpc_queue              *kq;
2778         list_entry              *l;
2779         io_workitem             *cur;
2780         uint8_t                 irql;
2781
2782         kq = wq_queues + iw->iw_idx;
2783
2784         KeAcquireSpinLock(&kq->kq_lock, &irql);
2785
2786         /*
2787          * Traverse the list and make sure this workitem hasn't
2788          * already been inserted. Queuing the same workitem
2789          * twice will hose the list but good.
2790          */
2791
2792         l = kq->kq_disp.nle_flink;
2793         while (l != &kq->kq_disp) {
2794                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2795                 if (cur == iw) {
2796                         /* Already queued -- do nothing. */
2797                         KeReleaseSpinLock(&kq->kq_lock, irql);
2798                         return;
2799                 }
2800                 l = l->nle_flink;
2801         }
2802
2803         iw->iw_func = iw_func;
2804         iw->iw_ctx = ctx;
2805
2806         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2807         KeReleaseSpinLock(&kq->kq_lock, irql);
2808
2809         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2810 }
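
/*
 * Illustrative sketch (editor's addition): the driver-side sequence
 * for the workitem API above. `my_dobj', `my_func' and `my_ctx' are
 * hypothetical; the queue type argument is ignored here.
 */
#if 0
	io_workitem	*iw;

	iw = IoAllocateWorkItem(my_dobj);
	if (iw != NULL)
		IoQueueWorkItem(iw, my_func, 0, my_ctx);
#endif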
2811
2812 static void
2813 ntoskrnl_workitem(dobj, arg)
2814         device_object           *dobj;
2815         void                    *arg;
2816 {
2817         io_workitem             *iw;
2818         work_queue_item         *w;
2819         work_item_func          f;
2820
2821         iw = arg;
2822         w = (work_queue_item *)dobj;
2823         f = (work_item_func)w->wqi_func;
2824         uma_zfree(iw_zone, iw);
2825         MSCALL2(f, w, w->wqi_ctx);
2826 }
2827
2828 /*
2829  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2830  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2831  * problem with ExQueueWorkItem() is that it can't guard against
2832  * the condition where a driver submits a job to the work queue and
2833  * is then unloaded before the job is able to run. IoQueueWorkItem()
2834  * acquires a reference to the device's device_object via the
2835  * object manager and retains it until after the job has completed,
2836  * which prevents the driver from being unloaded before the job
2837  * runs. (We don't currently support this behavior, though hopefully
2838  * that will change once the object manager API is fleshed out a bit.)
2839  *
2840  * Having said all that, the ExQueueWorkItem() API remains, because
2841  * there are still other parts of Windows that use it, including
2842  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2843  * We fake up the ExQueueWorkItem() API on top of our implementation
2844  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2845  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2846  * queue item (provided by the caller) in to IoAllocateWorkItem()
2847  * instead of the device_object. We need to save this pointer so
2848  * we can apply a sanity check: as with the DPC queue and other
2849  * workitem queues, we can't allow the same work queue item to
 * be queued twice. If it's already pending, we silently return.
2851  */
2852
2853 void
2854 ExQueueWorkItem(w, qtype)
2855         work_queue_item         *w;
2856         uint32_t                qtype;
2857 {
2858         io_workitem             *iw;
2859         io_workitem_func        iwf;
2860         kdpc_queue              *kq;
2861         list_entry              *l;
2862         io_workitem             *cur;
2863         uint8_t                 irql;
2864
2865
2866         /*
2867          * We need to do a special sanity test to make sure
2868          * the ExQueueWorkItem() API isn't used to queue
2869          * the same workitem twice. Rather than checking the
2870          * io_workitem pointer itself, we test the attached
2871          * device object, which is really a pointer to the
2872          * legacy work queue item structure.
2873          */
2874
2875         kq = wq_queues + WORKITEM_LEGACY_THREAD;
2876         KeAcquireSpinLock(&kq->kq_lock, &irql);
2877         l = kq->kq_disp.nle_flink;
2878         while (l != &kq->kq_disp) {
2879                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2880                 if (cur->iw_dobj == (device_object *)w) {
2881                         /* Already queued -- do nothing. */
2882                         KeReleaseSpinLock(&kq->kq_lock, irql);
2883                         return;
2884                 }
2885                 l = l->nle_flink;
2886         }
2887         KeReleaseSpinLock(&kq->kq_lock, irql);
2888
2889         iw = IoAllocateWorkItem((device_object *)w);
2890         if (iw == NULL)
2891                 return;
2892
2893         iw->iw_idx = WORKITEM_LEGACY_THREAD;
2894         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2895         IoQueueWorkItem(iw, iwf, qtype, iw);
2896 }
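
/*
 * Illustrative sketch (editor's addition): legacy usage with a
 * caller-owned work_queue_item, which must stay valid until the job
 * runs. `my_legacy_func' and `my_ctx' are hypothetical.
 */
#if 0
	static work_queue_item	wqi;

	wqi.wqi_func = (work_item_func)my_legacy_func;
	wqi.wqi_ctx = my_ctx;
	ExQueueWorkItem(&wqi, 0);
#endif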
2897
2898 static void
2899 RtlZeroMemory(dst, len)
2900         void                    *dst;
2901         size_t                  len;
2902 {
2903         bzero(dst, len);
2904 }
2905
2906 static void
2907 RtlCopyMemory(dst, src, len)
2908         void                    *dst;
2909         const void              *src;
2910         size_t                  len;
2911 {
2912         bcopy(src, dst, len);
2913 }
2914
2915 static size_t
2916 RtlCompareMemory(s1, s2, len)
2917         const void              *s1;
2918         const void              *s2;
2919         size_t                  len;
2920 {
2921         size_t                  i, total = 0;
2922         uint8_t                 *m1, *m2;
2923
	m1 = __DECONST(uint8_t *, s1);
	m2 = __DECONST(uint8_t *, s2);
2926
	/* Windows semantics: count the leading run of matching bytes. */
	for (i = 0; i < len; i++) {
		if (m1[i] != m2[i])
			break;
		total++;
	}
2931         return (total);
2932 }
2933
2934 void
2935 RtlInitAnsiString(dst, src)
2936         ansi_string             *dst;
2937         char                    *src;
2938 {
2939         ansi_string             *a;
2940
2941         a = dst;
2942         if (a == NULL)
2943                 return;
2944         if (src == NULL) {
2945                 a->as_len = a->as_maxlen = 0;
2946                 a->as_buf = NULL;
2947         } else {
2948                 a->as_buf = src;
2949                 a->as_len = a->as_maxlen = strlen(src);
2950         }
2951 }
2952
2953 void
2954 RtlInitUnicodeString(dst, src)
2955         unicode_string          *dst;
2956         uint16_t                *src;
2957 {
2958         unicode_string          *u;
2959         int                     i;
2960
2961         u = dst;
2962         if (u == NULL)
2963                 return;
2964         if (src == NULL) {
2965                 u->us_len = u->us_maxlen = 0;
2966                 u->us_buf = NULL;
2967         } else {
2968                 i = 0;
		while (src[i] != 0)
2970                         i++;
2971                 u->us_buf = src;
2972                 u->us_len = u->us_maxlen = i * 2;
2973         }
2974 }
2975
2976 ndis_status
2977 RtlUnicodeStringToInteger(ustr, base, val)
2978         unicode_string          *ustr;
2979         uint32_t                base;
2980         uint32_t                *val;
2981 {
2982         uint16_t                *uchr;
2983         int                     len, neg = 0;
2984         char                    abuf[64];
2985         char                    *astr;
2986
2987         uchr = ustr->us_buf;
2988         len = ustr->us_len;
2989         bzero(abuf, sizeof(abuf));
2990
2991         if ((char)((*uchr) & 0xFF) == '-') {
2992                 neg = 1;
2993                 uchr++;
2994                 len -= 2;
2995         } else if ((char)((*uchr) & 0xFF) == '+') {
2996                 neg = 0;
2997                 uchr++;
2998                 len -= 2;
2999         }
3000
3001         if (base == 0) {
3002                 if ((char)((*uchr) & 0xFF) == 'b') {
3003                         base = 2;
3004                         uchr++;
3005                         len -= 2;
3006                 } else if ((char)((*uchr) & 0xFF) == 'o') {
3007                         base = 8;
3008                         uchr++;
3009                         len -= 2;
3010                 } else if ((char)((*uchr) & 0xFF) == 'x') {
3011                         base = 16;
3012                         uchr++;
3013                         len -= 2;
3014                 } else
3015                         base = 10;
3016         }
3017
3018         astr = abuf;
3019         if (neg) {
3020                 strcpy(astr, "-");
3021                 astr++;
3022         }
3023
3024         ntoskrnl_unicode_to_ascii(uchr, astr, len);
3025         *val = strtoul(abuf, NULL, base);
3026
3027         return (STATUS_SUCCESS);
3028 }
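
/*
 * Editor's example: with base 0, a unicode string "x1f" selects base 16
 * above and yields *val = 31; with an explicit base of 10, "42" yields
 * *val = 42.
 */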
3029
3030 void
3031 RtlFreeUnicodeString(ustr)
3032         unicode_string          *ustr;
3033 {
3034         if (ustr->us_buf == NULL)
3035                 return;
3036         ExFreePool(ustr->us_buf);
3037         ustr->us_buf = NULL;
3038 }
3039
3040 void
3041 RtlFreeAnsiString(astr)
3042         ansi_string             *astr;
3043 {
3044         if (astr->as_buf == NULL)
3045                 return;
3046         ExFreePool(astr->as_buf);
3047         astr->as_buf = NULL;
3048 }
3049
3050 static int
3051 atoi(str)
3052         const char              *str;
3053 {
3054         return (int)strtol(str, (char **)NULL, 10);
3055 }
3056
3057 static long
3058 atol(str)
3059         const char              *str;
3060 {
3061         return strtol(str, (char **)NULL, 10);
3062 }
3063
3064 static int
3065 rand(void)
3066 {
3067         struct timeval          tv;
3068
3069         microtime(&tv);
3070         srandom(tv.tv_usec);
3071         return ((int)random());
3072 }
3073
3074 static void
3075 srand(seed)
3076         unsigned int            seed;
3077 {
3078         srandom(seed);
3079 }
3080
3081 static uint8_t
3082 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
3083 {
3084         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3085                 return (TRUE);
3086         return (FALSE);
3087 }
3088
3089 static ndis_status
3090 IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
3091         unicode_string          *name;
3092         uint32_t                reqaccess;
3093         void                    *fileobj;
3094         device_object           *devobj;
3095 {
3096         return (STATUS_SUCCESS);
3097 }
3098
3099 static ndis_status
3100 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3101         device_object           *devobj;
3102         uint32_t                regprop;
3103         uint32_t                buflen;
3104         void                    *prop;
3105         uint32_t                *reslen;
3106 {
3107         driver_object           *drv;
3108         uint16_t                **name;
3109
3110         drv = devobj->do_drvobj;
3111
3112         switch (regprop) {
3113         case DEVPROP_DRIVER_KEYNAME:
3114                 name = prop;
3115                 *name = drv->dro_drivername.us_buf;
3116                 *reslen = drv->dro_drivername.us_len;
3117                 break;
3118         default:
		return (STATUS_INVALID_PARAMETER_2);
3121         }
3122
3123         return (STATUS_SUCCESS);
3124 }
3125
3126 static void
3127 KeInitializeMutex(kmutex, level)
3128         kmutant                 *kmutex;
3129         uint32_t                level;
3130 {
3131         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3132         kmutex->km_abandoned = FALSE;
3133         kmutex->km_apcdisable = 1;
3134         kmutex->km_header.dh_sigstate = 1;
3135         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3136         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3137         kmutex->km_ownerthread = NULL;
3138 }
3139
3140 static uint32_t
3141 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
3142 {
3143         uint32_t                prevstate;
3144
3145         mtx_lock(&ntoskrnl_dispatchlock);
3146         prevstate = kmutex->km_header.dh_sigstate;
3147         if (kmutex->km_ownerthread != curthread) {
3148                 mtx_unlock(&ntoskrnl_dispatchlock);
3149                 return (STATUS_MUTANT_NOT_OWNED);
3150         }
3151
3152         kmutex->km_header.dh_sigstate++;
3153         kmutex->km_abandoned = FALSE;
3154
3155         if (kmutex->km_header.dh_sigstate == 1) {
3156                 kmutex->km_ownerthread = NULL;
3157                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3158         }
3159
3160         mtx_unlock(&ntoskrnl_dispatchlock);
3161
3162         return (prevstate);
3163 }
3164
3165 static uint32_t
3166 KeReadStateMutex(kmutex)
3167         kmutant                 *kmutex;
3168 {
3169         return (kmutex->km_header.dh_sigstate);
3170 }
3171
3172 void
3173 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
3174 {
3175         InitializeListHead((&kevent->k_header.dh_waitlisthead));
3176         kevent->k_header.dh_sigstate = state;
3177         if (type == EVENT_TYPE_NOTIFY)
3178                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3179         else
3180                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3181         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3182 }
3183
3184 uint32_t
3185 KeResetEvent(kevent)
3186         nt_kevent               *kevent;
3187 {
3188         uint32_t                prevstate;
3189
3190         mtx_lock(&ntoskrnl_dispatchlock);
3191         prevstate = kevent->k_header.dh_sigstate;
3192         kevent->k_header.dh_sigstate = FALSE;
3193         mtx_unlock(&ntoskrnl_dispatchlock);
3194
3195         return (prevstate);
3196 }
3197
3198 uint32_t
3199 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
3200 {
3201         uint32_t                prevstate;
3202         wait_block              *w;
3203         nt_dispatch_header      *dh;
3204         struct thread           *td;
3205         wb_ext                  *we;
3206
3207         mtx_lock(&ntoskrnl_dispatchlock);
3208         prevstate = kevent->k_header.dh_sigstate;
3209         dh = &kevent->k_header;
3210
3211         if (IsListEmpty(&dh->dh_waitlisthead))
3212                 /*
3213                  * If there's nobody in the waitlist, just set
3214                  * the state to signalled.
3215                  */
3216                 dh->dh_sigstate = 1;
3217         else {
3218                 /*
3219                  * Get the first waiter. If this is a synchronization
3220                  * event, just wake up that one thread (don't bother
3221                  * setting the state to signalled since we're supposed
3222                  * to automatically clear synchronization events anyway).
3223                  *
		 * If it's a notification event, or the first
3225                  * waiter is doing a WAITTYPE_ALL wait, go through
3226                  * the full wait satisfaction process.
3227                  */
3228                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3229                     wait_block, wb_waitlist);
3230                 we = w->wb_ext;
3231                 td = we->we_td;
3232                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3233                     w->wb_waittype == WAITTYPE_ALL) {
3234                         if (prevstate == 0) {
3235                                 dh->dh_sigstate = 1;
3236                                 ntoskrnl_waittest(dh, increment);
3237                         }
3238                 } else {
3239                         w->wb_awakened |= TRUE;
3240                         cv_broadcastpri(&we->we_cv,
3241                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3242                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3243                 }
3244         }
3245
3246         mtx_unlock(&ntoskrnl_dispatchlock);
3247
3248         return (prevstate);
3249 }
3250
3251 void
3252 KeClearEvent(kevent)
3253         nt_kevent               *kevent;
3254 {
3255         kevent->k_header.dh_sigstate = FALSE;
3256 }
3257
3258 uint32_t
3259 KeReadStateEvent(kevent)
3260         nt_kevent               *kevent;
3261 {
3262         return (kevent->k_header.dh_sigstate);
3263 }
3264
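/*
 * The two event types behave differently under KeSetEvent(): a
 * synchronization event wakes a single waiter and clears itself,
 * while a notification event stays signalled until explicitly
 * reset. A sketch, with the waiting and signalling sides being
 * hypothetical driver code:
 *
 *	nt_kevent	ev;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_SYNC, FALSE);
 *	(waiter)    KeWaitForSingleObject(&ev, 0, 0, FALSE, NULL);
 *	(signaller) KeSetEvent(&ev, IO_NO_INCREMENT, FALSE);
 */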
3265 /*
3266  * The object manager in Windows is responsible for managing
3267  * references and access to various types of objects, including
3268  * device_objects, events, threads, timers and so on. However,
3269  * there's a difference in the way objects are handled in user
3270  * mode versus kernel mode.
3271  *
3272  * In user mode (i.e. Win32 applications), all objects are
3273  * managed by the object manager. For example, when you create
3274  * a timer or event object, you actually end up with an 
3275  * object_header (for the object manager's bookkeeping
3276  * purposes) and an object body (which contains the actual object
3277  * structure, e.g. ktimer, kevent, etc...). This allows Windows
3278  * to manage resource quotas and to enforce access restrictions
3279  * on basically every kind of system object handled by the kernel.
3280  *
3281  * However, in kernel mode, you only end up using the object
3282  * manager some of the time. For example, in a driver, you create
3283  * a timer object by simply allocating the memory for a ktimer
3284  * structure and initializing it with KeInitializeTimer(). Hence,
3285  * the timer has no object_header and no reference counting or
3286  * security/resource checks are done on it. The assumption in
3287  * this case is that if you're running in kernel mode, you know
3288  * what you're doing, and you're already at an elevated privilege
3289  * anyway.
3290  *
3291  * There are some exceptions to this. The two most important ones
3292  * for our purposes are device_objects and threads. We need to use
3293  * the object manager to do reference counting on device_objects,
3294  * and for threads, you can only get a pointer to a thread's
3295  * dispatch header by using ObReferenceObjectByHandle() on the
3296  * handle returned by PsCreateSystemThread().
3297  */
3298
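/*
 * A sketch of the thread case as a driver would exercise it
 * (func and ctx are hypothetical):
 *
 *	ndis_handle	h;
 *	void		*t;
 *
 *	PsCreateSystemThread(&h, 0, NULL, NULL, NULL, func, ctx);
 *	ObReferenceObjectByHandle(h, 0, NULL, 0, &t, NULL);
 *	ZwClose(h);
 *	KeWaitForSingleObject(t, 0, 0, FALSE, NULL);
 *	ObfDereferenceObject(t);
 */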
3299 static ndis_status
3300 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
3301         uint8_t accessmode, void **object, void **handleinfo)
3302 {
3303         nt_objref               *nr;
3304
3305         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3306         if (nr == NULL)
3307                 return (STATUS_INSUFFICIENT_RESOURCES);
3308
3309         InitializeListHead((&nr->no_dh.dh_waitlisthead));
3310         nr->no_obj = handle;
3311         nr->no_dh.dh_type = DISP_TYPE_THREAD;
3312         nr->no_dh.dh_sigstate = 0;
3313         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3314             sizeof(uint32_t));
3315         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3316         *object = nr;
3317
3318         return (STATUS_SUCCESS);
3319 }
3320
3321 static void
3322 ObfDereferenceObject(object)
3323         void                    *object;
3324 {
3325         nt_objref               *nr;
3326
3327         nr = object;
3328         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3329         free(nr, M_DEVBUF);
3330 }
3331
3332 static uint32_t
3333 ZwClose(handle)
3334         ndis_handle             handle;
3335 {
3336         return (STATUS_SUCCESS);
3337 }
3338
3339 static uint32_t
3340 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3341         uint32_t                traceclass;
3342         void                    *traceinfo;
3343         uint32_t                infolen;
3344         uint32_t                reqlen;
3345         void                    *buf;
3346 {
3347         return (STATUS_NOT_FOUND);
3348 }
3349
3350 static uint32_t
3351 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3352         void *guid, uint16_t messagenum, ...)
3353 {
3354         return (STATUS_SUCCESS);
3355 }
3356
3357 static uint32_t
3358 IoWMIRegistrationControl(dobj, action)
3359         device_object           *dobj;
3360         uint32_t                action;
3361 {
3362         return (STATUS_SUCCESS);
3363 }
3364
3365 /*
3366  * This is here just in case the thread returns without calling
3367  * PsTerminateSystemThread().
3368  */
3369 static void
3370 ntoskrnl_thrfunc(arg)
3371         void                    *arg;
3372 {
3373         thread_context          *thrctx;
3374         uint32_t (*tfunc)(void *);
3375         void                    *tctx;
3376         uint32_t                rval;
3377
3378         thrctx = arg;
3379         tfunc = thrctx->tc_thrfunc;
3380         tctx = thrctx->tc_thrctx;
3381         free(thrctx, M_TEMP);
3382
3383         rval = MSCALL1(tfunc, tctx);
3384
3385         PsTerminateSystemThread(rval);
3386         return; /* notreached */
3387 }
3388
3389 static ndis_status
3390 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3391         clientid, thrfunc, thrctx)
3392         ndis_handle             *handle;
3393         uint32_t                reqaccess;
3394         void                    *objattrs;
3395         ndis_handle             phandle;
3396         void                    *clientid;
3397         void                    *thrfunc;
3398         void                    *thrctx;
3399 {
3400         int                     error;
3401         thread_context          *tc;
3402         struct proc             *p;
3403
3404         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3405         if (tc == NULL)
3406                 return (STATUS_INSUFFICIENT_RESOURCES);
3407
3408         tc->tc_thrctx = thrctx;
3409         tc->tc_thrfunc = thrfunc;
3410
3411         error = kproc_create(ntoskrnl_thrfunc, tc, &p,
3412             RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Kthread %d", ntoskrnl_kth);
3413
3414         if (error) {
3415                 free(tc, M_TEMP);
3416                 return (STATUS_INSUFFICIENT_RESOURCES);
3417         }
3418
3419         *handle = p;
3420         ntoskrnl_kth++;
3421
3422         return (STATUS_SUCCESS);
3423 }
3424
3425 /*
3426  * In Windows, the exit of a thread is an event that you're allowed
3427  * to wait on, assuming you've obtained a reference to the thread using
3428  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3429  * simulate this behavior is to register each thread we create in a
3430  * reference list, and if someone holds a reference to us, we poke
3431  * them.
3432  */
3433 static ndis_status
3434 PsTerminateSystemThread(status)
3435         ndis_status             status;
3436 {
3437         struct nt_objref        *nr;
3438
3439         mtx_lock(&ntoskrnl_dispatchlock);
3440         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3441                 if (nr->no_obj != curthread->td_proc)
3442                         continue;
3443                 nr->no_dh.dh_sigstate = 1;
3444                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3445                 break;
3446         }
3447         mtx_unlock(&ntoskrnl_dispatchlock);
3448
3449         ntoskrnl_kth--;
3450
3451         kproc_exit(0);
3452         return (0);     /* notreached */
3453 }
3454
3455 static uint32_t
3456 DbgPrint(char *fmt, ...)
3457 {
3458         va_list                 ap;
3459
3460         if (bootverbose) {
3461                 va_start(ap, fmt);
3462                 vprintf(fmt, ap);
                     va_end(ap);
3463         }
3464
3465         return (STATUS_SUCCESS);
3466 }
3467
3468 static void
3469 DbgBreakPoint(void)
3470 {
3471
3472         kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
3473 }
3474
3475 static void
3476 KeBugCheckEx(code, param1, param2, param3, param4)
3477         uint32_t                code;
3478         u_long                  param1;
3479         u_long                  param2;
3480         u_long                  param3;
3481         u_long                  param4;
3482 {
3483         panic("KeBugCheckEx: STOP 0x%X", code);
3484 }
3485
3486 static void
3487 ntoskrnl_timercall(arg)
3488         void                    *arg;
3489 {
3490         ktimer                  *timer;
3491         struct timeval          tv;
3492         kdpc                    *dpc;
3493
3494         mtx_lock(&ntoskrnl_dispatchlock);
3495
3496         timer = arg;
3497
3498 #ifdef NTOSKRNL_DEBUG_TIMERS
3499         ntoskrnl_timer_fires++;
3500 #endif
3501         ntoskrnl_remove_timer(timer);
3502
3503         /*
3504          * This should never happen, but complain
3505          * if it does.
3506          */
3507
3508         if (timer->k_header.dh_inserted == FALSE) {
3509                 mtx_unlock(&ntoskrnl_dispatchlock);
3510                 printf("NTOS: timer %p fired even though "
3511                     "it was canceled\n", timer);
3512                 return;
3513         }
3514
3515         /* Mark the timer as no longer being on the timer queue. */
3516
3517         timer->k_header.dh_inserted = FALSE;
3518
3519         /* Now signal the object and satisfy any waits on it. */
3520
3521         timer->k_header.dh_sigstate = 1;
3522         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3523
3524         /*
3525          * If this is a periodic timer, re-arm it
3526          * so it will fire again. We do this before
3527          * calling any deferred procedure calls because
3528          * it's possible the DPC might cancel the timer,
3529          * in which case it would be wrong for us to
3530          * re-arm it again afterwards.
3531          */
3532
3533         if (timer->k_period) {
3534                 tv.tv_sec = 0;
3535                 tv.tv_usec = timer->k_period * 1000;
3536                 timer->k_header.dh_inserted = TRUE;
3537                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3538 #ifdef NTOSKRNL_DEBUG_TIMERS
3539                 ntoskrnl_timer_reloads++;
3540 #endif
3541         }
3542
3543         dpc = timer->k_dpc;
3544
3545         mtx_unlock(&ntoskrnl_dispatchlock);
3546
3547         /* If there's a DPC associated with the timer, queue it up. */
3548
3549         if (dpc != NULL)
3550                 KeInsertQueueDpc(dpc, NULL, NULL);
3551 }
3552
3553 #ifdef NTOSKRNL_DEBUG_TIMERS
3554 static int
3555 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3556 {
3557         int                     ret;
3558
3559         ret = 0;
3560         ntoskrnl_show_timers();
3561         return (sysctl_handle_int(oidp, &ret, 0, req));
3562 }
3563
3564 static void
3565 ntoskrnl_show_timers(void)
3566 {
3567         int                     i = 0;
3568         list_entry              *l;
3569
3570         mtx_lock_spin(&ntoskrnl_calllock);
3571         l = ntoskrnl_calllist.nle_flink;
3572         while (l != &ntoskrnl_calllist) {
3573                 i++;
3574                 l = l->nle_flink;
3575         }
3576         mtx_unlock_spin(&ntoskrnl_calllock);
3577
3578         printf("\n");
3579         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3580         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3581         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3582         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3583         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3584         printf("\n");
3585 }
3586 #endif
3587
3588 /*
3589  * Must be called with dispatcher lock held.
3590  */
3591
3592 static void
3593 ntoskrnl_insert_timer(timer, ticks)
3594         ktimer                  *timer;
3595         int                     ticks;
3596 {
3597         callout_entry           *e;
3598         list_entry              *l;
3599         struct callout          *c;
3600
3601         /*
3602          * Try to allocate a timer.
3603          */
3604         mtx_lock_spin(&ntoskrnl_calllock);
3605         if (IsListEmpty(&ntoskrnl_calllist)) {
3606                 mtx_unlock_spin(&ntoskrnl_calllock);
3607 #ifdef NTOSKRNL_DEBUG_TIMERS
3608                 ntoskrnl_show_timers();
3609 #endif
3610                 panic("out of timers!");
3611         }
3612         l = RemoveHeadList(&ntoskrnl_calllist);
3613         mtx_unlock_spin(&ntoskrnl_calllock);
3614
3615         e = CONTAINING_RECORD(l, callout_entry, ce_list);
3616         c = &e->ce_callout;
3617
3618         timer->k_callout = c;
3619
3620         callout_init(c, CALLOUT_MPSAFE);
3621         callout_reset(c, ticks, ntoskrnl_timercall, timer);
3622 }
3623
3624 static void
3625 ntoskrnl_remove_timer(timer)
3626         ktimer                  *timer;
3627 {
3628         callout_entry           *e;
3629
3630         e = (callout_entry *)timer->k_callout;
3631         callout_stop(timer->k_callout);
3632
3633         mtx_lock_spin(&ntoskrnl_calllock);
3634         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3635         mtx_unlock_spin(&ntoskrnl_calllock);
3636 }
3637
3638 void
3639 KeInitializeTimer(timer)
3640         ktimer                  *timer;
3641 {
3642         if (timer == NULL)
3643                 return;
3644
3645         KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3646 }
3647
3648 void
3649 KeInitializeTimerEx(timer, type)
3650         ktimer                  *timer;
3651         uint32_t                type;
3652 {
3653         if (timer == NULL)
3654                 return;
3655
3656         bzero((char *)timer, sizeof(ktimer));
3657         InitializeListHead((&timer->k_header.dh_waitlisthead));
3658         timer->k_header.dh_sigstate = FALSE;
3659         timer->k_header.dh_inserted = FALSE;
3660         if (type == EVENT_TYPE_NOTIFY)
3661                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3662         else
3663                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3664         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3665 }
3666
3667 /*
3668  * DPC subsystem. A Windows Deferred Procedure Call has the following
3669  * properties:
3670  * - It runs at DISPATCH_LEVEL.
3671  * - It can have one of 3 importance values that control when it
3672  *   runs relative to other DPCs in the queue.
3673  * - On SMP systems, it can be set to run on a specific processor.
3674  * In order to satisfy the last property, we create a DPC thread for
3675  * each CPU in the system and bind it to that CPU. Each thread
3676  * maintains one dispatch queue; higher-importance DPCs are
3677  * inserted nearer its head and so are dispatched first.
3678  *
3679  * In Windows, interrupt handlers run as DPCs. (Not to be confused
3680  * with ISRs, which run in interrupt context and can preempt DPCs.)
3681  * The DPCs queued by ISRs are given the highest importance so
3682  * that they'll take precedence over timers and other things.
3683  */
3684
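/*
 * A sketch of typical DPC use by a driver (dpcfunc and ctx are
 * hypothetical):
 *
 *	kdpc		dpc;
 *
 *	KeInitializeDpc(&dpc, dpcfunc, ctx);
 *	KeSetImportanceDpc(&dpc, KDPC_IMPORTANCE_HIGH);
 *	KeSetTargetProcessorDpc(&dpc, 0);
 *	KeInsertQueueDpc(&dpc, NULL, NULL);
 */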
3685 static void
3686 ntoskrnl_dpc_thread(arg)
3687         void                    *arg;
3688 {
3689         kdpc_queue              *kq;
3690         kdpc                    *d;
3691         list_entry              *l;
3692         uint8_t                 irql;
3693
3694         kq = arg;
3695
3696         InitializeListHead(&kq->kq_disp);
3697         kq->kq_td = curthread;
3698         kq->kq_exit = 0;
3699         kq->kq_running = FALSE;
3700         KeInitializeSpinLock(&kq->kq_lock);
3701         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3702         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3703
3704         /*
3705          * Elevate our priority. DPCs are used to run interrupt
3706          * handlers, and they should trigger as soon as possible
3707          * once scheduled by an ISR.
3708          */
3709
3710         thread_lock(curthread);
3711 #ifdef NTOSKRNL_MULTIPLE_DPCS
3712         sched_bind(curthread, kq->kq_cpu);
3713 #endif
3714         sched_prio(curthread, PRI_MIN_KERN);
3715         thread_unlock(curthread);
3716
3717         while (1) {
3718                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3719
3720                 KeAcquireSpinLock(&kq->kq_lock, &irql);
3721
3722                 if (kq->kq_exit) {
3723                         kq->kq_exit = 0;
3724                         KeReleaseSpinLock(&kq->kq_lock, irql);
3725                         break;
3726                 }
3727
3728                 kq->kq_running = TRUE;
3729
3730                 while (!IsListEmpty(&kq->kq_disp)) {
3731                         l = RemoveHeadList((&kq->kq_disp));
3732                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3733                         InitializeListHead((&d->k_dpclistentry));
3734                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3735                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3736                             d->k_sysarg1, d->k_sysarg2);
3737                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3738                 }
3739
3740                 kq->kq_running = FALSE;
3741
3742                 KeReleaseSpinLock(&kq->kq_lock, irql);
3743
3744                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3745         }
3746
3747         kproc_exit(0);
3748         return; /* notreached */
3749 }
3750
3751 static void
3752 ntoskrnl_destroy_dpc_threads(void)
3753 {
3754         kdpc_queue              *kq;
3755         kdpc                    dpc;
3756         int                     i;
3757
3758         kq = kq_queues;
3759 #ifdef NTOSKRNL_MULTIPLE_DPCS
3760         for (i = 0; i < mp_ncpus; i++) {
3761 #else
3762         for (i = 0; i < 1; i++) {
3763 #endif
3764                 kq = kq_queues + i;
3765
3766                 kq->kq_exit = 1;
3767                 KeInitializeDpc(&dpc, NULL, NULL);
3768                 KeSetTargetProcessorDpc(&dpc, i);
3769                 KeInsertQueueDpc(&dpc, NULL, NULL);
3770                 while (kq->kq_exit)
3771                         tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
3772         }
3773 }
3774
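/*
 * Insert a DPC into a dispatch queue unless it's already queued.
 * Low importance DPCs are appended at the tail of the queue;
 * medium and high importance DPCs are inserted at the head so
 * they're dispatched first. Returns TRUE if the DPC was
 * inserted, FALSE if it was already on the queue.
 */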
3775 static uint8_t
3776 ntoskrnl_insert_dpc(head, dpc)
3777         list_entry              *head;
3778         kdpc                    *dpc;
3779 {
3780         list_entry              *l;
3781         kdpc                    *d;
3782
3783         l = head->nle_flink;
3784         while (l != head) {
3785                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3786                 if (d == dpc)
3787                         return (FALSE);
3788                 l = l->nle_flink;
3789         }
3790
3791         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3792                 InsertTailList((head), (&dpc->k_dpclistentry));
3793         else
3794                 InsertHeadList((head), (&dpc->k_dpclistentry));
3795
3796         return (TRUE);
3797 }
3798
3799 void
3800 KeInitializeDpc(dpc, dpcfunc, dpcctx)
3801         kdpc                    *dpc;
3802         void                    *dpcfunc;
3803         void                    *dpcctx;
3804 {
3805
3806         if (dpc == NULL)
3807                 return;
3808
3809         dpc->k_deferedfunc = dpcfunc;
3810         dpc->k_deferredctx = dpcctx;
3811         dpc->k_num = KDPC_CPU_DEFAULT;
3812         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3813         InitializeListHead((&dpc->k_dpclistentry));
3814 }
3815
3816 uint8_t
3817 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
3818         kdpc                    *dpc;
3819         void                    *sysarg1;
3820         void                    *sysarg2;
3821 {
3822         kdpc_queue              *kq;
3823         uint8_t                 r;
3824         uint8_t                 irql;
3825
3826         if (dpc == NULL)
3827                 return (FALSE);
3828
3829         kq = kq_queues;
3830
3831 #ifdef NTOSKRNL_MULTIPLE_DPCS
3832         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3833
3834         /*
3835          * By default, the DPC is queued to run on the same CPU
3836          * that scheduled it.
3837          */
3838
3839         if (dpc->k_num == KDPC_CPU_DEFAULT)
3840                 kq += curthread->td_oncpu;
3841         else
3842                 kq += dpc->k_num;
3843         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3844 #else
3845         KeAcquireSpinLock(&kq->kq_lock, &irql);
3846 #endif
3847
3848         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3849         if (r == TRUE) {
3850                 dpc->k_sysarg1 = sysarg1;
3851                 dpc->k_sysarg2 = sysarg2;
3852         }
3853         KeReleaseSpinLock(&kq->kq_lock, irql);
3854
3855         if (r == FALSE)
3856                 return (r);
3857
3858         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3859
3860         return (r);
3861 }
3862
3863 uint8_t
3864 KeRemoveQueueDpc(dpc)
3865         kdpc                    *dpc;
3866 {
3867         kdpc_queue              *kq;
3868         uint8_t                 irql;
3869
3870         if (dpc == NULL)
3871                 return (FALSE);
3872
3873 #ifdef NTOSKRNL_MULTIPLE_DPCS
3874         KeRaiseIrql(DISPATCH_LEVEL, &irql);
3875
3876         kq = kq_queues + dpc->k_num;
3877
3878         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3879 #else
3880         kq = kq_queues;
3881         KeAcquireSpinLock(&kq->kq_lock, &irql);
3882 #endif
3883
3884         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
3885                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3886                 KeLowerIrql(irql);
3887                 return (FALSE);
3888         }
3889
3890         RemoveEntryList((&dpc->k_dpclistentry));
3891         InitializeListHead((&dpc->k_dpclistentry));
3892
3893         KeReleaseSpinLock(&kq->kq_lock, irql);
3894
3895         return (TRUE);
3896 }
3897
3898 void
3899 KeSetImportanceDpc(dpc, imp)
3900         kdpc                    *dpc;
3901         uint32_t                imp;
3902 {
3903         if (imp != KDPC_IMPORTANCE_LOW &&
3904             imp != KDPC_IMPORTANCE_MEDIUM &&
3905             imp != KDPC_IMPORTANCE_HIGH)
3906                 return;
3907
3908         dpc->k_importance = (uint8_t)imp;
3909 }
3910
3911 void
3912 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
3913 {
3914         if (cpu >= mp_ncpus)
3915                 return;
3916
3917         dpc->k_num = cpu;
3918 }
3919
3920 void
3921 KeFlushQueuedDpcs(void)
3922 {
3923         kdpc_queue              *kq;
3924         int                     i;
3925
3926         /*
3927          * Poke each DPC queue and wait
3928          * for them to drain.
3929          */
3930
3931 #ifdef NTOSKRNL_MULTIPLE_DPCS
3932         for (i = 0; i < mp_ncpus; i++) {
3933 #else
3934         for (i = 0; i < 1; i++) {
3935 #endif
3936                 kq = kq_queues + i;
3937                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3938                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
3939         }
3940 }
3941
3942 uint32_t
3943 KeGetCurrentProcessorNumber(void)
3944 {
3945         return ((uint32_t)curthread->td_oncpu);
3946 }
3947
3948 uint8_t
3949 KeSetTimerEx(timer, duetime, period, dpc)
3950         ktimer                  *timer;
3951         int64_t                 duetime;
3952         uint32_t                period;
3953         kdpc                    *dpc;
3954 {
3955         struct timeval          tv;
3956         uint64_t                curtime;
3957         uint8_t                 pending;
3958
3959         if (timer == NULL)
3960                 return (FALSE);
3961
3962         mtx_lock(&ntoskrnl_dispatchlock);
3963
3964         if (timer->k_header.dh_inserted == TRUE) {
3965                 ntoskrnl_remove_timer(timer);
3966 #ifdef NTOSKRNL_DEBUG_TIMERS
3967                 ntoskrnl_timer_cancels++;
3968 #endif
3969                 timer->k_header.dh_inserted = FALSE;
3970                 pending = TRUE;
3971         } else
3972                 pending = FALSE;
3973
3974         timer->k_duetime = duetime;
3975         timer->k_period = period;
3976         timer->k_header.dh_sigstate = FALSE;
3977         timer->k_dpc = dpc;
3978
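        /*
         * A negative duetime is relative to the current time and
         * a positive one is an absolute system time, both counted
         * in 100-nanosecond units.
         */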
3979         if (duetime < 0) {
3980                 tv.tv_sec = - (duetime) / 10000000;
3981                 tv.tv_usec = (- (duetime) / 10) -
3982                     (tv.tv_sec * 1000000);
3983         } else {
3984                 ntoskrnl_time(&curtime);
3985                 if (duetime < curtime)
3986                         tv.tv_sec = tv.tv_usec = 0;
3987                 else {
3988                         tv.tv_sec = ((duetime) - curtime) / 10000000;
3989                         tv.tv_usec = ((duetime) - curtime) / 10 -
3990                             (tv.tv_sec * 1000000);
3991                 }
3992         }
3993
3994         timer->k_header.dh_inserted = TRUE;
3995         ntoskrnl_insert_timer(timer, tvtohz(&tv));
3996 #ifdef NTOSKRNL_DEBUG_TIMERS
3997         ntoskrnl_timer_sets++;
3998 #endif
3999
4000         mtx_unlock(&ntoskrnl_dispatchlock);
4001
4002         return (pending);
4003 }
4004
4005 uint8_t
4006 KeSetTimer(timer, duetime, dpc)
4007         ktimer                  *timer;
4008         int64_t                 duetime;
4009         kdpc                    *dpc;
4010 {
4011         return (KeSetTimerEx(timer, duetime, 0, dpc));
4012 }
4013
4014 /*
4015  * The Windows DDK documentation seems to say that cancelling
4016  * a timer that has a DPC will result in the DPC also being
4017  * cancelled, but this isn't really the case.
4018  */
4019
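/*
 * A driver that needs the DPC stopped as well must remove it
 * explicitly, for example:
 *
 *	KeCancelTimer(&timer);
 *	KeRemoveQueueDpc(&dpc);
 */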
4020 uint8_t
4021 KeCancelTimer(timer)
4022         ktimer                  *timer;
4023 {
4024         uint8_t                 pending;
4025
4026         if (timer == NULL)
4027                 return (FALSE);
4028
4029         mtx_lock(&ntoskrnl_dispatchlock);
4030
4031         pending = timer->k_header.dh_inserted;
4032
4033         if (timer->k_header.dh_inserted == TRUE) {
4034                 timer->k_header.dh_inserted = FALSE;
4035                 ntoskrnl_remove_timer(timer);
4036 #ifdef NTOSKRNL_DEBUG_TIMERS
4037                 ntoskrnl_timer_cancels++;
4038 #endif
4039         }
4040
4041         mtx_unlock(&ntoskrnl_dispatchlock);
4042
4043         return (pending);
4044 }
4045
4046 uint8_t
4047 KeReadStateTimer(timer)
4048         ktimer                  *timer;
4049 {
4050         return (timer->k_header.dh_sigstate);
4051 }
4052
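/*
 * Sleep by arming a one-shot timer and waiting on it; the
 * interval follows the KeSetTimer() convention, where a negative
 * value is relative to now, in 100-nanosecond units.
 */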
4053 static int32_t
4054 KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
4055 {
4056         ktimer                  timer;
4057
4058         if (wait_mode != 0)
4059                 panic("invalid wait_mode %d", wait_mode);
4060
4061         KeInitializeTimer(&timer);
4062         KeSetTimer(&timer, *interval, NULL);
4063         KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);
4064
4065         return (STATUS_SUCCESS);
4066 }
4067
4068 static uint64_t
4069 KeQueryInterruptTime(void)
4070 {
4071         int ticks;
4072         struct timeval tv;
4073
4074         getmicrouptime(&tv);
4075
4076         ticks = tvtohz(&tv);
4077
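        /*
         * Scale ticks to 100-nanosecond units; (10000000 + hz - 1) / hz
         * is the length of one tick in those units, rounded up.
         */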
4078         return ((uint64_t)ticks * ((10000000 + hz - 1) / hz));
4079 }
4080
4081 static struct thread *
4082 KeGetCurrentThread(void)
4083 {
4084
4085         return (curthread);
4086 }
4087
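/*
 * Map between Windows thread priorities and the FreeBSD kernel
 * priority range: HIGH_PRIORITY maps to PRI_MIN_KERN,
 * LOW_PRIORITY to PRI_MAX_KERN and LOW_REALTIME_PRIORITY to the
 * midpoint. The previous priority is returned in the same
 * Windows terms.
 */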
4088 static int32_t
4089 KeSetPriorityThread(td, pri)
4090         struct thread   *td;
4091         int32_t         pri;
4092 {
4093         int32_t old;
4094
4095         if (td == NULL)
4096                 return (LOW_REALTIME_PRIORITY);
4097
4098         if (td->td_priority <= PRI_MIN_KERN)
4099                 old = HIGH_PRIORITY;
4100         else if (td->td_priority >= PRI_MAX_KERN)
4101                 old = LOW_PRIORITY;
4102         else
4103                 old = LOW_REALTIME_PRIORITY;
4104
4105         thread_lock(td);
4106         if (pri == HIGH_PRIORITY)
4107                 sched_prio(td, PRI_MIN_KERN);
4108         if (pri == LOW_REALTIME_PRIORITY)
4109                 sched_prio(td, PRI_MIN_KERN + (PRI_MAX_KERN - PRI_MIN_KERN) / 2);
4110         if (pri == LOW_PRIORITY)
4111                 sched_prio(td, PRI_MAX_KERN);
4112         thread_unlock(td);
4113
4114         return (old);
4115 }
4116
4117 static void
4118 dummy(void)
4119 {
4120         printf("ntoskrnl dummy called...\n");
4121 }
4122
4123
4124 image_patch_table ntoskrnl_functbl[] = {
4125         IMPORT_SFUNC(RtlZeroMemory, 2),
4126         IMPORT_SFUNC(RtlCopyMemory, 3),
4127         IMPORT_SFUNC(RtlCompareMemory, 3),
4128         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4129         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4130         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4131         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4132         IMPORT_SFUNC(RtlInitAnsiString, 2),
4133         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4134         IMPORT_SFUNC(RtlInitUnicodeString, 2),
4135         IMPORT_SFUNC(RtlFreeAnsiString, 1),
4136         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4137         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4138         IMPORT_CFUNC(sprintf, 0),
4139         IMPORT_CFUNC(vsprintf, 0),
4140         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4141         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4142         IMPORT_CFUNC(DbgPrint, 0),
4143         IMPORT_SFUNC(DbgBreakPoint, 0),
4144         IMPORT_SFUNC(KeBugCheckEx, 5),
4145         IMPORT_CFUNC(strncmp, 0),
4146         IMPORT_CFUNC(strcmp, 0),
4147         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4148         IMPORT_CFUNC(strncpy, 0),
4149         IMPORT_CFUNC(strcpy, 0),
4150         IMPORT_CFUNC(strlen, 0),
4151         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4152         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4153         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4154         IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
4155         IMPORT_CFUNC_MAP(strchr, index, 0),
4156         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4157         IMPORT_CFUNC(memcpy, 0),
4158         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4159         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4160         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4161         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4162         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4163         IMPORT_FFUNC(IofCallDriver, 2),
4164         IMPORT_FFUNC(IofCompleteRequest, 2),
4165         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4166         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4167         IMPORT_SFUNC(IoCancelIrp, 1),
4168         IMPORT_SFUNC(IoConnectInterrupt, 11),
4169         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4170         IMPORT_SFUNC(IoCreateDevice, 7),
4171         IMPORT_SFUNC(IoDeleteDevice, 1),
4172         IMPORT_SFUNC(IoGetAttachedDevice, 1),
4173         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4174         IMPORT_SFUNC(IoDetachDevice, 1),
4175         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4176         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4177         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4178         IMPORT_SFUNC(IoAllocateIrp, 2),
4179         IMPORT_SFUNC(IoReuseIrp, 2),
4180         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4181         IMPORT_SFUNC(IoFreeIrp, 1),
4182         IMPORT_SFUNC(IoInitializeIrp, 3),
4183         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4184         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4185         IMPORT_SFUNC(KeSynchronizeExecution, 3),
4186         IMPORT_SFUNC(KeWaitForSingleObject, 5),
4187         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4188         IMPORT_SFUNC(_allmul, 4),
4189         IMPORT_SFUNC(_alldiv, 4),
4190         IMPORT_SFUNC(_allrem, 4),
4191         IMPORT_RFUNC(_allshr, 0),
4192         IMPORT_RFUNC(_allshl, 0),
4193         IMPORT_SFUNC(_aullmul, 4),
4194         IMPORT_SFUNC(_aulldiv, 4),
4195         IMPORT_SFUNC(_aullrem, 4),
4196         IMPORT_RFUNC(_aullshr, 0),
4197         IMPORT_RFUNC(_aullshl, 0),
4198         IMPORT_CFUNC(atoi, 0),
4199         IMPORT_CFUNC(atol, 0),
4200         IMPORT_CFUNC(rand, 0),
4201         IMPORT_CFUNC(srand, 0),
4202         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4203         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4204         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4205         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4206         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4207         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4208         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4209         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4210         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4211         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4212         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4213         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4214         IMPORT_SFUNC(ExQueryDepthSList, 1),
4215         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4216                 InterlockedPopEntrySList, 1),
4217         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4218                 InterlockedPushEntrySList, 2),
4219         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4220         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4221         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4222         IMPORT_SFUNC(ExFreePool, 1),
4223 #ifdef __i386__
4224         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4225         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
4226         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4227 #else
4228         /*
4229          * For AMD64, we can get away with just mapping
4230          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4231          * because the calling conventions end up being the same.
4232          * On i386, we have to be careful because KfAcquireSpinLock()
4233          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4234          */
4235         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4236         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4237         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4238 #endif
4239         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4240         IMPORT_FFUNC(InterlockedIncrement, 1),
4241         IMPORT_FFUNC(InterlockedDecrement, 1),
4242         IMPORT_FFUNC(InterlockedExchange, 2),
4243         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4244         IMPORT_SFUNC(IoAllocateMdl, 5),
4245         IMPORT_SFUNC(IoFreeMdl, 1),
4246         IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
4247         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
4248         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4249         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4250         IMPORT_SFUNC(MmSizeOfMdl, 1),
4251         IMPORT_SFUNC(MmMapLockedPages, 2),
4252         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4253         IMPORT_SFUNC(MmUnmapLockedPages, 2),
4254         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4255         IMPORT_SFUNC(MmGetPhysicalAddress, 1),
4256         IMPORT_SFUNC(MmIsAddressValid, 1),
4257         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4258         IMPORT_SFUNC(MmUnmapIoSpace, 2),
4259         IMPORT_SFUNC(KeInitializeSpinLock, 1),
4260         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4261         IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
4262         IMPORT_SFUNC(IoGetDeviceProperty, 5),
4263         IMPORT_SFUNC(IoAllocateWorkItem, 1),
4264         IMPORT_SFUNC(IoFreeWorkItem, 1),
4265         IMPORT_SFUNC(IoQueueWorkItem, 4),
4266         IMPORT_SFUNC(ExQueueWorkItem, 2),
4267         IMPORT_SFUNC(ntoskrnl_workitem, 2),
4268         IMPORT_SFUNC(KeInitializeMutex, 2),
4269         IMPORT_SFUNC(KeReleaseMutex, 2),
4270         IMPORT_SFUNC(KeReadStateMutex, 1),
4271         IMPORT_SFUNC(KeInitializeEvent, 3),
4272         IMPORT_SFUNC(KeSetEvent, 3),
4273         IMPORT_SFUNC(KeResetEvent, 1),
4274         IMPORT_SFUNC(KeClearEvent, 1),
4275         IMPORT_SFUNC(KeReadStateEvent, 1),
4276         IMPORT_SFUNC(KeInitializeTimer, 1),
4277         IMPORT_SFUNC(KeInitializeTimerEx, 2),
4278         IMPORT_SFUNC(KeSetTimer, 3),
4279         IMPORT_SFUNC(KeSetTimerEx, 4),
4280         IMPORT_SFUNC(KeCancelTimer, 1),
4281         IMPORT_SFUNC(KeReadStateTimer, 1),
4282         IMPORT_SFUNC(KeInitializeDpc, 3),
4283         IMPORT_SFUNC(KeInsertQueueDpc, 3),
4284         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4285         IMPORT_SFUNC(KeSetImportanceDpc, 2),
4286         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4287         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4288         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4289         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4290         IMPORT_FFUNC(ObfDereferenceObject, 1),
4291         IMPORT_SFUNC(ZwClose, 1),
4292         IMPORT_SFUNC(PsCreateSystemThread, 7),
4293         IMPORT_SFUNC(PsTerminateSystemThread, 1),
4294         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4295         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4296         IMPORT_CFUNC(WmiTraceMessage, 0),
4297         IMPORT_SFUNC(KeQuerySystemTime, 1),
4298         IMPORT_CFUNC(KeTickCount, 0),
4299         IMPORT_SFUNC(KeDelayExecutionThread, 3),
4300         IMPORT_SFUNC(KeQueryInterruptTime, 0),
4301         IMPORT_SFUNC(KeGetCurrentThread, 0),
4302         IMPORT_SFUNC(KeSetPriorityThread, 2),
4303
4304         /*
4305          * This last entry is a catch-all for any function we haven't
4306          * implemented yet. The PE import list patching routine will
4307          * use it for any function that doesn't have an explicit match
4308          * in this table.
4309          */
4310
4311         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4312
4313         /* End of list. */
4314
4315         { NULL, NULL, NULL }
4316 };