"Fossies" - the Fresh Open Source Software Archive

Member "ndiswrapper-1.63/driver/ntoskernel.c" (3 May 2020, 71741 Bytes) of package /linux/misc/ndiswrapper-1.63.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "ntoskernel.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 1.62_vs_1.63.

/*
 *  Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 */

#include "ntoskernel.h"
#include "ndis.h"
#include "usb.h"
#include "pnp.h"
#include "loader.h"
#include "ntoskernel_exports.h"

/* MDLs describe a range of virtual addresses with an array of physical
 * page numbers right after the header. For different ranges of virtual
 * addresses, the number of physical-page entries differs (depending on
 * how many pages the range spans). To allocate MDLs from a pool, the
 * size has to be constant, so we assume that the largest range used by
 * a driver spans MDL_CACHE_PAGES; if a driver requests an MDL for a
 * bigger region, we allocate it with kmalloc; otherwise, we allocate
 * from the pool */

#define MDL_CACHE_PAGES 3
#define MDL_CACHE_SIZE (sizeof(struct mdl) + \
            (sizeof(PFN_NUMBER) * MDL_CACHE_PAGES))
struct wrap_mdl {
    struct nt_list list;
    struct mdl mdl[0];
};
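
/* Worked example of the sizing above: with MDL_CACHE_PAGES == 3,
 * MDL_CACHE_SIZE is sizeof(struct mdl) plus room for three PFN_NUMBER
 * entries, so a pooled MDL can map a buffer spanning at most three
 * physical pages; larger requests fall back to kmalloc. */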

/* everything here is for all drivers/devices - not per driver/device */
static spinlock_t dispatcher_lock;
spinlock_t ntoskernel_lock;
static void *mdl_cache;
static struct nt_list wrap_mdl_list;

static struct work_struct kdpc_work;
static void kdpc_worker(struct work_struct *dummy);

static struct nt_list kdpc_list;
static spinlock_t kdpc_list_lock;

static struct nt_list callback_objects;

struct nt_list object_list;

struct bus_driver {
    struct nt_list list;
    char name[MAX_DRIVER_NAME_LEN];
    struct driver_object drv_obj;
};

static struct nt_list bus_driver_list;

static struct work_struct ntos_work;
static struct nt_list ntos_work_list;
static spinlock_t ntos_work_lock;
static void ntos_work_worker(struct work_struct *dummy);
spinlock_t irp_cancel_lock;
static NT_SPIN_LOCK nt_list_lock;
static struct nt_slist wrap_timer_slist;
CCHAR cpu_count;

/* number of ticks (in 100ns units) from 1601 to when the system
 * booted, computed into wrap_ticks_to_boot */
u64 wrap_ticks_to_boot;

#if defined(CONFIG_X86_64)
static struct timer_list shared_data_timer;
struct kuser_shared_data kuser_shared_data;
#endif

WIN_SYMBOL_MAP("KeTickCount", &jiffies)
WIN_SYMBOL_MAP("KeNumberProcessors", &cpu_count)
WIN_SYMBOL_MAP("NlsMbCodePageTag", FALSE)

struct workqueue_struct *ntos_wq;

#ifdef WRAP_PREEMPT
DEFINE_PER_CPU(struct irql_info, irql_info);
#endif

#if defined(CONFIG_X86_64)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)
static void update_user_shared_data_proc(struct timer_list *tl)
#else
static void update_user_shared_data_proc(unsigned long data)
#endif
{
    /* timer is supposed to be scheduled every 10ms, but bigger
     * intervals seem to work (tried up to 50ms) */
    *((ULONG64 *)&kuser_shared_data.system_time) = ticks_1601();
    *((ULONG64 *)&kuser_shared_data.interrupt_time) =
        jiffies * TICKSPERSEC / HZ;
    *((ULONG64 *)&kuser_shared_data.tick) = jiffies;

    mod_timer(&shared_data_timer, jiffies + MSEC_TO_HZ(30));
}
#endif

void *allocate_object(ULONG size, enum common_object_type type,
              struct unicode_string *name)
{
    struct common_object_header *hdr;
    void *body;

    /* the header is allocated as a prefix to the object body */
    hdr = ExAllocatePoolWithTag(NonPagedPool, OBJECT_SIZE(size), 0);
    if (!hdr) {
        WARNING("couldn't allocate memory");
        return NULL;
    }
    memset(hdr, 0, OBJECT_SIZE(size));
    if (name) {
        hdr->name.buf = ExAllocatePoolWithTag(NonPagedPool,
                              name->max_length, 0);
        if (!hdr->name.buf) {
            ExFreePool(hdr);
            return NULL;
        }
        memcpy(hdr->name.buf, name->buf, name->max_length);
        hdr->name.length = name->length;
        hdr->name.max_length = name->max_length;
    }
    hdr->type = type;
    hdr->ref_count = 1;
    spin_lock_bh(&ntoskernel_lock);
    /* threads are looked up often (in KeWaitForXXX), so optimize
     * for fast lookups of threads */
    if (type == OBJECT_TYPE_NT_THREAD)
        InsertHeadList(&object_list, &hdr->list);
    else
        InsertTailList(&object_list, &hdr->list);
    spin_unlock_bh(&ntoskernel_lock);
    body = HEADER_TO_OBJECT(hdr);
    TRACE3("allocated hdr: %p, body: %p", hdr, body);
    return body;
}
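
/* Layout produced by allocate_object (assuming OBJECT_SIZE(size) is
 * sizeof(struct common_object_header) + size, which the code here
 * suggests but this file does not define):
 *
 *   hdr                            body = HEADER_TO_OBJECT(hdr)
 *   |                              |
 *   v                              v
 *   [ struct common_object_header | object body of 'size' bytes ]
 *
 * free_object() recovers the header via OBJECT_TO_HEADER(body). */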

static void free_object(void *object)
{
    struct common_object_header *hdr;

    hdr = OBJECT_TO_HEADER(object);
    spin_lock_bh(&ntoskernel_lock);
    RemoveEntryList(&hdr->list);
    spin_unlock_bh(&ntoskernel_lock);
    TRACE3("freed hdr: %p, body: %p", hdr, object);
    if (hdr->name.buf)
        ExFreePool(hdr->name.buf);
    ExFreePool(hdr);
}

static int add_bus_driver(const char *name)
{
    struct bus_driver *bus_driver;

    bus_driver = kzalloc(sizeof(*bus_driver), GFP_KERNEL);
    if (!bus_driver) {
        ERROR("couldn't allocate memory");
        return -ENOMEM;
    }
    strncpy(bus_driver->name, name, sizeof(bus_driver->name));
    bus_driver->name[sizeof(bus_driver->name)-1] = 0;
    spin_lock_bh(&ntoskernel_lock);
    InsertTailList(&bus_driver_list, &bus_driver->list);
    spin_unlock_bh(&ntoskernel_lock);
    TRACE1("bus driver %s is at %p", name, &bus_driver->drv_obj);
    return STATUS_SUCCESS;
}

struct driver_object *find_bus_driver(const char *name)
{
    struct bus_driver *bus_driver;
    struct driver_object *drv_obj;

    spin_lock_bh(&ntoskernel_lock);
    drv_obj = NULL;
    nt_list_for_each_entry(bus_driver, &bus_driver_list, list) {
        if (strcmp(bus_driver->name, name) == 0) {
            drv_obj = &bus_driver->drv_obj;
            break;
        }
    }
    spin_unlock_bh(&ntoskernel_lock);
    return drv_obj;
}

wfastcall struct nt_list *WIN_FUNC(ExfInterlockedInsertHeadList,3)
    (struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
{
    struct nt_list *first;
    unsigned long flags;

    ENTER5("head = %p, entry = %p", head, entry);
    nt_spin_lock_irqsave(lock, flags);
    first = InsertHeadList(head, entry);
    nt_spin_unlock_irqrestore(lock, flags);
    TRACE5("head = %p, old = %p", head, first);
    return first;
}

wfastcall struct nt_list *WIN_FUNC(ExInterlockedInsertHeadList,3)
    (struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
{
    ENTER5("%p", head);
    return ExfInterlockedInsertHeadList(head, entry, lock);
}

wfastcall struct nt_list *WIN_FUNC(ExfInterlockedInsertTailList,3)
    (struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
{
    struct nt_list *last;
    unsigned long flags;

    ENTER5("head = %p, entry = %p", head, entry);
    nt_spin_lock_irqsave(lock, flags);
    last = InsertTailList(head, entry);
    nt_spin_unlock_irqrestore(lock, flags);
    TRACE5("head = %p, old = %p", head, last);
    return last;
}

wfastcall struct nt_list *WIN_FUNC(ExInterlockedInsertTailList,3)
    (struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
{
    ENTER5("%p", head);
    return ExfInterlockedInsertTailList(head, entry, lock);
}

wfastcall struct nt_list *WIN_FUNC(ExfInterlockedRemoveHeadList,2)
    (struct nt_list *head, NT_SPIN_LOCK *lock)
{
    struct nt_list *ret;
    unsigned long flags;

    ENTER5("head = %p", head);
    nt_spin_lock_irqsave(lock, flags);
    ret = RemoveHeadList(head);
    nt_spin_unlock_irqrestore(lock, flags);
    TRACE5("head = %p, ret = %p", head, ret);
    return ret;
}

wfastcall struct nt_list *WIN_FUNC(ExInterlockedRemoveHeadList,2)
    (struct nt_list *head, NT_SPIN_LOCK *lock)
{
    ENTER5("%p", head);
    return ExfInterlockedRemoveHeadList(head, lock);
}

wfastcall struct nt_list *WIN_FUNC(ExfInterlockedRemoveTailList,2)
    (struct nt_list *head, NT_SPIN_LOCK *lock)
{
    struct nt_list *ret;
    unsigned long flags;

    ENTER5("head = %p", head);
    nt_spin_lock_irqsave(lock, flags);
    ret = RemoveTailList(head);
    nt_spin_unlock_irqrestore(lock, flags);
    TRACE5("head = %p, ret = %p", head, ret);
    return ret;
}

wfastcall struct nt_list *WIN_FUNC(ExInterlockedRemoveTailList,2)
    (struct nt_list *head, NT_SPIN_LOCK *lock)
{
    ENTER5("%p", head);
    return ExfInterlockedRemoveTailList(head, lock);
}

wfastcall void WIN_FUNC(InitializeSListHead,1)
    (nt_slist_header *head)
{
    memset(head, 0, sizeof(*head));
}

wfastcall struct nt_slist *WIN_FUNC(ExInterlockedPushEntrySList,3)
    (nt_slist_header *head, struct nt_slist *entry, NT_SPIN_LOCK *lock)
{
    struct nt_slist *ret;

    ret = PushEntrySList(head, entry, lock);
    return ret;
}

wstdcall struct nt_slist *WIN_FUNC(ExpInterlockedPushEntrySList,2)
    (nt_slist_header *head, struct nt_slist *entry)
{
    struct nt_slist *ret;

    ret = PushEntrySList(head, entry, &nt_list_lock);
    return ret;
}

wfastcall struct nt_slist *WIN_FUNC(InterlockedPushEntrySList,2)
    (nt_slist_header *head, struct nt_slist *entry)
{
    struct nt_slist *ret;

    ret = PushEntrySList(head, entry, &nt_list_lock);
    return ret;
}

wfastcall struct nt_slist *WIN_FUNC(ExInterlockedPopEntrySList,2)
    (nt_slist_header *head, NT_SPIN_LOCK *lock)
{
    struct nt_slist *ret;

    ret = PopEntrySList(head, lock);
    return ret;
}

wstdcall struct nt_slist *WIN_FUNC(ExpInterlockedPopEntrySList,1)
    (nt_slist_header *head)
{
    struct nt_slist *ret;

    ret = PopEntrySList(head, &nt_list_lock);
    return ret;
}

wfastcall struct nt_slist *WIN_FUNC(InterlockedPopEntrySList,1)
    (nt_slist_header *head)
{
    struct nt_slist *ret;

    ret = PopEntrySList(head, &nt_list_lock);
    return ret;
}

wstdcall USHORT WIN_FUNC(ExQueryDepthSList,1)
    (nt_slist_header *head)
{
    USHORT depth;
    ENTER5("%p", head);
    depth = head->depth;
    TRACE5("%d, %p", depth, head->next);
    return depth;
}

wfastcall LONG WIN_FUNC(InterlockedIncrement,1)
    (LONG volatile *val)
{
    return post_atomic_add(*val, 1);
}

wfastcall LONG WIN_FUNC(InterlockedDecrement,1)
    (LONG volatile *val)
{
    return post_atomic_add(*val, -1);
}

wfastcall LONG WIN_FUNC(InterlockedExchange,2)
    (LONG volatile *target, LONG val)
{
    return xchg(target, val);
}

wfastcall LONG WIN_FUNC(InterlockedCompareExchange,3)
    (LONG volatile *dest, LONG new, LONG old)
{
    return cmpxchg(dest, old, new);
}
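
/* Note the argument order above: the Windows prototype is
 * InterlockedCompareExchange(dest, exchange, comparand) - the new
 * value comes second - while the kernel's cmpxchg(ptr, old, new)
 * expects the comparand first, hence the swap in the call. The
 * atomic effect is:
 *
 *	LONG cur = *dest;
 *	if (cur == old)
 *		*dest = new;
 *	return cur;
 */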

wfastcall void WIN_FUNC(ExInterlockedAddLargeStatistic,2)
    (LARGE_INTEGER volatile *plint, ULONG n)
{
    unsigned long flags;

    local_irq_save(flags);
#ifdef CONFIG_X86_64
    __asm__ __volatile__(
        "\n"
        LOCK_PREFIX "add %1, %0\n\t"
        : "+m" (*plint)
        : "r" (n));
#else
    __asm__ __volatile__(
        "1:\t"
        "   movl %1, %%ebx\n\t"
        "   movl %%edx, %%ecx\n\t"
        "   addl %%eax, %%ebx\n\t"
        "   adcl $0, %%ecx\n\t"
            LOCK_PREFIX "cmpxchg8b %0\n\t"
        "   jnz 1b\n\t"
        : "+m" (*plint)
        : "m" (n), "A" (*plint)
        : "ebx", "ecx");
#endif
    local_irq_restore(flags);
}
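
/* How the 32-bit path above works: the "A" constraint preloads the
 * current 64-bit value into edx:eax; the loop computes value + n into
 * ecx:ebx (ebx = n + eax, with adcl propagating the carry into ecx),
 * and cmpxchg8b stores ecx:ebx only if edx:eax still matches memory.
 * On failure cmpxchg8b refreshes edx:eax with the current value and
 * the loop retries, giving an atomic 64-bit add on CPUs that lack a
 * 64-bit locked add. */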

static void initialize_object(struct dispatcher_header *dh, enum dh_type type,
                  int state)
{
    memset(dh, 0, sizeof(*dh));
    set_object_type(dh, type);
    dh->signal_state = state;
    InitializeListHead(&dh->wait_blocks);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)
static void timer_proc(struct timer_list *tl)
#else
static void timer_proc(unsigned long data)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)
    struct wrap_timer *wrap_timer = from_timer(wrap_timer, tl, timer);
#else
    struct wrap_timer *wrap_timer = (struct wrap_timer *)data;
#endif
    struct nt_timer *nt_timer;
    struct kdpc *kdpc;

    nt_timer = wrap_timer->nt_timer;
    TIMERENTER("%p(%p), %lu", wrap_timer, nt_timer, jiffies);
#ifdef TIMER_DEBUG
    BUG_ON(wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
    BUG_ON(nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
#endif
    KeSetEvent((struct nt_event *)nt_timer, 0, FALSE);
    if (wrap_timer->repeat)
        mod_timer(&wrap_timer->timer, jiffies + wrap_timer->repeat);
    kdpc = nt_timer->kdpc;
    if (kdpc)
        queue_kdpc(kdpc);
    TIMEREXIT(return);
}

void wrap_init_timer(struct nt_timer *nt_timer, enum timer_type type,
             struct ndis_mp_block *nmb)
{
    struct wrap_timer *wrap_timer;

    /* TODO: if a timer is initialized more than once, we allocate
     * memory for wrap_timer more than once for the same nt_timer,
     * wasting memory. We could skip the allocation when
     * nt_timer->wrap_timer_magic is already set, but that is not
     * guaranteed to be safe */
    TIMERENTER("%p", nt_timer);
    /* we allocate memory for wrap_timer behind the driver's back and
     * there is no NDIS/DDK function where this memory can be freed,
     * so we use slack_kzalloc so it gets freed when the driver is
     * unloaded */
    if (nmb)
        wrap_timer = kzalloc(sizeof(*wrap_timer), irql_gfp());
    else
        wrap_timer = slack_kzalloc(sizeof(*wrap_timer));
    if (!wrap_timer) {
        ERROR("couldn't allocate memory for timer");
        return;
    }

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)
    init_timer(&wrap_timer->timer);
    wrap_timer->timer.function = timer_proc;
    wrap_timer->timer.data = (unsigned long)wrap_timer;
#else
    timer_setup(&wrap_timer->timer, timer_proc, 0);
#endif
    wrap_timer->nt_timer = nt_timer;
#ifdef TIMER_DEBUG
    wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
#endif
    nt_timer->wrap_timer = wrap_timer;
    nt_timer->kdpc = NULL;
    initialize_object(&nt_timer->dh, (enum dh_type)type, 0);
    nt_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
    TIMERTRACE("timer %p (%p)", wrap_timer, nt_timer);
    spin_lock_bh(&ntoskernel_lock);
    if (nmb) {
        wrap_timer->slist.next = nmb->wnd->wrap_timer_slist.next;
        nmb->wnd->wrap_timer_slist.next = &wrap_timer->slist;
    } else {
        wrap_timer->slist.next = wrap_timer_slist.next;
        wrap_timer_slist.next = &wrap_timer->slist;
    }
    spin_unlock_bh(&ntoskernel_lock);
    TIMEREXIT(return);
}

wstdcall void WIN_FUNC(KeInitializeTimerEx,2)
    (struct nt_timer *nt_timer, enum timer_type type)
{
    TIMERENTER("%p", nt_timer);
    wrap_init_timer(nt_timer, type, NULL);
}

wstdcall void WIN_FUNC(KeInitializeTimer,1)
    (struct nt_timer *nt_timer)
{
    TIMERENTER("%p", nt_timer);
    wrap_init_timer(nt_timer, NotificationTimer, NULL);
}

/* expires and repeat are in HZ */
BOOLEAN wrap_set_timer(struct nt_timer *nt_timer, unsigned long expires_hz,
               unsigned long repeat_hz, struct kdpc *kdpc)
{
    struct wrap_timer *wrap_timer;

    TIMERENTER("%p, %lu, %lu, %p, %lu",
           nt_timer, expires_hz, repeat_hz, kdpc, jiffies);

    wrap_timer = nt_timer->wrap_timer;
    TIMERTRACE("%p", wrap_timer);
#ifdef TIMER_DEBUG
    if (wrap_timer->nt_timer != nt_timer)
        WARNING("bad timers: %p, %p, %p", wrap_timer, nt_timer,
            wrap_timer->nt_timer);
    if (nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
        WARNING("buggy Windows driver didn't initialize timer %p",
            nt_timer);
        return FALSE;
    }
    if (wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
        WARNING("timer %p is not initialized (%lx)?",
            wrap_timer, wrap_timer->wrap_timer_magic);
        wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
    }
#endif
    KeClearEvent((struct nt_event *)nt_timer);
    nt_timer->kdpc = kdpc;
    wrap_timer->repeat = repeat_hz;
    if (mod_timer(&wrap_timer->timer, jiffies + expires_hz))
        TIMEREXIT(return TRUE);
    else
        TIMEREXIT(return FALSE);
}

wstdcall BOOLEAN WIN_FUNC(KeSetTimerEx,4)
    (struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
     LONG period_ms, struct kdpc *kdpc)
{
    unsigned long expires_hz, repeat_hz;

    TIMERENTER("%p, %lld, %d", nt_timer, duetime_ticks, period_ms);
    expires_hz = SYSTEM_TIME_TO_HZ(duetime_ticks);
    repeat_hz = MSEC_TO_HZ(period_ms);
    return wrap_set_timer(nt_timer, expires_hz, repeat_hz, kdpc);
}
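
/* Hedged usage sketch (not compiled): arming a periodic timer through
 * the functions above. my_timer, my_dpc and my_dpc_func are
 * hypothetical, and the negative due time assumes SYSTEM_TIME_TO_HZ
 * honors the DDK convention that negative times are relative, in
 * 100ns units. */
#if 0
static struct nt_timer my_timer;
static struct kdpc my_dpc;

static void example_arm_timer(void)
{
    LARGE_INTEGER due = -1000000;	/* 100ms relative (100ns units) */

    KeInitializeTimer(&my_timer);
    KeInitializeDpc(&my_dpc, my_dpc_func, NULL);
    /* due time, then a 100ms period; my_dpc_func runs on each expiry */
    KeSetTimerEx(&my_timer, due, 100, &my_dpc);
}
#endif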

wstdcall BOOLEAN WIN_FUNC(KeSetTimer,3)
    (struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
     struct kdpc *kdpc)
{
    TIMERENTER("%p, %lld, %p", nt_timer, duetime_ticks, kdpc);
    return KeSetTimerEx(nt_timer, duetime_ticks, 0, kdpc);
}

wstdcall BOOLEAN WIN_FUNC(KeCancelTimer,1)
    (struct nt_timer *nt_timer)
{
    struct wrap_timer *wrap_timer;
    int ret;

    TIMERENTER("%p", nt_timer);
    wrap_timer = nt_timer->wrap_timer;
    if (!wrap_timer) {
        ERROR("invalid wrap_timer");
        return TRUE;
    }
#ifdef TIMER_DEBUG
    BUG_ON(wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
#endif
    /* disable the timer before deleting it so that a periodic timer
     * won't be re-armed after deletion */
    wrap_timer->repeat = 0;
    ret = del_timer_sync(&wrap_timer->timer);
    /* the documentation for KeCancelTimer suggests the DPC is
     * dequeued, but actually the DPC is left to run */
    if (ret)
        TIMEREXIT(return TRUE);
    else
        TIMEREXIT(return FALSE);
}

wstdcall BOOLEAN WIN_FUNC(KeReadStateTimer,1)
    (struct nt_timer *nt_timer)
{
    if (nt_timer->dh.signal_state)
        return TRUE;
    else
        return FALSE;
}

wstdcall void WIN_FUNC(KeInitializeDpc,3)
    (struct kdpc *kdpc, void *func, void *ctx)
{
    ENTER3("%p, %p, %p", kdpc, func, ctx);
    memset(kdpc, 0, sizeof(*kdpc));
    kdpc->func = func;
    kdpc->ctx = ctx;
    InitializeListHead(&kdpc->list);
}

static void kdpc_worker(struct work_struct *dummy)
{
    struct nt_list *entry;
    struct kdpc *kdpc;
    unsigned long flags;
    KIRQL irql;

    WORKENTER("");
    irql = raise_irql(DISPATCH_LEVEL);
    while (1) {
        spin_lock_irqsave(&kdpc_list_lock, flags);
        entry = RemoveHeadList(&kdpc_list);
        if (entry) {
            kdpc = container_of(entry, struct kdpc, list);
            assert(kdpc->queued);
            kdpc->queued = 0;
        } else
            kdpc = NULL;
        spin_unlock_irqrestore(&kdpc_list_lock, flags);
        if (!kdpc)
            break;
        WORKTRACE("%p, %p, %p, %p, %p", kdpc, kdpc->func, kdpc->ctx,
              kdpc->arg1, kdpc->arg2);
        assert_irql(_irql_ == DISPATCH_LEVEL);
        LIN2WIN4(kdpc->func, kdpc, kdpc->ctx, kdpc->arg1, kdpc->arg2);
        assert_irql(_irql_ == DISPATCH_LEVEL);
    }
    lower_irql(irql);
    WORKEXIT(return);
}

wstdcall void WIN_FUNC(KeFlushQueuedDpcs,0)
    (void)
{
    kdpc_worker(NULL);
}

BOOLEAN queue_kdpc(struct kdpc *kdpc)
{
    BOOLEAN ret;
    unsigned long flags;

    WORKENTER("%p", kdpc);
    spin_lock_irqsave(&kdpc_list_lock, flags);
    if (kdpc->queued)
        ret = FALSE;
    else {
        if (unlikely(kdpc->importance == HighImportance))
            InsertHeadList(&kdpc_list, &kdpc->list);
        else
            InsertTailList(&kdpc_list, &kdpc->list);
        kdpc->queued = 1;
        ret = TRUE;
    }
    spin_unlock_irqrestore(&kdpc_list_lock, flags);
    if (ret == TRUE)
        queue_work(ntos_wq, &kdpc_work);
    WORKTRACE("%d", ret);
    return ret;
}

BOOLEAN dequeue_kdpc(struct kdpc *kdpc)
{
    BOOLEAN ret;
    unsigned long flags;

    WORKENTER("%p", kdpc);
    spin_lock_irqsave(&kdpc_list_lock, flags);
    if (kdpc->queued) {
        RemoveEntryList(&kdpc->list);
        kdpc->queued = 0;
        ret = TRUE;
    } else
        ret = FALSE;
    spin_unlock_irqrestore(&kdpc_list_lock, flags);
    WORKTRACE("%d", ret);
    return ret;
}

wstdcall BOOLEAN WIN_FUNC(KeInsertQueueDpc,3)
    (struct kdpc *kdpc, void *arg1, void *arg2)
{
    WORKENTER("%p, %p, %p", kdpc, arg1, arg2);
    kdpc->arg1 = arg1;
    kdpc->arg2 = arg2;
    return queue_kdpc(kdpc);
}
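
/* Hedged usage sketch (not compiled): the DPC flow through the
 * functions above. The names are hypothetical; the callback runs
 * later from kdpc_worker at DISPATCH_LEVEL and receives the kdpc,
 * its ctx, and the two queue-time arguments. */
#if 0
static wstdcall void my_dpc_func(struct kdpc *kdpc, void *ctx,
                 void *arg1, void *arg2)
{
    /* deferred work at DISPATCH_LEVEL */
}

static struct kdpc my_dpc;

static void example_queue_dpc(void *ctx, void *arg1, void *arg2)
{
    KeInitializeDpc(&my_dpc, my_dpc_func, ctx);
    KeInsertQueueDpc(&my_dpc, arg1, arg2);	/* FALSE if already queued */
}
#endif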

wstdcall BOOLEAN WIN_FUNC(KeRemoveQueueDpc,1)
    (struct kdpc *kdpc)
{
    return dequeue_kdpc(kdpc);
}

wstdcall void WIN_FUNC(KeSetImportanceDpc,2)
    (struct kdpc *kdpc, enum kdpc_importance importance)
{
    kdpc->importance = importance;
}

static void ntos_work_worker(struct work_struct *dummy)
{
    struct ntos_work_item *ntos_work_item;
    struct nt_list *cur;

    while (1) {
        spin_lock_bh(&ntos_work_lock);
        cur = RemoveHeadList(&ntos_work_list);
        spin_unlock_bh(&ntos_work_lock);
        if (!cur)
            break;
        ntos_work_item = container_of(cur, struct ntos_work_item, list);
        WORKTRACE("%p: executing %p, %p, %p", current,
              ntos_work_item->func, ntos_work_item->arg1,
              ntos_work_item->arg2);
        LIN2WIN2(ntos_work_item->func, ntos_work_item->arg1,
             ntos_work_item->arg2);
        kfree(ntos_work_item);
    }
    WORKEXIT(return);
}

int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2)
{
    struct ntos_work_item *ntos_work_item;

    WORKENTER("adding work: %p, %p, %p", func, arg1, arg2);
    ntos_work_item = kmalloc(sizeof(*ntos_work_item), irql_gfp());
    if (!ntos_work_item) {
        ERROR("couldn't allocate memory");
        return -ENOMEM;
    }
    ntos_work_item->func = func;
    ntos_work_item->arg1 = arg1;
    ntos_work_item->arg2 = arg2;
    spin_lock_bh(&ntos_work_lock);
    InsertTailList(&ntos_work_list, &ntos_work_item->list);
    spin_unlock_bh(&ntos_work_lock);
    queue_work(ntos_wq, &ntos_work);
    WORKEXIT(return 0);
}

wstdcall void WIN_FUNC(KeInitializeSpinLock,1)
    (NT_SPIN_LOCK *lock)
{
    ENTER6("%p", lock);
    nt_spin_lock_init(lock);
}

wstdcall void WIN_FUNC(KeAcquireSpinLock,2)
    (NT_SPIN_LOCK *lock, KIRQL *irql)
{
    ENTER6("%p", lock);
    *irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
}

wstdcall void WIN_FUNC(KeReleaseSpinLock,2)
    (NT_SPIN_LOCK *lock, KIRQL oldirql)
{
    ENTER6("%p", lock);
    nt_spin_unlock_irql(lock, oldirql);
}

wstdcall void WIN_FUNC(KeAcquireSpinLockAtDpcLevel,1)
    (NT_SPIN_LOCK *lock)
{
    ENTER6("%p", lock);
    nt_spin_lock(lock);
}

wstdcall void WIN_FUNC(KeReleaseSpinLockFromDpcLevel,1)
    (NT_SPIN_LOCK *lock)
{
    ENTER6("%p", lock);
    nt_spin_unlock(lock);
}

wstdcall void WIN_FUNC(KeRaiseIrql,2)
    (KIRQL newirql, KIRQL *oldirql)
{
    ENTER6("%d", newirql);
    *oldirql = raise_irql(newirql);
}

wstdcall KIRQL WIN_FUNC(KeRaiseIrqlToDpcLevel,0)
    (void)
{
    return raise_irql(DISPATCH_LEVEL);
}

wstdcall void WIN_FUNC(KeLowerIrql,1)
    (KIRQL irql)
{
    ENTER6("%d", irql);
    lower_irql(irql);
}

wstdcall KIRQL WIN_FUNC(KeAcquireSpinLockRaiseToDpc,1)
    (NT_SPIN_LOCK *lock)
{
    ENTER6("%p", lock);
    return nt_spin_lock_irql(lock, DISPATCH_LEVEL);
}
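
/* Hedged usage sketch (not compiled): the canonical acquire/release
 * pairing for the spinlock wrappers above; my_lock is hypothetical.
 * Acquiring raises IRQL to DISPATCH_LEVEL and returns the previous
 * IRQL, which the release must restore. */
#if 0
static NT_SPIN_LOCK my_lock;

static void example_critical_section(void)
{
    KIRQL irql;

    KeInitializeSpinLock(&my_lock);
    KeAcquireSpinLock(&my_lock, &irql);
    /* ... touch data shared with a DPC or ISR ... */
    KeReleaseSpinLock(&my_lock, irql);
}
#endif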

wstdcall void *WIN_FUNC(ExAllocatePoolWithTag,3)
    (enum pool_type pool_type, SIZE_T size, ULONG tag)
{
    void *addr;

    ENTER4("pool_type: %d, size: %zu, tag: 0x%x", pool_type, size, tag);
    assert_irql(_irql_ <= DISPATCH_LEVEL);
    if (size < PAGE_SIZE)
        addr = kmalloc(size, irql_gfp());
    else {
        if (irql_gfp() & GFP_ATOMIC) {
            addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
                     PAGE_KERNEL);
            TRACE1("%p, %zu", addr, size);
        } else {
            addr = vmalloc(size);
            TRACE1("%p, %zu", addr, size);
        }
    }
    DBG_BLOCK(1) {
        if (addr)
            TRACE4("addr: %p, %zu", addr, size);
        else
            TRACE1("failed: %zu", size);
    }
    return addr;
}
WIN_FUNC_DECL(ExAllocatePoolWithTag,3)

wstdcall void WIN_FUNC(ExFreePoolWithTag,2)
    (void *addr, ULONG tag)
{
    TRACE4("%p", addr);
    if ((unsigned long)addr < VMALLOC_START ||
        (unsigned long)addr >= VMALLOC_END)
        kfree(addr);
    else
        vfree(addr);

    EXIT4(return);
}

wstdcall void WIN_FUNC(ExFreePool,1)
    (void *addr)
{
    ExFreePoolWithTag(addr, 0);
}
WIN_FUNC_DECL(ExFreePool,1)

wstdcall void WIN_FUNC(ExInitializeNPagedLookasideList,7)
    (struct npaged_lookaside_list *lookaside,
     LOOKASIDE_ALLOC_FUNC *alloc_func, LOOKASIDE_FREE_FUNC *free_func,
     ULONG flags, SIZE_T size, ULONG tag, USHORT depth)
{
    ENTER3("lookaside: %p, size: %zu, flags: %u, head: %p, "
           "alloc: %p, free: %p", lookaside, size, flags,
           lookaside, alloc_func, free_func);

    memset(lookaside, 0, sizeof(*lookaside));

    lookaside->size = size;
    lookaside->tag = tag;
    lookaside->depth = 4;
    lookaside->maxdepth = 256;
    lookaside->pool_type = NonPagedPool;

    if (alloc_func)
        lookaside->alloc_func = alloc_func;
    else
        lookaside->alloc_func = WIN_FUNC_PTR(ExAllocatePoolWithTag,3);
    if (free_func)
        lookaside->free_func = free_func;
    else
        lookaside->free_func = WIN_FUNC_PTR(ExFreePool,1);

#ifndef CONFIG_X86_64
    nt_spin_lock_init(&lookaside->obsolete);
#endif
    EXIT3(return);
}

wstdcall void WIN_FUNC(ExDeleteNPagedLookasideList,1)
    (struct npaged_lookaside_list *lookaside)
{
    struct nt_slist *entry;

    ENTER3("lookaside = %p", lookaside);
    while ((entry = ExpInterlockedPopEntrySList(&lookaside->head)))
        LIN2WIN1(lookaside->free_func, entry);
    EXIT3(return);
}

wstdcall NTSTATUS WIN_FUNC(ExCreateCallback,4)
    (struct callback_object **object, struct object_attributes *attributes,
     BOOLEAN create, BOOLEAN allow_multiple_callbacks)
{
    struct callback_object *obj;

    ENTER2("");
    spin_lock_bh(&ntoskernel_lock);
    nt_list_for_each_entry(obj, &callback_objects, callback_funcs) {
        if (obj->attributes == attributes) {
            spin_unlock_bh(&ntoskernel_lock);
            *object = obj;
            return STATUS_SUCCESS;
        }
    }
    spin_unlock_bh(&ntoskernel_lock);
    obj = allocate_object(sizeof(struct callback_object),
                  OBJECT_TYPE_CALLBACK, NULL);
    if (!obj)
        EXIT2(return STATUS_INSUFFICIENT_RESOURCES);
    InitializeListHead(&obj->callback_funcs);
    nt_spin_lock_init(&obj->lock);
    obj->allow_multiple_callbacks = allow_multiple_callbacks;
    obj->attributes = attributes;
    *object = obj;
    EXIT2(return STATUS_SUCCESS);
}

wstdcall void *WIN_FUNC(ExRegisterCallback,3)
    (struct callback_object *object, PCALLBACK_FUNCTION func, void *context)
{
    struct callback_func *callback;
    KIRQL irql;

    ENTER2("");
    irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
    if (object->allow_multiple_callbacks == FALSE &&
        !IsListEmpty(&object->callback_funcs)) {
        nt_spin_unlock_irql(&object->lock, irql);
        EXIT2(return NULL);
    }
    nt_spin_unlock_irql(&object->lock, irql);
    callback = kmalloc(sizeof(*callback), GFP_KERNEL);
    if (!callback) {
        ERROR("couldn't allocate memory");
        return NULL;
    }
    callback->func = func;
    callback->context = context;
    callback->object = object;
    irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
    InsertTailList(&object->callback_funcs, &callback->list);
    nt_spin_unlock_irql(&object->lock, irql);
    EXIT2(return callback);
}

wstdcall void WIN_FUNC(ExUnregisterCallback,1)
    (struct callback_func *callback)
{
    struct callback_object *object;
    KIRQL irql;

    ENTER3("%p", callback);
    if (!callback)
        return;
    object = callback->object;
    irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
    RemoveEntryList(&callback->list);
    nt_spin_unlock_irql(&object->lock, irql);
    kfree(callback);
    return;
}

wstdcall void WIN_FUNC(ExNotifyCallback,3)
    (struct callback_object *object, void *arg1, void *arg2)
{
    struct callback_func *callback;
    KIRQL irql;

    ENTER3("%p", object);
    irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
    nt_list_for_each_entry(callback, &object->callback_funcs, list) {
        LIN2WIN3(callback->func, callback->context, arg1, arg2);
    }
    nt_spin_unlock_irql(&object->lock, irql);
    return;
}

/* check and set signaled state; should be called with dispatcher_lock held */
/* @grab indicates whether the object should be grabbed or only checked
 * - note that a semaphore may stay in signaled state for multiple
 * 'grabs' if the count is > 1 */
static int grab_object(struct dispatcher_header *dh,
               struct task_struct *thread, int grab)
{
    EVENTTRACE("%p, %p, %d, %d", dh, thread, grab, dh->signal_state);
    if (unlikely(is_mutex_object(dh))) {
        struct nt_mutex *nt_mutex;
        nt_mutex = container_of(dh, struct nt_mutex, dh);
        EVENTTRACE("%p, %p, %d, %p, %d", nt_mutex,
               nt_mutex->owner_thread, dh->signal_state,
               thread, grab);
        /* invariant: either no thread owns the mutex
         * (signal_state == 1) or some thread owns it
         * (signal_state < 1) */
        assert((dh->signal_state == 1 && nt_mutex->owner_thread == NULL) ||
               (dh->signal_state < 1 && nt_mutex->owner_thread != NULL));
        /* grabbable if no thread owns the mutex or this thread
         * already owns it */
        if ((dh->signal_state == 1 && nt_mutex->owner_thread == NULL) ||
            nt_mutex->owner_thread == thread) {
            if (grab) {
                dh->signal_state--;
                nt_mutex->owner_thread = thread;
            }
            EVENTEXIT(return 1);
        }
    } else if (dh->signal_state > 0) {
        /* to grab, decrement signal_state for synchronization
         * or semaphore objects */
        if (grab && (is_synch_object(dh) || is_semaphore_object(dh)))
            dh->signal_state--;
        EVENTEXIT(return 1);
    }
    EVENTEXIT(return 0);
}
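
/* Summary of grab_object() semantics, per dispatcher object type:
 * - mutex: grabbable when no thread owns it (signal_state == 1) or
 *   when the owner re-enters; a grab decrements signal_state and
 *   records the owner, so recursive grabs drive the state below 0 and
 *   KeReleaseMutex must be called once per grab.
 * - synchronization events and semaphores: grabbable while
 *   signal_state > 0, and a grab decrements the state.
 * - notification events/timers: grabbable while signal_state > 0, but
 *   the state is left set, so all waiters are released. */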

/* this function should be called holding dispatcher_lock */
static void object_signaled(struct dispatcher_header *dh)
{
    struct nt_list *cur, *next;
    struct wait_block *wb;

    EVENTENTER("%p", dh);
    nt_list_for_each_safe(cur, next, &dh->wait_blocks) {
        wb = container_of(cur, struct wait_block, list);
        assert(wb->thread != NULL);
        assert(wb->object == NULL);
        if (!grab_object(dh, wb->thread, 1))
            continue;
        EVENTTRACE("%p (%p): waking %p", dh, wb, wb->thread);
        RemoveEntryList(cur);
        wb->object = dh;
        *(wb->wait_done) = 1;
        wake_up_process(wb->thread);
    }
    EVENTEXIT(return);
}

wstdcall NTSTATUS WIN_FUNC(KeWaitForMultipleObjects,8)
    (ULONG count, void *object[], enum wait_type wait_type,
     KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode,
     BOOLEAN alertable, LARGE_INTEGER *timeout,
     struct wait_block *wait_block_array)
{
    int i, res = 0, wait_count, wait_done;
    typeof(jiffies) wait_hz = 0;
    struct wait_block *wb, wb_array[THREAD_WAIT_OBJECTS];
    struct dispatcher_header *dh;
    KIRQL irql = current_irql();

    EVENTENTER("%p, %d, %u, %p", current, count, wait_type, timeout);

    if (count > MAX_WAIT_OBJECTS ||
        (count > THREAD_WAIT_OBJECTS && wait_block_array == NULL))
        EVENTEXIT(return STATUS_INVALID_PARAMETER);

    if (wait_block_array == NULL)
        wb = wb_array;
    else
        wb = wait_block_array;

    /* If *timeout == 0: in the case of WaitAny, if an object can
     * be grabbed (object is in signaled state), grab it and
     * return. In the case of WaitAll, we have to first make sure
     * all objects can be grabbed. If any/some of them can't be
     * grabbed, we either return STATUS_TIMEOUT or wait for them,
     * depending on the timeout. If all of them can be grabbed, we
     * grab them in the next loop below */

    spin_lock_bh(&dispatcher_lock);
    for (i = wait_count = 0; i < count; i++) {
        dh = object[i];
        EVENTTRACE("%p: event %p (%d)", current, dh, dh->signal_state);
        /* wait_type == 1 for WaitAny, 0 for WaitAll */
        if (grab_object(dh, current, wait_type)) {
            if (wait_type == WaitAny) {
                spin_unlock_bh(&dispatcher_lock);
                EVENTEXIT(return STATUS_WAIT_0 + i);
            }
        } else {
            EVENTTRACE("%p: wait for %p", current, dh);
            wait_count++;
        }
    }

    if (timeout && *timeout == 0 && wait_count) {
        spin_unlock_bh(&dispatcher_lock);
        EVENTEXIT(return STATUS_TIMEOUT);
    }

    /* get the list of objects the thread needs to wait on and add
     * the thread to the wait list of each such object */
    /* if *timeout == 0, this step will grab all the objects */
    wait_done = 0;
    for (i = 0; i < count; i++) {
        dh = object[i];
        EVENTTRACE("%p: event %p (%d)", current, dh, dh->signal_state);
        wb[i].object = NULL;
        if (grab_object(dh, current, 1)) {
            EVENTTRACE("%p: no wait for %p (%d)",
                   current, dh, dh->signal_state);
            /* mark that we are not waiting on this object */
            wb[i].thread = NULL;
        } else {
            wb[i].wait_done = &wait_done;
            wb[i].thread = current;
            EVENTTRACE("%p: wait for %p", current, dh);
            InsertTailList(&dh->wait_blocks, &wb[i].list);
        }
    }
    spin_unlock_bh(&dispatcher_lock);
    if (wait_count == 0)
        EVENTEXIT(return STATUS_SUCCESS);

    assert(timeout == NULL || *timeout != 0);
    if (timeout == NULL)
        wait_hz = 0;
    else
        wait_hz = SYSTEM_TIME_TO_HZ(*timeout);

    if (irql >= DISPATCH_LEVEL) {
        WARNING("attempt to wait with irql %d", irql);
        EVENTEXIT(return STATUS_INVALID_PARAMETER);
    }
    EVENTTRACE("%p: sleep for %ld on %p", current, wait_hz, &wait_done);
    /* we don't honor 'alertable' - according to its description, a
     * thread may be alerted in some circumstances even when waiting
     * in non-alertable state */
    while (wait_count) {
        res = wait_condition(wait_done, wait_hz, TASK_INTERRUPTIBLE);
        spin_lock_bh(&dispatcher_lock);
        EVENTTRACE("%p woke up: %d, %d", current, res, wait_done);
        /* the event may have been set by the time wait_condition
         * returned and the spinlock was obtained, so don't rely on
         * the value of 'res' - check the event status instead */
        if (!wait_done) {
            assert(res <= 0);
            /* timed out or interrupted; remove from wait list */
            for (i = 0; i < count; i++) {
                if (!wb[i].thread)
                    continue;
                EVENTTRACE("%p: timedout, dequeue %p (%p)",
                       current, object[i], wb[i].object);
                assert(wb[i].object == NULL);
                RemoveEntryList(&wb[i].list);
            }
            spin_unlock_bh(&dispatcher_lock);
            if (res < 0)
                EVENTEXIT(return STATUS_ALERTED);
            else
                EVENTEXIT(return STATUS_TIMEOUT);
        }
        assert(res > 0);
        /* woken because object(s) signaled */
        for (i = 0; wait_count && i < count; i++) {
            if (!wb[i].thread || !wb[i].object)
                continue;
            DBG_BLOCK(1) {
                if (wb[i].object != object[i]) {
                    EVENTTRACE("oops %p != %p",
                           wb[i].object, object[i]);
                    continue;
                }
            }
            wait_count--;
            if (wait_type == WaitAny) {
                int j;
                /* done; remove from rest of wait list */
                for (j = i + 1; j < count; j++) {
                    if (wb[j].thread && !wb[j].object)
                        RemoveEntryList(&wb[j].list);
                }
                spin_unlock_bh(&dispatcher_lock);
                EVENTEXIT(return STATUS_WAIT_0 + i);
            }
        }
        wait_done = 0;
        spin_unlock_bh(&dispatcher_lock);
        if (wait_count == 0)
            EVENTEXIT(return STATUS_SUCCESS);

        /* this thread is still waiting for more objects, so
         * let it wait for the remaining time and those objects */
        if (timeout)
            wait_hz = res;
        else
            wait_hz = 0;
    }
    /* should never reach here, but the compiler wants a return value */
    ERROR("%p: wait_hz: %ld", current, wait_hz);
    EVENTEXIT(return STATUS_SUCCESS);
}

wstdcall NTSTATUS WIN_FUNC(KeWaitForSingleObject,5)
    (void *object, KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode,
     BOOLEAN alertable, LARGE_INTEGER *timeout)
{
    return KeWaitForMultipleObjects(1, &object, WaitAny, wait_reason,
                    wait_mode, alertable, timeout, NULL);
}
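
/* Hedged usage sketch (not compiled): the common blocking wait issued
 * through the wrapper above. my_event is hypothetical and
 * Executive/KernelMode are the usual DDK constants; a NULL timeout
 * blocks until the object is signaled. */
#if 0
static struct nt_event my_event;

static void example_wait_for_event(void)
{
    KeInitializeEvent(&my_event, NotificationEvent, FALSE);
    /* ... some other context calls KeSetEvent(&my_event, 0, FALSE) ... */
    KeWaitForSingleObject(&my_event, Executive, KernelMode, FALSE, NULL);
}
#endif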

wstdcall void WIN_FUNC(KeInitializeEvent,3)
    (struct nt_event *nt_event, enum event_type type, BOOLEAN state)
{
    EVENTENTER("event = %p, type = %d, state = %d", nt_event, type, state);
    initialize_object(&nt_event->dh, (enum dh_type)type, state);
    EVENTEXIT(return);
}

wstdcall LONG WIN_FUNC(KeSetEvent,3)
    (struct nt_event *nt_event, KPRIORITY incr, BOOLEAN wait)
{
    LONG old_state;

    EVENTENTER("%p, %d", nt_event, nt_event->dh.type);
    if (wait == TRUE)
        WARNING("wait = %d, not yet implemented", wait);
    spin_lock_bh(&dispatcher_lock);
    old_state = nt_event->dh.signal_state;
    nt_event->dh.signal_state = 1;
    if (old_state == 0)
        object_signaled(&nt_event->dh);
    spin_unlock_bh(&dispatcher_lock);
    EVENTEXIT(return old_state);
}

wstdcall void WIN_FUNC(KeClearEvent,1)
    (struct nt_event *nt_event)
{
    EVENTENTER("%p", nt_event);
    nt_event->dh.signal_state = 0;
    EVENTEXIT(return);
}

wstdcall LONG WIN_FUNC(KeResetEvent,1)
    (struct nt_event *nt_event)
{
    LONG old_state;

    EVENTENTER("%p", nt_event);
    old_state = xchg(&nt_event->dh.signal_state, 0);
    EVENTEXIT(return old_state);
}

wstdcall LONG WIN_FUNC(KeReadStateEvent,1)
    (struct nt_event *nt_event)
{
    LONG state;

    state = nt_event->dh.signal_state;
    EVENTTRACE("%d", state);
    return state;
}

wstdcall void WIN_FUNC(KeInitializeMutex,2)
    (struct nt_mutex *mutex, ULONG level)
{
    EVENTENTER("%p", mutex);
    initialize_object(&mutex->dh, MutexObject, 1);
    mutex->dh.size = sizeof(*mutex);
    InitializeListHead(&mutex->list);
    mutex->abandoned = FALSE;
    mutex->apc_disable = 1;
    mutex->owner_thread = NULL;
    EVENTEXIT(return);
}

wstdcall LONG WIN_FUNC(KeReleaseMutex,2)
    (struct nt_mutex *mutex, BOOLEAN wait)
{
    LONG ret;
    struct task_struct *thread;

    EVENTENTER("%p, %d, %p", mutex, wait, current);
    if (wait == TRUE)
        WARNING("wait: %d", wait);
    thread = current;
    spin_lock_bh(&dispatcher_lock);
    EVENTTRACE("%p, %p, %p, %d", mutex, thread, mutex->owner_thread,
           mutex->dh.signal_state);
    if ((mutex->owner_thread == thread) && (mutex->dh.signal_state <= 0)) {
        ret = mutex->dh.signal_state++;
        if (ret == 0) {
            mutex->owner_thread = NULL;
            object_signaled(&mutex->dh);
        }
    } else {
        ret = STATUS_MUTANT_NOT_OWNED;
        WARNING("invalid mutex: %p, %p, %p", mutex, mutex->owner_thread,
            thread);
    }
    EVENTTRACE("%p, %p, %p, %d", mutex, thread, mutex->owner_thread,
           mutex->dh.signal_state);
    spin_unlock_bh(&dispatcher_lock);
    EVENTEXIT(return ret);
}

wstdcall void WIN_FUNC(KeInitializeSemaphore,3)
    (struct nt_semaphore *semaphore, LONG count, LONG limit)
{
    EVENTENTER("%p: %d", semaphore, count);
    /* if limit > 1, we need to satisfy as many waits (until count
     * becomes 0); so we keep decrementing count every time a wait
     * is satisfied */
    initialize_object(&semaphore->dh, SemaphoreObject, count);
    semaphore->dh.size = sizeof(*semaphore);
    semaphore->limit = limit;
    EVENTEXIT(return);
}

wstdcall LONG WIN_FUNC(KeReleaseSemaphore,4)
    (struct nt_semaphore *semaphore, KPRIORITY incr, LONG adjustment,
     BOOLEAN wait)
{
    LONG ret;

    EVENTENTER("%p", semaphore);
    spin_lock_bh(&dispatcher_lock);
    ret = semaphore->dh.signal_state;
    assert(ret >= 0);
    if (semaphore->dh.signal_state + adjustment <= semaphore->limit)
        semaphore->dh.signal_state += adjustment;
    else {
        WARNING("releasing %d over limit %d", adjustment,
            semaphore->limit);
        semaphore->dh.signal_state = semaphore->limit;
    }
    if (semaphore->dh.signal_state > 0)
        object_signaled(&semaphore->dh);
    spin_unlock_bh(&dispatcher_lock);
    EVENTEXIT(return ret);
}
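
/* Worked example of the count/limit semantics above: a semaphore
 * initialized with count 0 and limit 2 starts non-signaled; each
 * KeReleaseSemaphore(sem, 0, 1, FALSE) raises signal_state by one
 * (capped at the limit, with a warning) and returns the previous
 * count, while each satisfied wait decrements the state again in
 * grab_object(). */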

wstdcall NTSTATUS WIN_FUNC(KeDelayExecutionThread,3)
    (KPROCESSOR_MODE wait_mode, BOOLEAN alertable, LARGE_INTEGER *interval)
{
    int res;
    long timeout;

    if (wait_mode != 0)
        ERROR("invalid wait_mode %d", wait_mode);

    timeout = SYSTEM_TIME_TO_HZ(*interval);
    EVENTTRACE("%p, %lld, %ld", current, *interval, timeout);
    if (timeout <= 0)
        EVENTEXIT(return STATUS_SUCCESS);

    if (alertable)
        set_current_state(TASK_INTERRUPTIBLE);
    else
        set_current_state(TASK_UNINTERRUPTIBLE);

    res = schedule_timeout(timeout);
    EVENTTRACE("%p, %d", current, res);
    if (res == 0)
        EVENTEXIT(return STATUS_SUCCESS);
    else
        EVENTEXIT(return STATUS_ALERTED);
}

wstdcall ULONGLONG WIN_FUNC(KeQueryInterruptTime,0)
    (void)
{
    EXIT5(return jiffies * TICKSPERJIFFY);
}

wstdcall ULONG WIN_FUNC(KeQueryTimeIncrement,0)
    (void)
{
    EXIT5(return TICKSPERSEC / HZ);
}

wstdcall void WIN_FUNC(KeQuerySystemTime,1)
    (LARGE_INTEGER *time)
{
    *time = ticks_1601();
    TRACE5("%llu, %lu", *time, jiffies);
}

wstdcall void WIN_FUNC(KeQueryTickCount,1)
    (LARGE_INTEGER *count)
{
    *count = jiffies;
}

wstdcall LARGE_INTEGER WIN_FUNC(KeQueryPerformanceCounter,1)
    (LARGE_INTEGER *counter)
{
    if (counter)
        *counter = HZ;
    return jiffies;
}

wstdcall KAFFINITY WIN_FUNC(KeQueryActiveProcessors,0)
    (void)
{
    int i, n;
    KAFFINITY bits = 0;
    n = num_online_cpus();
    for (i = 0; i < n; i++)
        bits = (bits << 1) | 1;
    return bits;
}
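
/* The loop above builds an affinity mask with the n low bits set,
 * i.e. the equivalent of (1 << num_online_cpus()) - 1; it assumes
 * online CPUs are numbered contiguously from 0. */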

struct nt_thread *get_current_nt_thread(void)
{
    struct task_struct *task = current;
    struct nt_thread *thread;
    struct common_object_header *header;

    TRACE6("task: %p", task);
    thread = NULL;
    spin_lock_bh(&ntoskernel_lock);
    nt_list_for_each_entry(header, &object_list, list) {
        TRACE6("%p, %d", header, header->type);
        if (header->type != OBJECT_TYPE_NT_THREAD)
            break;
        thread = HEADER_TO_OBJECT(header);
        TRACE6("%p, %p", thread, thread->task);
        if (thread->task == task)
            break;
        else
            thread = NULL;
    }
    spin_unlock_bh(&ntoskernel_lock);
    if (thread == NULL)
        TRACE4("couldn't find thread for task %p, %d", task, task->pid);
    TRACE6("%p", thread);
    return thread;
}

static struct task_struct *get_nt_thread_task(struct nt_thread *thread)
{
    struct task_struct *task;
    struct common_object_header *header;

    TRACE6("%p", thread);
    task = NULL;
    spin_lock_bh(&ntoskernel_lock);
    nt_list_for_each_entry(header, &object_list, list) {
        TRACE6("%p, %d", header, header->type);
        if (header->type != OBJECT_TYPE_NT_THREAD)
            break;
        if (thread == HEADER_TO_OBJECT(header)) {
            task = thread->task;
            break;
        }
    }
    spin_unlock_bh(&ntoskernel_lock);
    if (task == NULL)
        TRACE2("%p: couldn't find task for %p", current, thread);
    return task;
}

static struct nt_thread *create_nt_thread(struct task_struct *task)
{
    struct nt_thread *thread;
    thread = allocate_object(sizeof(*thread), OBJECT_TYPE_NT_THREAD, NULL);
    if (!thread) {
        ERROR("couldn't allocate thread object");
        EXIT2(return NULL);
    }
    thread->task = task;
    if (task)
        thread->pid = task->pid;
    else
        thread->pid = 0;
    nt_spin_lock_init(&thread->lock);
    InitializeListHead(&thread->irps);
    initialize_object(&thread->dh, ThreadObject, 0);
    thread->dh.size = sizeof(*thread);
    thread->prio = LOW_PRIORITY;
    return thread;
}

wstdcall struct nt_thread *WIN_FUNC(KeGetCurrentThread,0)
    (void)
{
    struct nt_thread *thread = get_current_nt_thread();
    TRACE2("%p, %p", thread, current);
    return thread;
}

wstdcall KPRIORITY WIN_FUNC(KeQueryPriorityThread,1)
    (struct nt_thread *thread)
{
    KPRIORITY prio;
    struct task_struct *task;

    TRACE2("%p", thread);
#ifdef CONFIG_X86_64
    /* the sis163u driver for amd64 passes 0x1f from a thread created
     * by PsCreateSystemThread - no idea what 0x1f is */
    if (thread == (void *)0x1f)
        thread = get_current_nt_thread();
#endif
    if (!thread) {
        TRACE2("invalid thread");
        EXIT2(return LOW_REALTIME_PRIORITY);
    }
    task = get_nt_thread_task(thread);
    if (!task) {
        TRACE2("couldn't find task for thread: %p", thread);
        EXIT2(return LOW_REALTIME_PRIORITY);
    }

    prio = thread->prio;

    TRACE2("%d", prio);
    return prio;
}

wstdcall KPRIORITY WIN_FUNC(KeSetPriorityThread,2)
    (struct nt_thread *thread, KPRIORITY prio)
{
    KPRIORITY old_prio;
    struct task_struct *task;

    TRACE2("thread: %p, priority = %u", thread, prio);
#ifdef CONFIG_X86_64
    if (thread == (void *)0x1f)
        thread = get_current_nt_thread();
#endif
    if (!thread) {
        TRACE2("invalid thread");
        EXIT2(return LOW_REALTIME_PRIORITY);
    }
    task = get_nt_thread_task(thread);
    if (!task) {
        TRACE2("couldn't find task for thread: %p", thread);
        EXIT2(return LOW_REALTIME_PRIORITY);
    }

    old_prio = thread->prio;
    thread->prio = prio;

    TRACE2("%d, %d", old_prio, thread->prio);
    return old_prio;
}
 1552 
 1553 struct thread_trampoline {
 1554     void (*func)(void *) wstdcall;
 1555     void *ctx;
 1556     struct nt_thread *thread;
 1557     struct completion started;
 1558 };
 1559 
 1560 static int ntdriver_thread(void *data)
 1561 {
 1562     struct thread_trampoline *thread_tramp = data;
 1563     /* yes, a tramp! */
 1564     typeof(thread_tramp->func) func = thread_tramp->func;
 1565     typeof(thread_tramp->ctx) ctx = thread_tramp->ctx;
 1566 
 1567     thread_tramp->thread->task = current;
 1568     thread_tramp->thread->pid = current->pid;
 1569     TRACE2("thread: %p, task: %p (%d)", thread_tramp->thread,
 1570            current, current->pid);
 1571     complete(&thread_tramp->started);
 1572 
 1573 #ifdef PF_NOFREEZE
 1574     current->flags |= PF_NOFREEZE;
 1575 #endif
 1576     strncpy(current->comm, "ntdriver", sizeof(current->comm));
 1577     current->comm[sizeof(current->comm)-1] = 0;
 1578     LIN2WIN1(func, ctx);
 1579     ERROR("task: %p", current);
 1580     return 0;
 1581 }
 1582 
 1583 wstdcall NTSTATUS WIN_FUNC(PsCreateSystemThread,7)
 1584     (void **handle, ULONG access, void *obj_attr, void *process,
 1585      void *client_id, void (*func)(void *) wstdcall, void *ctx)
 1586 {
 1587     struct thread_trampoline thread_tramp;
 1588 
 1589     ENTER2("handle = %p, access = %u, obj_attr = %p, process = %p, "
 1590            "client_id = %p, func = %p, context = %p", handle, access,
 1591            obj_attr, process, client_id, func, ctx);
 1592 
 1593     thread_tramp.thread = create_nt_thread(NULL);
 1594     if (!thread_tramp.thread) {
 1595         ERROR("couldn't allocate thread object");
 1596         EXIT2(return STATUS_RESOURCES);
 1597     }
 1598     TRACE2("thread: %p", thread_tramp.thread);
 1599     thread_tramp.func = func;
 1600     thread_tramp.ctx = ctx;
 1601     init_completion(&thread_tramp.started);
 1602 
 1603     thread_tramp.thread->task = kthread_run(ntdriver_thread,
 1604                         &thread_tramp, "ntdriver");
 1605     if (IS_ERR(thread_tramp.thread->task)) {
 1606         free_object(thread_tramp.thread);
 1607         EXIT2(return STATUS_FAILURE);
 1608     }
 1609     TRACE2("created task: %p", thread_tramp.thread->task);
 1610 
 1611     wait_for_completion(&thread_tramp.started);
 1612     *handle = OBJECT_TO_HEADER(thread_tramp.thread);
 1613     TRACE2("created thread: %p, %p", thread_tramp.thread, *handle);
 1614     EXIT2(return STATUS_SUCCESS);
 1615 }
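
      /* Usage sketch (illustrative only, not compiled in): how a Windows
       * driver would typically drive the thread API above. The names
       * 'example_thread_func' and 'example_start_worker' are hypothetical. */
      #if 0
      static wstdcall void example_thread_func(void *ctx)
      {
          /* ... do the driver's work ... */
          PsTerminateSystemThread(STATUS_SUCCESS);    /* does not return */
      }

      static NTSTATUS example_start_worker(void *ctx)
      {
          void *handle;
          NTSTATUS status;

          status = PsCreateSystemThread(&handle, 0, NULL, NULL, NULL,
                                        example_thread_func, ctx);
          if (status != STATUS_SUCCESS)
              return status;
          /* the handle only holds a reference; the thread runs on */
          ZwClose(handle);
          return STATUS_SUCCESS;
      }
      #endif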
 1616 
 1617 wstdcall NTSTATUS WIN_FUNC(PsTerminateSystemThread,1)
 1618     (NTSTATUS status)
 1619 {
 1620     struct nt_thread *thread;
 1621 
 1622     TRACE2("%p, %08X", current, status);
 1623     thread = get_current_nt_thread();
 1624     TRACE2("%p", thread);
 1625     if (thread) {
 1626         KeSetEvent((struct nt_event *)&thread->dh, 0, FALSE);
 1627         while (1) {
 1628             struct nt_list *ent;
 1629             struct irp *irp;
 1630             KIRQL irql;
 1631             irql = nt_spin_lock_irql(&thread->lock, DISPATCH_LEVEL);
 1632             ent = RemoveHeadList(&thread->irps);
 1633             nt_spin_unlock_irql(&thread->lock, irql);
 1634             if (!ent)
 1635                 break;
 1636             irp = container_of(ent, struct irp, thread_list);
 1637             IOTRACE("%p", irp);
 1638             IoCancelIrp(irp);
 1639         }
 1640         /* the driver may later query this status with
 1641          * ZwQueryInformationThread */
 1642         thread->status = status;
 1643     } else
 1644         ERROR("couldn't find thread for task: %p", current);
 1645 
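          /* complete_and_exit() does not return; the ERROR below fires
           * only if that assumption is ever broken */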
 1646     complete_and_exit(NULL, status);
 1647     ERROR("oops: %p, %d", thread->task, thread->pid);
 1648     return STATUS_FAILURE;
 1649 }
 1650 
 1651 wstdcall BOOLEAN WIN_FUNC(KeRemoveEntryDeviceQueue,2)
 1652     (struct kdevice_queue *dev_queue, struct kdevice_queue_entry *entry)
 1653 {
 1654     struct kdevice_queue_entry *e;
 1655     KIRQL irql;
 1656 
 1657     irql = nt_spin_lock_irql(&dev_queue->lock, DISPATCH_LEVEL);
 1658     nt_list_for_each_entry(e, &dev_queue->list, list) {
 1659         if (e == entry) {
 1660             RemoveEntryList(&e->list);
 1661             nt_spin_unlock_irql(&dev_queue->lock, irql);
 1662             return TRUE;
 1663         }
 1664     }
 1665     nt_spin_unlock_irql(&dev_queue->lock, irql);
 1666     return FALSE;
 1667 }
 1668 
 1669 wstdcall BOOLEAN WIN_FUNC(KeSynchronizeExecution,3)
 1670     (struct kinterrupt *interrupt, PKSYNCHRONIZE_ROUTINE synch_routine,
 1671      void *ctx)
 1672 {
 1673     BOOLEAN ret;
 1674     unsigned long flags;
 1675 
 1676     nt_spin_lock_irqsave(interrupt->actual_lock, flags);
 1677     ret = LIN2WIN1(synch_routine, ctx);
 1678     nt_spin_unlock_irqrestore(interrupt->actual_lock, flags);
 1679     TRACE6("%d", ret);
 1680     return ret;
 1681 }
 1682 
 1683 wstdcall BOOLEAN WIN_FUNC(KeRegisterBugCheckReasonCallback,4)
 1684     (void *callback_record, void *callback_routine, UINT reason,
 1685      char *component)
 1686 {
 1687     TRACE1("callback_record: %p, callback_routine: %p, reason: %d, "
 1688            "component: %s", callback_record, callback_routine, reason,
 1689            component);
 1690     TODO();
 1691     return FALSE;
 1692 }
 1693 
 1694 wstdcall BOOLEAN WIN_FUNC(KeDeregisterBugCheckReasonCallback,1)
 1695     (void *callback_record)
 1696 {
 1697     TRACE1("callback_record: %p", callback_record);
 1698     TODO();
 1699     return TRUE;
 1700 }
 1701 
 1702 wstdcall void *WIN_FUNC(MmAllocateContiguousMemorySpecifyCache,5)
 1703     (SIZE_T size, PHYSICAL_ADDRESS lowest, PHYSICAL_ADDRESS highest,
 1704      PHYSICAL_ADDRESS boundary, enum memory_caching_type cache_type)
 1705 {
 1706     void *addr;
 1707     gfp_t flags;
 1708 
 1709     ENTER2("%zu, 0x%llx, 0x%llx, 0x%llx, %d", size, lowest,
 1710            highest, boundary, cache_type);
 1711     flags = irql_gfp();
 1712     addr = wrap_get_free_pages(flags, size);
 1713     TRACE2("%p, %zu, 0x%x", addr, size, flags);
 1714     if (addr && ((virt_to_phys(addr) + size) <= highest))
 1715         EXIT2(return addr);
 1716 #ifdef CONFIG_X86_64
 1717     /* GFP_DMA is really only 16MB even on x86-64, but there is no
 1718      * other zone available */
 1719     if (highest <= DMA_BIT_MASK(31))
 1720         flags |= __GFP_DMA;
 1721     else if (highest <= DMA_BIT_MASK(32))
 1722         flags |= __GFP_DMA32;
 1723 #else
 1724     if (highest <= DMA_BIT_MASK(24))
 1725         flags |= __GFP_DMA;
 1726     else if (highest > DMA_BIT_MASK(30))
 1727         flags |= __GFP_HIGHMEM;
 1728 #endif
 1729     if (addr)
 1730         free_pages((unsigned long)addr, get_order(size));
 1731     addr = wrap_get_free_pages(flags, size);
 1732     TRACE2("%p, %zu, 0x%x", addr, size, flags);
 1733     return addr;
 1734 }
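
      /* Example of the fallback above: on x86-64, a driver asking for
       * memory below 4 GB gets __GFP_DMA32 added on the retry, while a
       * bound at or below 2 GB falls back to the 16 MB __GFP_DMA zone,
       * since no intermediate zone matches such a bound */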
 1735 
 1736 wstdcall void WIN_FUNC(MmFreeContiguousMemorySpecifyCache,3)
 1737     (void *base, SIZE_T size, enum memory_caching_type cache_type)
 1738 {
 1739     TRACE2("%p, %zu", base, size);
 1740     free_pages((unsigned long)base, get_order(size));
 1741 }
 1742 
 1743 wstdcall PHYSICAL_ADDRESS WIN_FUNC(MmGetPhysicalAddress,1)
 1744     (void *base)
 1745 {
 1746     unsigned long phy = virt_to_phys(base);
 1747     TRACE2("%p, %p", base, (void *)phy);
 1748     return phy;
 1749 }
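
      /* Note: virt_to_phys() is valid only for directly mapped kernel
       * addresses (e.g., kmalloc memory); for vmalloc or ioremap
       * addresses the result is meaningless */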
 1750 
 1751 /* The Atheros card with PCI ID 168C:0014 calls this function with
 1752  * addresses 0xf0000 and 0xf6ef0 and then checks for strings that
 1753  * seem to be SMBIOS/DMI anchors: "_SM_" and "_DMI_". This may be a
 1754  * hack to check whether the card is installed in IBM Thinkpads; we
 1755  * could probably get this device to work by creating a buffer with
 1756  * the strings the driver expects and returning a virtual address to
 1757  * that buffer instead */
 1758 wstdcall void __iomem *WIN_FUNC(MmMapIoSpace,3)
 1759     (PHYSICAL_ADDRESS phys_addr, SIZE_T size,
 1760      enum memory_caching_type cache)
 1761 {
 1762     void __iomem *virt;
 1763     ENTER1("cache type: %d", cache);
 1764     if (cache == MmCached) {
 1765 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)
 1766         virt = ioremap_cache(phys_addr, size);
 1767 #else
 1768         virt = ioremap(phys_addr, size);
 1769 #endif
 1770     } else {
 1771 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)
 1772         virt = ioremap(phys_addr, size);
 1773 #else
 1774         virt = ioremap_nocache(phys_addr, size);
 1775 #endif
 1776     }
 1777     TRACE1("%llx, %zu, %p", phys_addr, size, virt);
 1778     return virt;
 1779 }
 1780 
 1781 wstdcall void WIN_FUNC(MmUnmapIoSpace,2)
 1782     (void __iomem *addr, SIZE_T size)
 1783 {
 1784     ENTER1("%p, %zu", addr, size);
 1785     iounmap(addr);
 1786     return;
 1787 }
 1788 
 1789 wstdcall ULONG WIN_FUNC(MmSizeOfMdl,2)
 1790     (void *base, ULONG length)
 1791 {
 1792     return sizeof(struct mdl) +
 1793            (sizeof(PFN_NUMBER) * SPAN_PAGES(base, length));
 1794 }
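
      /* Worked example: with 4 KB pages, a 100-byte buffer starting 10
       * bytes before a page boundary spans two pages, so this returns
       * sizeof(struct mdl) + 2 * sizeof(PFN_NUMBER) */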
 1795 
 1796 struct mdl *allocate_init_mdl(void *virt, ULONG length)
 1797 {
 1798     struct wrap_mdl *wrap_mdl;
 1799     struct mdl *mdl;
 1800     int mdl_size = MmSizeOfMdl(virt, length);
 1801 
 1802     if (mdl_size <= MDL_CACHE_SIZE) {
 1803         wrap_mdl = kmem_cache_alloc(mdl_cache, irql_gfp());
 1804         if (!wrap_mdl)
 1805             return NULL;
 1806         spin_lock_bh(&dispatcher_lock);
 1807         InsertHeadList(&wrap_mdl_list, &wrap_mdl->list);
 1808         spin_unlock_bh(&dispatcher_lock);
 1809         mdl = wrap_mdl->mdl;
 1810         TRACE3("allocated mdl from cache: %p(%p), %p(%d)",
 1811                wrap_mdl, mdl, virt, length);
 1812         memset(mdl, 0, MDL_CACHE_SIZE);
 1813         MmInitializeMdl(mdl, virt, length);
 1814         /* mark the MDL as allocated from cache pool so when
 1815          * it is freed, we free it back to the pool */
 1816         mdl->flags = MDL_ALLOCATED_FIXED_SIZE | MDL_CACHE_ALLOCATED;
 1817     } else {
 1818         wrap_mdl =
 1819             kmalloc(sizeof(*wrap_mdl) + mdl_size, irql_gfp());
 1820         if (!wrap_mdl)
 1821             return NULL;
 1822         mdl = wrap_mdl->mdl;
 1823         TRACE3("allocated mdl from memory: %p(%p), %p(%d)",
 1824                wrap_mdl, mdl, virt, length);
 1825         spin_lock_bh(&dispatcher_lock);
 1826         InsertHeadList(&wrap_mdl_list, &wrap_mdl->list);
 1827         spin_unlock_bh(&dispatcher_lock);
 1828         memset(mdl, 0, mdl_size);
 1829         MmInitializeMdl(mdl, virt, length);
 1830         mdl->flags = MDL_ALLOCATED_FIXED_SIZE;
 1831     }
 1832     return mdl;
 1833 }
 1834 
 1835 void free_mdl(struct mdl *mdl)
 1836 {
 1837     /* A driver may allocate an MDL with NdisAllocateBuffer and free
 1838      * it with IoFreeMdl (e.g., 64-bit Broadcom). Since buffers
 1839      * allocated with Ndis calls must be treated differently, we call
 1840      * NdisFreeBuffer if the MDL was allocated with an Ndis function;
 1841      * the Ndis functions set the 'pool' field to mark this. */
 1842     if (!mdl)
 1843         return;
 1844     if (mdl->pool)
 1845         NdisFreeBuffer(mdl);
 1846     else {
 1847         struct wrap_mdl *wrap_mdl = (struct wrap_mdl *)
 1848             ((char *)mdl - offsetof(struct wrap_mdl, mdl));
 1849         spin_lock_bh(&dispatcher_lock);
 1850         RemoveEntryList(&wrap_mdl->list);
 1851         spin_unlock_bh(&dispatcher_lock);
 1852 
 1853         if (mdl->flags & MDL_CACHE_ALLOCATED) {
 1854             TRACE3("freeing mdl cache: %p, %p, %p",
 1855                    wrap_mdl, mdl, mdl->mappedsystemva);
 1856             kmem_cache_free(mdl_cache, wrap_mdl);
 1857         } else {
 1858             TRACE3("freeing mdl: %p, %p, %p",
 1859                    wrap_mdl, mdl, mdl->mappedsystemva);
 1860             kfree(wrap_mdl);
 1861         }
 1862     }
 1863     return;
 1864 }
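
      /* Sketch of the intended pairing of the two helpers above: a range
       * within MDL_CACHE_PAGES comes from the kmem cache, anything larger
       * from kmalloc, and free_mdl() routes each back to where it came
       * from. 'example_mdl_roundtrip' is a hypothetical name. */
      #if 0
      static void example_mdl_roundtrip(void *buf, ULONG len)
      {
          struct mdl *mdl = allocate_init_mdl(buf, len);
          if (mdl)
              free_mdl(mdl);    /* flags decide cache vs. kfree */
      }
      #endif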
 1865 
 1866 wstdcall void WIN_FUNC(IoBuildPartialMdl,4)
 1867     (struct mdl *source, struct mdl *target, void *virt, ULONG length)
 1868 {
 1869     MmInitializeMdl(target, virt, length);
 1870     target->flags |= MDL_PARTIAL;
 1871 }
 1872 
 1873 wstdcall void WIN_FUNC(MmBuildMdlForNonPagedPool,1)
 1874     (struct mdl *mdl)
 1875 {
 1876     PFN_NUMBER *mdl_pages;
 1877     int i, n;
 1878 
 1879     ENTER4("%p", mdl);
 1880     /* already mapped */
 1881 //  mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
 1882     mdl->flags |= MDL_SOURCE_IS_NONPAGED_POOL;
 1883     TRACE4("%p, %p, %p, %d, %d", mdl, mdl->mappedsystemva, mdl->startva,
 1884            mdl->byteoffset, mdl->bytecount);
 1885     n = SPAN_PAGES(MmGetSystemAddressForMdl(mdl), MmGetMdlByteCount(mdl));
 1886     if (n > MDL_CACHE_PAGES)
 1887         WARNING("%p, %d, %d", MmGetSystemAddressForMdl(mdl),
 1888             MmGetMdlByteCount(mdl), n);
 1889     mdl_pages = MmGetMdlPfnArray(mdl);
 1890     for (i = 0; i < n; i++)
 1891         mdl_pages[i] = (ULONG_PTR)mdl->startva + (i * PAGE_SIZE);
 1892     EXIT4(return);
 1893 }
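
      /* note: the "PFN" array is filled with kernel virtual addresses
       * rather than real page frame numbers; this appears to be an
       * internal convention, consumed only by the wrapper itself */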
 1894 
 1895 wstdcall void *WIN_FUNC(MmMapLockedPages,2)
 1896     (struct mdl *mdl, KPROCESSOR_MODE access_mode)
 1897 {
 1898     /* already mapped */
 1899 //  mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
 1900     mdl->flags |= MDL_MAPPED_TO_SYSTEM_VA;
 1901     /* what is the need for MDL_PARTIAL_HAS_BEEN_MAPPED? */
 1902     if (mdl->flags & MDL_PARTIAL)
 1903         mdl->flags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
 1904     return mdl->mappedsystemva;
 1905 }
 1906 
 1907 wstdcall void *WIN_FUNC(MmMapLockedPagesSpecifyCache,6)
 1908     (struct mdl *mdl, KPROCESSOR_MODE access_mode,
 1909      enum memory_caching_type cache_type, void *base_address,
 1910      ULONG bug_check, enum mm_page_priority priority)
 1911 {
 1912     return MmMapLockedPages(mdl, access_mode);
 1913 }
 1914 
 1915 wstdcall void WIN_FUNC(MmUnmapLockedPages,2)
 1916     (void *base, struct mdl *mdl)
 1917 {
 1918     mdl->flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
 1919     return;
 1920 }
 1921 
 1922 wstdcall void WIN_FUNC(MmProbeAndLockPages,3)
 1923     (struct mdl *mdl, KPROCESSOR_MODE access_mode,
 1924      enum lock_operation operation)
 1925 {
 1926     /* already locked */
 1927     mdl->flags |= MDL_PAGES_LOCKED;
 1928     return;
 1929 }
 1930 
 1931 wstdcall void WIN_FUNC(MmUnlockPages,1)
 1932     (struct mdl *mdl)
 1933 {
 1934     mdl->flags &= ~MDL_PAGES_LOCKED;
 1935     return;
 1936 }
 1937 
 1938 wstdcall BOOLEAN WIN_FUNC(MmIsAddressValid,1)
 1939     (void *virt_addr)
 1940 {
 1941     if (virt_addr_valid(virt_addr))
 1942         return TRUE;
 1943     else
 1944         return FALSE;
 1945 }
 1946 
 1947 wstdcall void *WIN_FUNC(MmLockPagableDataSection,1)
 1948     (void *address)
 1949 {
 1950     return address;
 1951 }
 1952 
 1953 wstdcall void WIN_FUNC(MmUnlockPagableImageSection,1)
 1954     (void *handle)
 1955 {
 1956     return;
 1957 }
 1958 
 1959 wstdcall NTSTATUS WIN_FUNC(ObReferenceObjectByHandle,6)
 1960     (void *handle, ACCESS_MASK desired_access, void *obj_type,
 1961      KPROCESSOR_MODE access_mode, void **object, void *handle_info)
 1962 {
 1963     struct common_object_header *hdr;
 1964 
 1965     TRACE2("%p", handle);
 1966     hdr = HANDLE_TO_HEADER(handle);
 1967     atomic_inc_var(hdr->ref_count);
 1968     *object = HEADER_TO_OBJECT(hdr);
 1969     TRACE2("%p, %p, %d, %p", hdr, object, hdr->ref_count, *object);
 1970     return STATUS_SUCCESS;
 1971 }
 1972 
 1973 /* The DDK doesn't say whether the return value should be the
 1974  * reference count before or after incrementing, but according to
 1975  * #reactos developers, it is the count after incrementing */
 1976 wfastcall LONG WIN_FUNC(ObfReferenceObject,1)
 1977     (void *object)
 1978 {
 1979     struct common_object_header *hdr;
 1980     LONG ret;
 1981 
 1982     hdr = OBJECT_TO_HEADER(object);
 1983     ret = post_atomic_add(hdr->ref_count, 1);
 1984     TRACE2("%p, %d, %p", hdr, hdr->ref_count, object);
 1985     return ret;
 1986 }
 1987 
 1988 static int dereference_object(void *object)
 1989 {
 1990     struct common_object_header *hdr;
 1991     int ref_count;
 1992 
 1993     ENTER2("object: %p", object);
 1994     hdr = OBJECT_TO_HEADER(object);
 1995     TRACE2("hdr: %p", hdr);
 1996     ref_count = post_atomic_add(hdr->ref_count, -1);
 1997     TRACE2("object: %p, %d", object, ref_count);
 1998     if (ref_count < 0)
 1999         ERROR("invalid object: %p (%d)", object, ref_count);
 2000     if (ref_count <= 0) {
 2001         free_object(object);
 2002         return 1;
 2003     } else
 2004         return 0;
 2005 }
 2006 
 2007 wfastcall void WIN_FUNC(ObfDereferenceObject,1)
 2008     (void *object)
 2009 {
 2010     TRACE2("%p", object);
 2011     dereference_object(object);
 2012 }
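
      /* Lifecycle sketch (assuming objects are created with ref_count 1):
       * ObfReferenceObject() raises the count to 2 and returns 2; each
       * ObfDereferenceObject() drops it by one, and the transition to 0
       * frees the object via free_object() */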
 2013 
 2014 wstdcall NTSTATUS WIN_FUNC(ZwCreateFile,11)
 2015     (void **handle, ACCESS_MASK access_mask,
 2016      struct object_attributes *obj_attr, struct io_status_block *iosb,
 2017      LARGE_INTEGER *size, ULONG file_attr, ULONG share_access,
 2018      ULONG create_disposition, ULONG create_options, void *ea_buffer,
 2019      ULONG ea_length)
 2020 {
 2021     struct common_object_header *coh;
 2022     struct file_object *fo;
 2023     struct ansi_string ansi;
 2024     struct wrap_bin_file *bin_file;
 2025     char *file_basename;
 2026     NTSTATUS status;
 2027 
 2028     spin_lock_bh(&ntoskernel_lock);
 2029     nt_list_for_each_entry(coh, &object_list, list) {
 2030         if (coh->type != OBJECT_TYPE_FILE)
 2031             continue;
 2032         /* TODO: check if file is opened in shared mode */
 2033         if (!RtlCompareUnicodeString(&coh->name, obj_attr->name, TRUE)) {
 2034             fo = HEADER_TO_OBJECT(coh);
 2035             bin_file = fo->wrap_bin_file;
 2036             *handle = coh;
 2037             spin_unlock_bh(&ntoskernel_lock);
 2038             ObReferenceObject(fo);
 2039             iosb->status = FILE_OPENED;
 2040             iosb->info = bin_file->size;
 2041             EXIT2(return STATUS_SUCCESS);
 2042         }
 2043     }
 2044     spin_unlock_bh(&ntoskernel_lock);
 2045 
 2046     if (RtlUnicodeStringToAnsiString(&ansi, obj_attr->name, TRUE) !=
 2047         STATUS_SUCCESS)
 2048         EXIT2(return STATUS_INSUFFICIENT_RESOURCES);
 2049 
 2050     file_basename = strrchr(ansi.buf, '\\');
 2051     if (file_basename)
 2052         file_basename++;
 2053     else
 2054         file_basename = ansi.buf;
 2055     TRACE2("file: '%s', '%s'", ansi.buf, file_basename);
 2056 
 2057     fo = allocate_object(sizeof(struct file_object), OBJECT_TYPE_FILE,
 2058                  obj_attr->name);
 2059     if (!fo) {
 2060         RtlFreeAnsiString(&ansi);
 2061         iosb->status = STATUS_INSUFFICIENT_RESOURCES;
 2062         iosb->info = 0;
 2063         EXIT2(return STATUS_FAILURE);
 2064     }
 2065     coh = OBJECT_TO_HEADER(fo);
 2066     bin_file = get_bin_file(file_basename);
 2067     if (bin_file) {
 2068         TRACE2("%s, %s", bin_file->name, file_basename);
 2069         fo->flags = FILE_OPENED;
 2070     } else if (access_mask & FILE_WRITE_DATA) {
 2071         bin_file = kzalloc(sizeof(*bin_file), GFP_KERNEL);
 2072         if (bin_file) {
 2073             strncpy(bin_file->name, file_basename,
 2074                 sizeof(bin_file->name));
 2075             bin_file->name[sizeof(bin_file->name)-1] = 0;
 2076             bin_file->data = vmalloc(*size);
 2077             if (bin_file->data) {
 2078                 memset(bin_file->data, 0, *size);
 2079                 bin_file->size = *size;
 2080                 fo->flags = FILE_CREATED;
 2081             } else {
 2082                 kfree(bin_file);
 2083                 bin_file = NULL;
 2084             }
 2085         }
 2086     } else
 2087         bin_file = NULL;
 2088 
 2089     RtlFreeAnsiString(&ansi);
 2090     if (!bin_file) {
 2091         iosb->status = FILE_DOES_NOT_EXIST;
 2092         iosb->info = 0;
 2093         free_object(fo);
 2094         EXIT2(return STATUS_FAILURE);
 2095     }
 2096 
 2097     fo->wrap_bin_file = bin_file;
 2098     fo->current_byte_offset = 0;
 2099     if (access_mask & FILE_READ_DATA)
 2100         fo->read_access = TRUE;
 2101     if (access_mask & FILE_WRITE_DATA)
 2102         fo->write_access = TRUE;
 2103     iosb->status = FILE_OPENED;
 2104     iosb->info = bin_file->size;
 2105     *handle = coh;
 2106     TRACE2("handle: %p", *handle);
 2107     status = STATUS_SUCCESS;
 2108     EXIT2(return status);
 2109 }
 2110 
 2111 wstdcall NTSTATUS WIN_FUNC(ZwOpenFile,6)
 2112     (void **handle, ACCESS_MASK access_mask,
 2113      struct object_attributes *obj_attr, struct io_status_block *iosb,
 2114      ULONG share_access, ULONG open_options)
 2115 {
 2116     LARGE_INTEGER size = 0;    /* only examined on the create path */
 2117     return ZwCreateFile(handle, access_mask, obj_attr, iosb, &size, 0,
 2118                 share_access, 0, open_options, NULL, 0);
 2119 }
 2120 
 2121 wstdcall NTSTATUS WIN_FUNC(ZwReadFile,9)
 2122     (void *handle, struct nt_event *event, void *apc_routine,
 2123      void *apc_context, struct io_status_block *iosb, void *buffer,
 2124      ULONG length, LARGE_INTEGER *byte_offset, ULONG *key)
 2125 {
 2126     struct file_object *fo;
 2127     struct common_object_header *coh;
 2128     ULONG count;
 2129     size_t offset;
 2130     struct wrap_bin_file *file;
 2131 
 2132     TRACE2("%p", handle);
 2133     coh = handle;
 2134     if (coh->type != OBJECT_TYPE_FILE) {
 2135         ERROR("handle %p is invalid: %d", handle, coh->type);
 2136         EXIT2(return STATUS_FAILURE);
 2137     }
 2138     fo = HANDLE_TO_OBJECT(coh);
 2139     file = fo->wrap_bin_file;
 2140     TRACE2("file: %s (%zu)", file->name, file->size);
 2141     spin_lock_bh(&ntoskernel_lock);
 2142     if (byte_offset)
 2143         offset = *byte_offset;
 2144     else
 2145         offset = fo->current_byte_offset;
          if (offset > file->size)    /* don't read past end of file */
              offset = file->size;
 2146     count = min((size_t)length, file->size - offset);
 2147     TRACE2("count: %u, offset: %zu, length: %u", count, offset, length);
 2148     memcpy(buffer, ((void *)file->data) + offset, count);
 2149     fo->current_byte_offset = offset + count;
 2150     spin_unlock_bh(&ntoskernel_lock);
 2151     iosb->status = STATUS_SUCCESS;
 2152     iosb->info = count;
 2153     EXIT2(return STATUS_SUCCESS);
 2154 }
 2155 
 2156 wstdcall NTSTATUS WIN_FUNC(ZwWriteFile,9)
 2157     (void *handle, struct nt_event *event, void *apc_routine,
 2158      void *apc_context, struct io_status_block *iosb, void *buffer,
 2159      ULONG length, LARGE_INTEGER *byte_offset, ULONG *key)
 2160 {
 2161     struct file_object *fo;
 2162     struct common_object_header *coh;
 2163     struct wrap_bin_file *file;
 2164     unsigned long offset;
 2165 
 2166     TRACE2("%p", handle);
 2167     coh = handle;
 2168     if (coh->type != OBJECT_TYPE_FILE) {
 2169         ERROR("handle %p is invalid: %d", handle, coh->type);
 2170         EXIT2(return STATUS_FAILURE);
 2171     }
 2172     fo = HANDLE_TO_OBJECT(coh);
 2173     file = fo->wrap_bin_file;
 2174     TRACE2("file: %zu, %u", file->size, length);
 2175     spin_lock_bh(&ntoskernel_lock);
 2176     if (byte_offset)
 2177         offset = *byte_offset;
 2178     else
 2179         offset = fo->current_byte_offset;
 2180     if (length + offset > file->size) {
 2181         WARNING("%lu, %zu", length + offset, file->size);
 2182         /* TODO: implement writing past end of current size */
 2183         iosb->status = STATUS_FAILURE;
 2184         iosb->info = 0;
 2185     } else {
 2186         memcpy(file->data + offset, buffer, length);
 2187         iosb->status = STATUS_SUCCESS;
 2188         iosb->info = length;
 2189         fo->current_byte_offset = offset + length;
 2190     }
 2191     spin_unlock_bh(&ntoskernel_lock);
 2192     EXIT2(return iosb->status);
 2193 }
 2194 
 2195 wstdcall NTSTATUS WIN_FUNC(ZwClose,1)
 2196     (void *handle)
 2197 {
 2198     struct common_object_header *coh;
 2199 
 2200     TRACE2("%p", handle);
 2201     if (handle == NULL) {
 2202         TRACE1("");
 2203         EXIT2(return STATUS_SUCCESS);
 2204     }
 2205     coh = handle;
 2206     if (coh->type == OBJECT_TYPE_FILE) {
 2207         struct file_object *fo;
 2208         struct wrap_bin_file *bin_file;
 2209         typeof(fo->flags) flags;
 2210 
 2211         fo = HANDLE_TO_OBJECT(handle);
 2212         flags = fo->flags;
 2213         bin_file = fo->wrap_bin_file;
 2214         if (dereference_object(fo)) {
 2215             if (flags == FILE_CREATED) {
 2216                 vfree(bin_file->data);
 2217                 kfree(bin_file);
 2218             } else
 2219                 free_bin_file(bin_file);
 2220         }
 2221     } else if (coh->type == OBJECT_TYPE_NT_THREAD) {
 2222         struct nt_thread *thread = HANDLE_TO_OBJECT(handle);
 2223         TRACE2("thread: %p (%p)", thread, handle);
 2224         ObDereferenceObject(thread);
 2225     } else {
 2226         /* TODO: can we just dereference object here? */
 2227         WARNING("closing handle 0x%x not implemented", coh->type);
 2228     }
 2229     EXIT2(return STATUS_SUCCESS);
 2230 }
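
      /* Usage sketch (illustrative only, not compiled in): reading a
       * loaded binary file through the handle-based calls above;
       * 'example_read_file' and its arguments are hypothetical. */
      #if 0
      static NTSTATUS example_read_file(struct unicode_string *path,
                                        void *buf, ULONG len)
      {
          struct object_attributes oa = { .name = path };
          struct io_status_block iosb;
          void *handle;
          NTSTATUS status;

          status = ZwOpenFile(&handle, FILE_READ_DATA, &oa, &iosb, 0, 0);
          if (status != STATUS_SUCCESS)
              return status;
          status = ZwReadFile(handle, NULL, NULL, NULL, &iosb,
                              buf, len, NULL, NULL);
          ZwClose(handle);
          return status;
      }
      #endif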
 2231 
 2232 wstdcall NTSTATUS WIN_FUNC(ZwQueryInformationFile,5)
 2233     (void *handle, struct io_status_block *iosb, void *info,
 2234      ULONG length, enum file_info_class class)
 2235 {
 2236     struct file_object *fo;
 2237     struct file_name_info *fni;
 2238     struct file_std_info *fsi;
 2239     struct wrap_bin_file *file;
 2240     struct common_object_header *coh;
 2241 
 2242     ENTER2("%p", handle);
 2243     coh = handle;
 2244     if (coh->type != OBJECT_TYPE_FILE) {
 2245         ERROR("handle %p is invalid: %d", coh, coh->type);
 2246         EXIT2(return STATUS_FAILURE);
 2247     }
 2248     fo = HANDLE_TO_OBJECT(handle);
 2249     TRACE2("fo: %p, %d", fo, class);
 2250     switch (class) {
 2251     case FileNameInformation:
 2252         fni = info;
 2253         fni->length = min(length, (typeof(length))coh->name.length);
 2254         memcpy(fni->name, coh->name.buf, fni->length);
 2255         iosb->status = STATUS_SUCCESS;
 2256         iosb->info = fni->length;
 2257         break;
 2258     case FileStandardInformation:
 2259         fsi = info;
 2260         file = fo->wrap_bin_file;
 2261         fsi->alloc_size = file->size;
 2262         fsi->eof = file->size;
 2263         fsi->num_links = 1;
 2264         fsi->delete_pending = FALSE;
 2265         fsi->dir = FALSE;
 2266         iosb->status = STATUS_SUCCESS;
 2267         iosb->info = 0;
 2268         break;
 2269     default:
 2270         WARNING("type %d not implemented yet", class);
 2271         iosb->status = STATUS_FAILURE;
 2272         iosb->info = 0;
 2273         break;
 2274     }
 2275     EXIT2(return iosb->status);
 2276 }
 2277 
 2278 wstdcall NTSTATUS WIN_FUNC(ZwOpenSection,3)
 2279     (void **handle, ACCESS_MASK access, struct object_attributes *obj_attrs)
 2280 {
 2281     INFO("%p, 0x%x, %d", obj_attrs, obj_attrs->attributes, access);
 2282     TODO();
 2283     *handle = obj_attrs;
 2284     return STATUS_SUCCESS;
 2285 }
 2286 
 2287 wstdcall NTSTATUS WIN_FUNC(ZwMapViewOfSection,10)
 2288     (void *secn_handle, void *process_handle, void **base_address,
 2289      ULONG zero_bits, SIZE_T commit_size, LARGE_INTEGER *secn_offset,
 2290      SIZE_T *view_size, enum section_inherit inherit, ULONG alloc_type,
 2291      ULONG protect)
 2292 {
 2293     INFO("%p, %p, %p", secn_handle, process_handle, base_address);
 2294     TODO();
 2295     *base_address = (void *)0xdeadbeef;
 2296     return STATUS_SUCCESS;
 2297 }
 2298 
 2299 wstdcall NTSTATUS WIN_FUNC(ZwUnmapViewOfSection,2)
 2300     (void *process_handle, void *base_address)
 2301 {
 2302     INFO("%p, %p", process_handle, base_address);
 2303     TODO();
 2304     return STATUS_SUCCESS;
 2305 }
 2306 
 2307 wstdcall NTSTATUS WIN_FUNC(ZwCreateKey,7)
 2308     (void **handle, ACCESS_MASK desired_access,
 2309      struct object_attributes *attr, ULONG title_index,
 2310      struct unicode_string *class, ULONG create_options,
 2311      ULONG *disposition)
 2312 {
 2313     struct ansi_string ansi;
 2314     if (RtlUnicodeStringToAnsiString(&ansi, attr->name, TRUE) ==
 2315         STATUS_SUCCESS) {
 2316         TRACE1("key: %s", ansi.buf);
 2317         RtlFreeAnsiString(&ansi);
 2318     }
 2319     *handle = NULL;
 2320     return STATUS_SUCCESS;
 2321 }
 2322 
 2323 wstdcall NTSTATUS WIN_FUNC(ZwOpenKey,3)
 2324     (void **handle, ACCESS_MASK desired_access,
 2325      struct object_attributes *attr)
 2326 {
 2327     struct ansi_string ansi;
 2328     if (RtlUnicodeStringToAnsiString(&ansi, attr->name, TRUE) ==
 2329         STATUS_SUCCESS) {
 2330         TRACE1("key: %s", ansi.buf);
 2331         RtlFreeAnsiString(&ansi);
 2332     }
 2333     *handle = NULL;
 2334     return STATUS_SUCCESS;
 2335 }
 2336 
 2337 wstdcall NTSTATUS WIN_FUNC(ZwSetValueKey,6)
 2338     (void *handle, struct unicode_string *name, ULONG title_index,
 2339      ULONG type, void *data, ULONG data_size)
 2340 {
 2341     struct ansi_string ansi;
 2342     if (RtlUnicodeStringToAnsiString(&ansi, name, TRUE) ==
 2343         STATUS_SUCCESS) {
 2344         TRACE1("key: %s", ansi.buf);
 2345         RtlFreeAnsiString(&ansi);
 2346     }
 2347     return STATUS_SUCCESS;
 2348 }
 2349 
 2350 wstdcall NTSTATUS WIN_FUNC(ZwQueryValueKey,6)
 2351     (void *handle, struct unicode_string *name,
 2352      enum key_value_information_class class, void *info,
 2353      ULONG length, ULONG *res_length)
 2354 {
 2355     struct ansi_string ansi;
 2356     if (RtlUnicodeStringToAnsiString(&ansi, name, TRUE) == STATUS_SUCCESS) {
 2357         TRACE1("key: %s", ansi.buf);
 2358         RtlFreeAnsiString(&ansi);
 2359     }
 2360     TODO();
 2361     return STATUS_INVALID_PARAMETER;
 2362 }
 2363 
 2364 wstdcall NTSTATUS WIN_FUNC(ZwDeleteKey,1)
 2365     (void *handle)
 2366 {
 2367     ENTER2("%p", handle);
 2368     return STATUS_SUCCESS;
 2369 }
 2370 
 2371 wstdcall NTSTATUS WIN_FUNC(ZwPowerInformation,5)
 2372     (INT info_level, void *in_buf, ULONG in_buf_len, void *out_buf,
 2373      ULONG out_buf_len)
 2374 {
 2375     INFO("%d, %u, %u", info_level, in_buf_len, out_buf_len);
 2376     TODO();
 2377     return STATUS_ACCESS_DENIED;
 2378 }
 2379 
 2380 wstdcall NTSTATUS WIN_FUNC(WmiSystemControl,4)
 2381     (struct wmilib_context *info, struct device_object *dev_obj,
 2382      struct irp *irp, void *irp_disposition)
 2383 {
 2384     TODO();
 2385     return STATUS_SUCCESS;
 2386 }
 2387 
 2388 wstdcall NTSTATUS WIN_FUNC(WmiCompleteRequest,5)
 2389     (struct device_object *dev_obj, struct irp *irp, NTSTATUS status,
 2390      ULONG buffer_used, CCHAR priority_boost)
 2391 {
 2392     TODO();
 2393     return STATUS_SUCCESS;
 2394 }
 2395 
 2396 noregparm NTSTATUS WIN_FUNC(WmiTraceMessage,12)
 2397     (void *tracehandle, ULONG message_flags,
 2398      void *message_guid, USHORT message_no, ...)
 2399 {
 2400     TODO();
 2401     EXIT2(return STATUS_SUCCESS);
 2402 }
 2403 
 2404 wstdcall NTSTATUS WIN_FUNC(WmiQueryTraceInformation,4)
 2405     (enum trace_information_class trace_info_class, void *trace_info,
 2406      ULONG *req_length, void *buf)
 2407 {
 2408     TODO();
 2409     EXIT2(return STATUS_SUCCESS);
 2410 }
 2411 
 2412 /* this function can't be wstdcall as it takes variable number of args */
 2413 __attribute__((format(printf, 1, 2)))
 2414 noregparm ULONG WIN_FUNC(DbgPrint,12)
 2415     (char *format, ...)
 2416 {
 2417 #if DEBUG >= 1
 2418     va_list args;
 2419     char buf[100];    /* on stack: a shared static buffer would race */
 2420 
 2421     va_start(args, format);
 2422     vsnprintf(buf, sizeof(buf), format, args);
 2423     printk(KERN_DEBUG "%s (%s): %s", DRIVER_NAME, __func__, buf);
 2424     va_end(args);
 2425 #endif
 2426     return STATUS_SUCCESS;
 2427 }
 2428 
 2429 __attribute__((format(printf, 3, 4)))
 2430 noregparm ULONG WIN_FUNC(DbgPrintEx,12)
 2431     (ULONG component_id, ULONG severity, char *format, ...)
 2432 {
 2433 #if DEBUG >= 1
 2434     va_list args;
 2435     char buf[100];    /* on stack: a shared static buffer would race */
 2436 
 2437     va_start(args, format);
 2438     vsnprintf(buf, sizeof(buf), format, args);
 2439     TRACE1("component_id: %u, severity: %u", component_id, severity);
 2440     printk(KERN_DEBUG "%s (%s): %s", DRIVER_NAME, __func__, buf);
 2441     va_end(args);
 2442 #endif
 2443     return STATUS_SUCCESS;
 2444 }
 2445 
 2446 wstdcall void WIN_FUNC(KeBugCheck,1)
 2447     (ULONG code)
 2448 {
 2449     ERROR("Unrecoverable error reported by the driver");
 2450     ERROR("code: 0x%x", code);
 2451     dump_stack();
 2452     return;
 2453 }
 2454 
 2455 wstdcall void WIN_FUNC(KeBugCheckEx,5)
 2456     (ULONG code, ULONG_PTR param1, ULONG_PTR param2,
 2457      ULONG_PTR param3, ULONG_PTR param4)
 2458 {
 2459     ERROR("Unrecoverable error reported by the driver");
 2460     ERROR("code: 0x%x, params: 0x%lx 0x%lx 0x%lx 0x%lx", code, param1,
 2461           param2, param3, param4);
 2462     dump_stack();
 2463     return;
 2464 }
 2465 
 2466 wstdcall void WIN_FUNC(ExSystemTimeToLocalTime,2)
 2467     (LARGE_INTEGER *system_time, LARGE_INTEGER *local_time)
 2468 {
 2469     *local_time = *system_time;
 2470 }
 2471 
 2472 wstdcall ULONG WIN_FUNC(ExSetTimerResolution,2)
 2473     (ULONG time, BOOLEAN set)
 2474 {
 2475     /* why a driver would want to change the system-wide timer
 2476      * resolution is beyond me */
 2477     return time;
 2478 }
 2479 
 2480 wstdcall void WIN_FUNC(DbgBreakPoint,0)
 2481     (void)
 2482 {
 2483     TODO();
 2484 }
 2485 
 2486 wstdcall void WIN_FUNC(_except_handler3,0)
 2487     (void)
 2488 {
 2489     TODO();
 2490 }
 2491 
 2492 wstdcall void WIN_FUNC(__C_specific_handler,0)
 2493     (void)
 2494 {
 2495     TODO();
 2496 }
 2497 
 2498 wstdcall void WIN_FUNC(_purecall,0)
 2499     (void)
 2500 {
 2501     TODO();
 2502 }
 2503 
 2504 struct worker_init_struct {
 2505     struct work_struct work;
 2506     struct completion completion;
 2507     struct nt_thread *nt_thread;
 2508 };
 2509 
 2510 int ntoskernel_init(void)
 2511 {
 2512     spin_lock_init(&dispatcher_lock);
 2513     spin_lock_init(&ntoskernel_lock);
 2514     spin_lock_init(&ntos_work_lock);
 2515     spin_lock_init(&kdpc_list_lock);
 2516     spin_lock_init(&irp_cancel_lock);
 2517     InitializeListHead(&wrap_mdl_list);
 2518     InitializeListHead(&kdpc_list);
 2519     InitializeListHead(&callback_objects);
 2520     InitializeListHead(&bus_driver_list);
 2521     InitializeListHead(&object_list);
 2522     InitializeListHead(&ntos_work_list);
 2523 
 2524     nt_spin_lock_init(&nt_list_lock);
 2525 
 2526     INIT_WORK(&kdpc_work, kdpc_worker);
 2527     INIT_WORK(&ntos_work, ntos_work_worker);
 2528     wrap_timer_slist.next = NULL;
 2529 
 2530     wrap_ticks_to_boot = TICKS_1601_TO_1970;
 2531 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
 2532     do {
 2533         u64 now;
 2534         now = ktime_get_real_ns();
              /* 1 tick = 100 ns, so convert ns to 100-ns ticks */
              do_div(now, 100);
 2535         wrap_ticks_to_boot += now;
 2536     } while (0);
 2537 #else
 2538     do {
 2539         struct timeval now;
 2540 
 2541         do_gettimeofday(&now);
 2542         wrap_ticks_to_boot += (u64)now.tv_sec * TICKSPERSEC;
 2543         wrap_ticks_to_boot += now.tv_usec * 10;
 2544     } while (0);
 2545 #endif
 2546     TRACE2("%llu", wrap_ticks_to_boot);
 2547     wrap_ticks_to_boot -= jiffies * TICKSPERJIFFY;
 2548     TRACE2("%llu", wrap_ticks_to_boot);
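          /* net effect: wrap_ticks_to_boot = ticks(1601..1970) +
           * ticks(1970..now) - ticks(boot..now), i.e. the 100-ns tick
           * count from 1601 until the moment the system booted */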
 2549 
 2550     cpu_count = num_online_cpus();
 2551 
 2552 #ifdef WRAP_PREEMPT
 2553     do {
 2554         int cpu;
 2555         for_each_possible_cpu(cpu) {
 2556             struct irql_info *info;
 2557             info = &per_cpu(irql_info, cpu);
 2558             mutex_init(&(info->lock));
 2559             info->task = NULL;
 2560             info->count = 0;
 2564         }
 2565     } while (0);
 2566 #endif
 2567 
 2568     ntos_wq = create_singlethread_workqueue("ntos_wq");
 2569     if (!ntos_wq) {
 2570         WARNING("couldn't create ntos_wq thread");
 2571         return -ENOMEM;
 2572     }
 2573     TRACE1("ntos_wq: %p", ntos_wq);
 2574 
 2575     if (add_bus_driver("PCI")
 2576 #ifdef ENABLE_USB
 2577         || add_bus_driver("USB")
 2578 #endif
 2579         ) {
 2580         ntoskernel_exit();
 2581         return -ENOMEM;
 2582     }
 2583     mdl_cache =
 2584         wrap_kmem_cache_create(DRIVER_NAME "_mdl",
 2585                        sizeof(struct wrap_mdl) + MDL_CACHE_SIZE,
 2586                        0, 0);
 2587     TRACE2("%p", mdl_cache);
 2588     if (!mdl_cache) {
 2589         ERROR("couldn't allocate MDL cache");
 2590         ntoskernel_exit();
 2591         return -ENOMEM;
 2592     }
 2593 
 2594 #if defined(CONFIG_X86_64)
 2595     memset(&kuser_shared_data, 0, sizeof(kuser_shared_data));
 2596     *((ULONG64 *)&kuser_shared_data.system_time) = ticks_1601();
 2597 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)
 2598     init_timer(&shared_data_timer);
 2599     shared_data_timer.function = update_user_shared_data_proc;
 2600     shared_data_timer.data = 0;
 2601 #else
 2602     timer_setup(&shared_data_timer, update_user_shared_data_proc, 0);
 2603 #endif
 2604 #endif
 2605     return 0;
 2606 }
 2607 
 2608 int ntoskernel_init_device(struct wrap_device *wd)
 2609 {
 2610 #if defined(CONFIG_X86_64)
 2611     if (kuser_shared_data.reserved1)
 2612         mod_timer(&shared_data_timer, jiffies + MSEC_TO_HZ(30));
 2613 #endif
 2614     return 0;
 2615 }
 2616 
 2617 void ntoskernel_exit_device(struct wrap_device *wd)
 2618 {
 2619     ENTER2("");
 2620 
 2621     KeFlushQueuedDpcs();
 2622     EXIT2(return);
 2623 }
 2624 
 2625 void ntoskernel_exit(void)
 2626 {
 2627     struct nt_list *cur;
 2628 
 2629     ENTER2("");
 2630 
 2631     /* free kernel (Ke) timers */
 2632     TRACE2("freeing timers");
 2633     while (1) {
 2634         struct wrap_timer *wrap_timer;
 2635         struct nt_slist *slist;
 2636 
 2637         spin_lock_bh(&ntoskernel_lock);
 2638         if ((slist = wrap_timer_slist.next))
 2639             wrap_timer_slist.next = slist->next;
 2640         spin_unlock_bh(&ntoskernel_lock);
 2641         TIMERTRACE("%p", slist);
 2642         if (!slist)
 2643             break;
 2644         wrap_timer = container_of(slist, struct wrap_timer, slist);
 2645         if (del_timer_sync(&wrap_timer->timer))
 2646             WARNING("Buggy Windows driver left timer %p running",
 2647                 wrap_timer->nt_timer);
 2648         memset(wrap_timer, 0, sizeof(*wrap_timer));
 2649         slack_kfree(wrap_timer);
 2650     }
 2651 
 2652     TRACE2("freeing MDLs");
 2653     if (mdl_cache) {
 2654         spin_lock_bh(&ntoskernel_lock);
 2655         if (!IsListEmpty(&wrap_mdl_list))
 2656             ERROR("Windows driver didn't free all MDLs; "
 2657                   "freeing them now");
 2658         while ((cur = RemoveHeadList(&wrap_mdl_list))) {
 2659             struct wrap_mdl *wrap_mdl;
 2660             wrap_mdl = container_of(cur, struct wrap_mdl, list);
 2661             if (wrap_mdl->mdl->flags & MDL_CACHE_ALLOCATED)
 2662                 kmem_cache_free(mdl_cache, wrap_mdl);
 2663             else
 2664                 kfree(wrap_mdl);
 2665         }
 2666         spin_unlock_bh(&ntoskernel_lock);
 2667         kmem_cache_destroy(mdl_cache);
 2668         mdl_cache = NULL;
 2669     }
 2670 
 2671     TRACE2("freeing callbacks");
 2672     spin_lock_bh(&ntoskernel_lock);
 2673     while ((cur = RemoveHeadList(&callback_objects))) {
 2674         struct callback_object *object;
 2675         struct nt_list *ent;
 2676         object = container_of(cur, struct callback_object, list);
 2677         while ((ent = RemoveHeadList(&object->callback_funcs))) {
 2678             struct callback_func *f;
 2679             f = container_of(ent, struct callback_func, list);
 2680             kfree(f);
 2681         }
 2682         kfree(object);
 2683     }
 2684     spin_unlock_bh(&ntoskernel_lock);
 2685 
 2686     spin_lock_bh(&ntoskernel_lock);
 2687     while ((cur = RemoveHeadList(&bus_driver_list))) {
 2688         struct bus_driver *bus_driver;
 2689         bus_driver = container_of(cur, struct bus_driver, list);
 2690         /* TODO: make sure all drivers are shut down/removed */
 2691         kfree(bus_driver);
 2692     }
 2693     spin_unlock_bh(&ntoskernel_lock);
 2694 
 2695 #if defined(CONFIG_X86_64)
 2696     del_timer_sync(&shared_data_timer);
 2697 #endif
 2698     if (ntos_wq)
 2699         destroy_workqueue(ntos_wq);
 2700     ENTER2("freeing objects");
 2701     spin_lock_bh(&ntoskernel_lock);
 2702     while ((cur = RemoveHeadList(&object_list))) {
 2703         struct common_object_header *hdr;
 2704         hdr = container_of(cur, struct common_object_header, list);
 2705         if (hdr->type == OBJECT_TYPE_NT_THREAD)
 2706             TRACE1("object %p(%d) was not freed, freeing it now",
 2707                    HEADER_TO_OBJECT(hdr), hdr->type);
 2708         else
 2709             WARNING("object %p(%d) was not freed, freeing it now",
 2710                 HEADER_TO_OBJECT(hdr), hdr->type);
 2711         ExFreePool(hdr);
 2712     }
 2713     spin_unlock_bh(&ntoskernel_lock);
 2714 
 2715     EXIT2(return);
 2716 }