"Fossies" - the Fresh Open Source Software Archive

Member "ndiswrapper-1.63/driver/ntoskernel.h" (3 May 2020, 31917 Bytes) of package /linux/misc/ndiswrapper-1.63.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "ntoskernel.h" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 1.62_vs_1.63.

/*
 *  Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 */

#ifndef _NTOSKERNEL_H_
#define _NTOSKERNEL_H_

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/usb.h>
#include <linux/spinlock.h>
#include <asm/mman.h>
#include <linux/version.h>
#include <linux/etherdevice.h>
#include <net/iw_handler.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/vmalloc.h>

#if LINUX_VERSION_CODE > KERNEL_VERSION(4,11,0)
#include <linux/sched/signal.h>
#endif

#if !defined(CONFIG_X86) && !defined(CONFIG_X86_64)
#error "this module is for x86 or x86_64 architectures only"
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
#define gfp_t unsigned int __nocast

static inline void *_kzalloc(size_t size, gfp_t flags)
{
    void *p = kmalloc(size, flags);
    if (likely(p != NULL))
        memset(p, 0, size);
    return p;
}

#define kzalloc(size, flags) _kzalloc(size, flags)
#endif

/* Interrupt backwards compatibility stuff */
#include <linux/interrupt.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
#ifndef IRQ_HANDLED
#define IRQ_HANDLED
#define IRQ_NONE
#define irqreturn_t void
#endif
#endif /* Linux < 2.6.29 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
#ifndef mutex_init
#define mutex semaphore
#define mutex_init(m) sema_init(m, 1)
#define mutex_lock(m) down(m)
#define mutex_trylock(m) (!down_trylock(m))
#define mutex_unlock(m) up(m)
#define mutex_is_locked(m) (atomic_read(&(m)->count) == 0)
#endif
#endif /* Linux < 2.6.16 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
#define set_cpus_allowed_ptr(task, mask) set_cpus_allowed(task, *mask)
#endif /* Linux < 2.6.26 */

#ifdef CONFIG_SMP
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#define cpumask_copy(dst, src) do { *dst = *src; } while (0)
#define cpumask_equal(mask1, mask2) cpus_equal(*mask1, *mask2)
#define cpumask_setall(mask) cpus_setall(*mask)
static cpumask_t cpumasks[NR_CPUS];
#define cpumask_of(cpu)             \
({                      \
    cpumasks[cpu] = cpumask_of_cpu(cpu);    \
    &cpumasks[cpu];             \
})
#endif /* Linux < 2.6.28 */
#endif /* CONFIG_SMP */

#ifndef tsk_cpus_allowed
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0)
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_mask)
#else
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
#endif
#endif

#ifndef __packed
#define __packed __attribute__((packed))
#endif

/* PCI functions in 2.6 kernels have problems allocating DMA buffers,
 * but the generic DMA functions seem to work fine
 */
#include <asm/dma-mapping.h>

#ifndef ___GFP_RETRY_MAYFAIL
#define ___GFP_RETRY_MAYFAIL __GFP_REPEAT
#endif

#define PCI_DMA_ALLOC_COHERENT(pci_dev,size,dma_handle)         \
    dma_alloc_coherent(&pci_dev->dev,size,dma_handle,       \
               GFP_KERNEL | ___GFP_RETRY_MAYFAIL)
#define PCI_DMA_FREE_COHERENT(pci_dev,size,cpu_addr,dma_handle)     \
    dma_free_coherent(&pci_dev->dev,size,cpu_addr,dma_handle)
#define PCI_DMA_MAP_SINGLE(pci_dev,addr,size,direction)     \
    dma_map_single(&pci_dev->dev,addr,size,direction)
#define PCI_DMA_UNMAP_SINGLE(pci_dev,dma_handle,size,direction)     \
    dma_unmap_single(&pci_dev->dev,dma_handle,size,direction)
#define MAP_SG(pci_dev, sglist, nents, direction)       \
    dma_map_sg(&pci_dev->dev, sglist, nents, direction)
#define UNMAP_SG(pci_dev, sglist, nents, direction)     \
    dma_unmap_sg(&pci_dev->dev, sglist, nents, direction)
#define PCI_DMA_MAP_ERROR(dma_addr) dma_mapping_error(dma_addr)
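
/* Illustrative sketch (not part of the original header): how the DMA
 * wrappers above might be used; pdev, the size and the buffer name are
 * hypothetical.
 *
 *    dma_addr_t dma_handle;
 *    void *buf = PCI_DMA_ALLOC_COHERENT(pdev, 4096, &dma_handle);
 *    if (buf) {
 *        // ... program the device with dma_handle ...
 *        PCI_DMA_FREE_COHERENT(pdev, 4096, buf, dma_handle);
 *    }
 */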


#if defined(CONFIG_NET_RADIO) && !defined(CONFIG_WIRELESS_EXT)
#define CONFIG_WIRELESS_EXT
#endif

#define prepare_wait_condition(task, var, value)    \
do {                            \
    var = value;                    \
    task = current;                 \
    barrier();                  \
} while (0)

/* Wait in wait_state (e.g., TASK_INTERRUPTIBLE) for condition to
 * become true; timeout is either jiffies (> 0) to wait or 0 to wait
 * forever.
 * When timeout == 0, the return value is
 *    > 0 if the condition becomes true, or
 *    < 0 if a signal is pending on the thread.
 * When timeout > 0, the return value is
 *    > 0 if the condition becomes true before the timeout,
 *    < 0 if a signal is pending on the thread before the timeout, or
 *    0 if it timed out (the condition may have become true at the same time)
 */

#define wait_condition(condition, timeout, wait_state)      \
({                              \
    long ret = timeout ? timeout : 1;           \
    while (1) {                     \
        if (signal_pending(current)) {          \
            ret = -ERESTARTSYS;         \
            break;                  \
        }                       \
        set_current_state(wait_state);          \
        if (condition) {                \
            __set_current_state(TASK_RUNNING);  \
            break;                  \
        }                       \
        if (timeout) {                  \
            ret = schedule_timeout(ret);        \
            if (!ret)               \
                break;              \
        } else                      \
            schedule();             \
    }                           \
    ret;                            \
})
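
/* Illustrative sketch (not part of the original header): typical use of
 * prepare_wait_condition()/wait_condition().  The task pointer and flag
 * below are hypothetical.
 *
 *    static struct task_struct *waiting_task;
 *    static int event_flag;
 *
 *    // waiter:
 *    prepare_wait_condition(waiting_task, event_flag, 0);
 *    // wait up to one second for event_flag to become non-zero
 *    if (wait_condition(event_flag, HZ, TASK_INTERRUPTIBLE) == 0)
 *        ; // timed out
 *
 *    // waker, from another context:
 *    event_flag = 1;
 *    wake_up_process(waiting_task);
 */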

#ifdef WRAP_WQ

struct wrap_workqueue_struct;

struct wrap_work_struct {
    struct list_head list;
    void (*func)(struct wrap_work_struct *data);
    void *data;
    /* whether/on which thread scheduled */
    struct workqueue_thread *thread;
};

#define work_struct wrap_work_struct
#define workqueue_struct wrap_workqueue_struct

#undef INIT_WORK
#define INIT_WORK(work, pfunc)                  \
    do {                            \
        (work)->func = (pfunc);             \
        (work)->data = (work);              \
        (work)->thread = NULL;              \
    } while (0)

#undef create_singlethread_workqueue
#define create_singlethread_workqueue(wq) wrap_create_wq(wq, 1, 0)
#undef create_workqueue
#define create_workqueue(wq) wrap_create_wq(wq, 0, 0)
#undef destroy_workqueue
#define destroy_workqueue(wq) wrap_destroy_wq(wq)
#undef queue_work
#define queue_work(wq, work) wrap_queue_work(wq, work)
#undef flush_workqueue
#define flush_workqueue(wq) wrap_flush_wq(wq)

struct workqueue_struct *wrap_create_wq(const char *name, u8 singlethread,
                    u8 freeze);
void wrap_destroy_wq(struct workqueue_struct *workq);
int wrap_queue_work(struct workqueue_struct *workq, struct work_struct *work);
void wrap_cancel_work(struct work_struct *work);
void wrap_flush_wq(struct workqueue_struct *workq);
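
/* Illustrative sketch (not part of the original header): with WRAP_WQ,
 * the standard workqueue calls above are remapped onto the wrap_*
 * implementations, so calling code keeps its usual shape.  The names
 * below are hypothetical.
 *
 *    static struct workqueue_struct *my_wq;
 *    static struct work_struct my_work;
 *
 *    my_wq = create_singlethread_workqueue("my_wq");
 *    INIT_WORK(&my_work, my_work_func);
 *    queue_work(my_wq, &my_work);
 *    flush_workqueue(my_wq);
 *    destroy_workqueue(my_wq);
 */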

#else // WRAP_WQ

/* Compatibility for Linux before 2.6.20 where INIT_WORK takes 3 arguments */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && \
    !defined(INIT_WORK_NAR) && \
    !defined(INIT_DELAYED_WORK_DEFERRABLE)
typedef void (*compat_work_func_t)(void *work);
typedef void (*work_func_t)(struct work_struct *work);
static inline void (INIT_WORK)(struct work_struct *work, work_func_t func)
{
    INIT_WORK(work, (compat_work_func_t)func, work);
}
#undef INIT_WORK
#endif

#endif // WRAP_WQ

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
#define ISR_PT_REGS_PARAM_DECL
#else
#define ISR_PT_REGS_PARAM_DECL , struct pt_regs *regs
#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,16)
#define for_each_possible_cpu(_cpu) for_each_cpu(_cpu)
#endif

#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif

#ifndef IRQF_SHARED
#define IRQF_SHARED SA_SHIRQ
#endif

#ifndef UMH_WAIT_PROC
#define UMH_WAIT_PROC 1
#endif

#define memcpy_skb(skb, from, length)           \
    memcpy(skb_put(skb, length), from, length)

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
#ifndef DMA_BIT_MASK
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#endif
#endif

#ifndef __GFP_DMA32
#define __GFP_DMA32 GFP_DMA
#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)
#define wrap_kmem_cache_create(name, size, align, flags)    \
    kmem_cache_create(name, size, align, flags, NULL, NULL)
#else
#define wrap_kmem_cache_create(name, size, align, flags)    \
    kmem_cache_create(name, size, align, flags, NULL)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
#define netdev_mc_count(dev) ((dev)->mc_count)
#define usb_alloc_coherent(dev, size, mem_flags, dma) (usb_buffer_alloc((dev), (size), (mem_flags), (dma)))
#define usb_free_coherent(dev, size, addr, dma) (usb_buffer_free((dev), (size), (addr), (dma)))
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
#define daemonize(name, ...) do {} while (0)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
#define add_taint(flag, lockdep_ok) add_taint(flag)
#endif

#include "winnt_types.h"
#include "ndiswrapper.h"
#include "pe_linker.h"
#include "wrapmem.h"
#include "lin2win.h"
#include "loader.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline void netif_tx_lock(struct net_device *dev)
{
    spin_lock(&dev->xmit_lock);
}
static inline void netif_tx_unlock(struct net_device *dev)
{
    spin_unlock(&dev->xmit_lock);
}
static inline void netif_tx_lock_bh(struct net_device *dev)
{
    spin_lock_bh(&dev->xmit_lock);
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
    spin_unlock_bh(&dev->xmit_lock);
}
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
static inline void netif_poll_enable(struct net_device *dev)
{
}
static inline void netif_poll_disable(struct net_device *dev)
{
}
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
#define proc_net_root init_net.proc_net
#else
#define proc_net_root proc_net
#endif

#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) && \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))) || \
    (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,42))
#ifndef skb_frag_page
#define skb_frag_page(frag) ((frag)->page)
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
#define netdev_notifier_info_to_dev(x) ((struct net_device *)(x))
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
static inline void reinit_completion(struct completion *x)
{
    INIT_COMPLETION(*x);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
#define prandom_seed(seed) net_srandom(seed)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
static inline int strncasecmp(const char *s1, const char *s2, size_t n)
{
    return strnicmp(s1, s2, n);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
static inline void netif_trans_update(struct net_device *dev)
{
    dev->trans_start = jiffies;
}
#endif

/* TICK is 100ns */
#define TICKSPERSEC     10000000
#define TICKSPERMSEC        10000
#define SECSPERDAY      86400
#define TICKSPERJIFFY       ((TICKSPERSEC + HZ - 1) / HZ)

#define int_div_round(x, y) (((x) + ((y) - 1)) / (y))

/* 1601 to 1970 is 369 years plus 89 leap days */
#define SECS_1601_TO_1970   ((369 * 365 + 89) * (u64)SECSPERDAY)
#define TICKS_1601_TO_1970  (SECS_1601_TO_1970 * TICKSPERSEC)

/* 100ns units to HZ; if sys_time is negative, relative to current
 * clock, otherwise from year 1601 */
#define SYSTEM_TIME_TO_HZ(sys_time)                 \
    (((sys_time) <= 0) ? \
     int_div_round(((u64)HZ * (-(sys_time))), TICKSPERSEC) :    \
     int_div_round(((s64)HZ * ((sys_time) - ticks_1601())), TICKSPERSEC))

#define MSEC_TO_HZ(ms) int_div_round(((ms) * HZ), 1000)
#define USEC_TO_HZ(us) int_div_round(((us) * HZ), 1000000)

extern u64 wrap_ticks_to_boot;

static inline u64 ticks_1601(void)
{
    return wrap_ticks_to_boot + (u64)jiffies * TICKSPERJIFFY;
}
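
/* Worked example (illustrative, not part of the original header): a
 * Windows relative timeout is a negative count of 100ns ticks, so a
 * 100ms relative wait arrives as -1000000 and, with HZ == 250,
 * SYSTEM_TIME_TO_HZ(-1000000) == int_div_round(250 * 1000000, TICKSPERSEC)
 * == 25 jiffies.  A positive value is an absolute time since 1601 and
 * is first made relative by subtracting ticks_1601(). */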

typedef void (*generic_func)(void);

struct wrap_export {
    const char *name;
    generic_func func;
};

#ifdef CONFIG_X86_64

#define WIN_SYMBOL(name, argc)                  \
    {#name, (generic_func) win2lin_ ## name ## _ ## argc}
#define WIN_WIN_SYMBOL(name, argc)                  \
    {#name, (generic_func) win2lin__win_ ## name ## _ ## argc}
#define WIN_FUNC_DECL(name, argc)           \
    extern typeof(name) win2lin_ ## name ## _ ## argc;
#define WIN_FUNC_PTR(name, argc) win2lin_ ## name ## _ ## argc

#else

#define WIN_SYMBOL(name, argc) {#name, (generic_func)name}
#define WIN_WIN_SYMBOL(name, argc) {#name, (generic_func)_win_ ## name}
#define WIN_FUNC_DECL(name, argc)
#define WIN_FUNC_PTR(name, argc) name

#endif

#define WIN_FUNC(name, argc) (name)
/* map name s to f - if f is different from s */
#define WIN_SYMBOL_MAP(s, f)

#define POOL_TAG(A, B, C, D)                    \
    ((ULONG)((A) + ((B) << 8) + ((C) << 16) + ((D) << 24)))
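
/* Example (illustrative, not from the original header): POOL_TAG('N',
 * 'D', 'S', 'W') packs the four characters into a ULONG with 'N' in the
 * low byte, which is how Windows pool tags are laid out in memory on
 * little-endian machines. */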

struct pe_image {
    char name[MAX_DRIVER_NAME_LEN];
    UINT (*entry)(struct driver_object *, struct unicode_string *) wstdcall;
    void *image;
    int size;
    int type;

    IMAGE_NT_HEADERS *nt_hdr;
    IMAGE_OPTIONAL_HEADER *opt_hdr;
};

struct ndis_mp_block;

struct wrap_timer {
    struct nt_slist slist;
    struct timer_list timer;
    struct nt_timer *nt_timer;
    long repeat;
#ifdef TIMER_DEBUG
    unsigned long wrap_timer_magic;
#endif
};

struct ntos_work_item {
    struct nt_list list;
    void *arg1;
    void *arg2;
    NTOS_WORK_FUNC func;
};

struct wrap_device_setting {
    struct nt_list list;
    char name[MAX_SETTING_NAME_LEN];
    char value[MAX_SETTING_VALUE_LEN];
    void *encoded;
};

struct wrap_bin_file {
    char name[MAX_DRIVER_NAME_LEN];
    size_t size;
    void *data;
};

#define WRAP_DRIVER_CLIENT_ID 1

struct wrap_driver {
    struct nt_list list;
    struct driver_object *drv_obj;
    char name[MAX_DRIVER_NAME_LEN];
    char version[MAX_SETTING_VALUE_LEN];
    unsigned short num_pe_images;
    struct pe_image pe_images[MAX_DRIVER_PE_IMAGES];
    unsigned short num_bin_files;
    struct wrap_bin_file *bin_files;
    struct nt_list settings;
    int dev_type;
    struct ndis_driver *ndis_driver;
};

enum hw_status {
    HW_INITIALIZED = 1, HW_SUSPENDED, HW_HALTED, HW_DISABLED,
};

struct wrap_device {
    /* first part is (de)initialized once by loader */
    struct nt_list list;
    int dev_bus;
    int vendor;
    int device;
    int subvendor;
    int subdevice;
    char conf_file_name[MAX_DRIVER_NAME_LEN];
    char driver_name[MAX_DRIVER_NAME_LEN];
    struct wrap_driver *driver;
    struct nt_list settings;

    /* rest should be (de)initialized when a device is
     * (un)plugged */
    struct cm_resource_list *resource_list;
    unsigned long hw_status;
    struct device_object *pdo;
    union {
        struct {
            struct pci_dev *pdev;
            enum device_power_state wake_state;
        } pci;
        struct {
            struct usb_device *udev;
            struct usb_interface *intf;
            int num_alloc_urbs;
            struct nt_list wrap_urb_list;
        } usb;
    };
};

#define wrap_is_pci_bus(dev_bus)            \
    (WRAP_BUS(dev_bus) == WRAP_PCI_BUS ||       \
     WRAP_BUS(dev_bus) == WRAP_PCMCIA_BUS)
#ifdef ENABLE_USB
/* earlier versions of ndiswrapper used 0 as USB_BUS */
#define wrap_is_usb_bus(dev_bus)            \
    (WRAP_BUS(dev_bus) == WRAP_USB_BUS ||       \
     WRAP_BUS(dev_bus) == WRAP_INTERNAL_BUS)
#else
#define wrap_is_usb_bus(dev_bus) 0
#endif
#define wrap_is_bluetooth_device(dev_bus)           \
    (WRAP_DEVICE(dev_bus) == WRAP_BLUETOOTH_DEVICE1 ||  \
     WRAP_DEVICE(dev_bus) == WRAP_BLUETOOTH_DEVICE2)

extern struct workqueue_struct *ntos_wq;
extern struct workqueue_struct *ndis_wq;
extern struct workqueue_struct *wrapndis_wq;

#define atomic_unary_op(var, size, oper)                \
do {                                    \
    if (size == 1)                          \
        __asm__ __volatile__(                   \
            LOCK_PREFIX oper "b %b0\n\t" : "+m" (var)); \
    else if (size == 2)                     \
        __asm__ __volatile__(                   \
            LOCK_PREFIX oper "w %w0\n\t" : "+m" (var)); \
    else if (size == 4)                     \
        __asm__ __volatile__(                   \
            LOCK_PREFIX oper "l %0\n\t" : "+m" (var));  \
    else if (size == 8)                     \
        __asm__ __volatile__(                   \
            LOCK_PREFIX oper "q %q0\n\t" : "+m" (var)); \
    else {                              \
        extern void _invalid_op_size_(void);            \
        _invalid_op_size_();                    \
    }                               \
} while (0)

#define atomic_inc_var_size(var, size) atomic_unary_op(var, size, "inc")

#define atomic_inc_var(var) atomic_inc_var_size(var, sizeof(var))

#define atomic_dec_var_size(var, size) atomic_unary_op(var, size, "dec")

#define atomic_dec_var(var) atomic_dec_var_size(var, sizeof(var))

#define pre_atomic_add(var, i)                  \
({                              \
    typeof(var) pre;                    \
    __asm__ __volatile__(                   \
        LOCK_PREFIX "xadd %0, %1\n\t"           \
        : "=r"(pre), "+m"(var)              \
        : "0"(i));                  \
    pre;                            \
})

#define post_atomic_add(var, i) (pre_atomic_add(var, i) + i)
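
/* Illustrative sketch (not part of the original header): these helpers
 * pick the operand width from sizeof(var), so one macro serves 8-, 16-,
 * 32- and 64-bit variables.  The counter below is hypothetical.
 *
 *    static u32 pending;
 *
 *    atomic_inc_var(pending);              // lock inc, 32-bit form
 *    atomic_dec_var(pending);
 *    u32 old = pre_atomic_add(pending, 4); // fetch-then-add via xadd
 */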

//#define DEBUG_IRQL 1

#ifdef DEBUG_IRQL
#define assert_irql(cond)                       \
do {                                    \
    KIRQL _irql_ = current_irql();                  \
    if (!(cond)) {                          \
        WARNING("assertion '%s' failed: %d", #cond, _irql_);    \
        DBG_BLOCK(4) {                      \
            dump_stack();                   \
        }                           \
    }                               \
} while (0)
#else
#define assert_irql(cond) do { } while (0)
#endif

/* When preemption is enabled, raising IRQL to DISPATCH_LEVEL should,
 * strictly speaking, use preempt_disable() to match the Windows
 * semantics.  However, it seems to be enough to use a mutex instead,
 * so that only one ndiswrapper thread at a time runs at DISPATCH_LEVEL
 * on a processor; that is what we use until we learn otherwise.  If
 * preempt_(en|dis)able is required for some reason, comment out the
 * following #define. */

#define WRAP_PREEMPT 1

#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_RT)
#ifndef WRAP_PREEMPT
#define WRAP_PREEMPT 1
#endif
#endif

//#undef WRAP_PREEMPT

#ifdef WRAP_PREEMPT

struct irql_info {
    int count;
    struct mutex lock;
#ifdef CONFIG_SMP
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0)
    cpumask_t cpus_mask;
#else
    cpumask_t cpus_allowed;
#endif
#endif
    struct task_struct *task;
};

DECLARE_PER_CPU(struct irql_info, irql_info);

static inline KIRQL raise_irql(KIRQL newirql)
{
    struct irql_info *info;

    assert(newirql == DISPATCH_LEVEL);
    info = &get_cpu_var(irql_info);
    if (info->task == current) {
        assert(info->count > 0);
        assert(mutex_is_locked(&info->lock));
#if defined(CONFIG_SMP) && DEBUG >= 1
        assert(cpumask_equal(tsk_cpus_allowed(current),
                     cpumask_of(smp_processor_id())));
#endif
        info->count++;
        put_cpu_var(irql_info);
        return DISPATCH_LEVEL;
    }
    /* TODO: is this enough to pin down to current cpu? */
#ifdef CONFIG_SMP
    assert(task_cpu(current) == smp_processor_id());
    cpumask_copy(tsk_cpus_allowed(info), tsk_cpus_allowed(current));
    set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif
    put_cpu_var(irql_info);
    mutex_lock(&info->lock);
    assert(info->count == 0);
    assert(info->task == NULL);
    info->count = 1;
    info->task = current;
    return PASSIVE_LEVEL;
}

static inline void lower_irql(KIRQL oldirql)
{
    struct irql_info *info;

    assert(oldirql <= DISPATCH_LEVEL);
    info = &get_cpu_var(irql_info);
    assert(info->task == current);
    assert(mutex_is_locked(&info->lock));
    assert(info->count > 0);
    if (--info->count == 0) {
        info->task = NULL;
#ifdef CONFIG_SMP
        set_cpus_allowed_ptr(current, tsk_cpus_allowed(info));
#endif
        mutex_unlock(&info->lock);
    }
    put_cpu_var(irql_info);
}

static inline KIRQL current_irql(void)
{
    int count;
    if (in_irq() || irqs_disabled())
        EXIT4(return DIRQL);
    if (in_atomic() || in_interrupt())
        EXIT4(return SOFT_IRQL);
    count = get_cpu_var(irql_info).count;
    put_cpu_var(irql_info);
    if (count)
        EXIT6(return DISPATCH_LEVEL);
    else
        EXIT6(return PASSIVE_LEVEL);
}

#else

static inline KIRQL current_irql(void)
{
    if (in_irq() || irqs_disabled())
        EXIT4(return DIRQL);
    if (in_interrupt())
        EXIT4(return SOFT_IRQL);
    if (in_atomic())
        EXIT6(return DISPATCH_LEVEL);
    else
        EXIT6(return PASSIVE_LEVEL);
}

static inline KIRQL raise_irql(KIRQL newirql)
{
    KIRQL ret = in_atomic() ? DISPATCH_LEVEL : PASSIVE_LEVEL;
    assert(newirql == DISPATCH_LEVEL);
    assert(current_irql() <= DISPATCH_LEVEL);
    preempt_disable();
    return ret;
}

static inline void lower_irql(KIRQL oldirql)
{
    assert(current_irql() == DISPATCH_LEVEL);
    preempt_enable();
}

#endif
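
/* Illustrative sketch (not part of the original header): the usual
 * raise/lower pairing around code that must run at DISPATCH_LEVEL.
 *
 *    KIRQL irql = raise_irql(DISPATCH_LEVEL);
 *    // ... work that Windows drivers expect at DISPATCH_LEVEL ...
 *    lower_irql(irql);
 */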

#define irql_gfp() (in_atomic() ? GFP_ATOMIC : GFP_KERNEL)

/* Windows spinlocks are of type ULONG_PTR which is not big enough to
 * store Linux spinlocks; so we implement Windows spinlocks using
 * ULONG_PTR space with our own functions/macros */

/* Windows seems to use 0 for unlocked state of spinlock - if Linux
 * convention of 1 for unlocked state is used, at least prism54 driver
 * crashes */

#define NT_SPIN_LOCK_UNLOCKED 0
#define NT_SPIN_LOCK_LOCKED 1

static inline void nt_spin_lock_init(NT_SPIN_LOCK *lock)
{
    *lock = NT_SPIN_LOCK_UNLOCKED;
}

#ifdef CONFIG_SMP

static inline void nt_spin_lock(NT_SPIN_LOCK *lock)
{
    while (1) {
        unsigned long lockval = xchg(lock, NT_SPIN_LOCK_LOCKED);

        if (likely(lockval == NT_SPIN_LOCK_UNLOCKED))
            break;
        if (unlikely(lockval > NT_SPIN_LOCK_LOCKED)) {
            ERROR("bad spinlock: 0x%lx at %p", lockval, lock);
            return;
        }
        /* "rep; nop" doesn't change cx register, it's a "pause" */
        __asm__ __volatile__("rep; nop");
    }
}

static inline void nt_spin_unlock(NT_SPIN_LOCK *lock)
{
    unsigned long lockval = xchg(lock, NT_SPIN_LOCK_UNLOCKED);

    if (likely(lockval == NT_SPIN_LOCK_LOCKED))
        return;
    WARNING("unlocking unlocked spinlock: 0x%lx at %p", lockval, lock);
}

#else // CONFIG_SMP

#define nt_spin_lock(lock) do { } while (0)

#define nt_spin_unlock(lock) do { } while (0)

#endif // CONFIG_SMP

/* When the kernel has preemption disabled anyway (e.g., in interrupt
 * handlers), we need to fake it so the driver thinks it is running at
 * the right IRQL */

/* raise IRQL to given (higher) IRQL if necessary before locking */
static inline KIRQL nt_spin_lock_irql(NT_SPIN_LOCK *lock, KIRQL newirql)
{
    KIRQL oldirql = raise_irql(newirql);
    nt_spin_lock(lock);
    return oldirql;
}

/* lower IRQL to given (lower) IRQL if necessary after unlocking */
static inline void nt_spin_unlock_irql(NT_SPIN_LOCK *lock, KIRQL oldirql)
{
    nt_spin_unlock(lock);
    lower_irql(oldirql);
}

#define nt_spin_lock_irqsave(lock, flags)               \
do {                                    \
    local_irq_save(flags);                      \
    preempt_disable();                      \
    nt_spin_lock(lock);                     \
} while (0)

#define nt_spin_unlock_irqrestore(lock, flags)              \
do {                                    \
    nt_spin_unlock(lock);                       \
    preempt_enable();                       \
    local_irq_restore(flags);                   \
} while (0)
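
/* Illustrative sketch (not part of the original header): the irqsave
 * variants mirror the Linux spin_lock_irqsave() pattern.  The lock
 * below is hypothetical.
 *
 *    NT_SPIN_LOCK lock;
 *    unsigned long flags;
 *
 *    nt_spin_lock_init(&lock);
 *    nt_spin_lock_irqsave(&lock, flags);
 *    // ... critical section with local interrupts off ...
 *    nt_spin_unlock_irqrestore(&lock, flags);
 */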

static inline ULONG SPAN_PAGES(void *ptr, SIZE_T length)
{
    return PAGE_ALIGN(((unsigned long)ptr & (PAGE_SIZE - 1)) + length)
        >> PAGE_SHIFT;
}
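
/* Worked example (illustrative): with 4KB pages, a buffer of 100 bytes
 * starting at page offset 4046 crosses one page boundary:
 * PAGE_ALIGN(4046 + 100) == 8192, and 8192 >> PAGE_SHIFT == 2 pages. */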

#ifdef CONFIG_X86_64

/* TODO: can these be implemented without using spinlock? */

static inline struct nt_slist *PushEntrySList(nt_slist_header *head,
                          struct nt_slist *entry,
                          NT_SPIN_LOCK *lock)
{
    KIRQL irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
    entry->next = head->next;
    head->next = entry;
    head->depth++;
    nt_spin_unlock_irql(lock, irql);
    TRACE4("%p, %p, %p", head, entry, entry->next);
    return entry->next;
}

static inline struct nt_slist *PopEntrySList(nt_slist_header *head,
                         NT_SPIN_LOCK *lock)
{
    struct nt_slist *entry;
    KIRQL irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
    entry = head->next;
    if (entry) {
        head->next = entry->next;
        head->depth--;
    }
    nt_spin_unlock_irql(lock, irql);
    TRACE4("%p, %p", head, entry);
    return entry;
}

#else

#define u64_low_32(x) ((u32)(x))
#define u64_high_32(x) ((u32)((x) >> 32))

static inline u64 nt_cmpxchg8b(volatile u64 *ptr, u64 old, u64 new)
{
    u64 prev;

    __asm__ __volatile__(
        "\n"
        LOCK_PREFIX "cmpxchg8b %0\n"
        : "+m" (*ptr), "=A" (prev)
        : "A" (old), "b" (u64_low_32(new)), "c" (u64_high_32(new)));
    return prev;
}

/* slist routines below update slist atomically - no need for
 * spinlocks */

static inline struct nt_slist *PushEntrySList(nt_slist_header *head,
                          struct nt_slist *entry,
                          NT_SPIN_LOCK *lock)
{
    nt_slist_header old, new;
    do {
        old.align = head->align;
        entry->next = old.next;
        new.next = entry;
        new.depth = old.depth + 1;
    } while (nt_cmpxchg8b(&head->align, old.align, new.align) != old.align);
    TRACE4("%p, %p, %p", head, entry, old.next);
    return old.next;
}

static inline struct nt_slist *PopEntrySList(nt_slist_header *head,
                         NT_SPIN_LOCK *lock)
{
    struct nt_slist *entry;
    nt_slist_header old, new;
    do {
        old.align = head->align;
        entry = old.next;
        if (!entry)
            break;
        new.next = entry->next;
        new.depth = old.depth - 1;
    } while (nt_cmpxchg8b(&head->align, old.align, new.align) != old.align);
    TRACE4("%p, %p", head, entry);
    return entry;
}

#endif
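
/* Illustrative sketch (not part of the original header): the lock is
 * used only by the x86_64 implementation; the x86 version updates the
 * list atomically with cmpxchg8b.  Names below are hypothetical.
 *
 *    nt_slist_header head;
 *    NT_SPIN_LOCK lock;
 *    struct nt_slist item;
 *
 *    PushEntrySList(&head, &item, &lock);
 *    struct nt_slist *popped = PopEntrySList(&head, &lock);
 */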

#define sleep_hz(n)                 \
do {                            \
    set_current_state(TASK_INTERRUPTIBLE);      \
    schedule_timeout(n);                \
} while (0)

int ntoskernel_init(void);
void ntoskernel_exit(void);
int ntoskernel_init_device(struct wrap_device *wd);
void ntoskernel_exit_device(struct wrap_device *wd);
void *allocate_object(ULONG size, enum common_object_type type,
              struct unicode_string *name);

#ifdef ENABLE_USB
int usb_init(void);
void usb_exit(void);
#else
static inline int usb_init(void) { return 0; }
static inline void usb_exit(void) {}
#endif
int usb_init_device(struct wrap_device *wd);
void usb_exit_device(struct wrap_device *wd);

int wrap_procfs_init(void);
void wrap_procfs_remove(void);

int link_pe_images(struct pe_image *pe_image, unsigned short n);

int stricmp(const char *s1, const char *s2);
void dump_bytes(const char *name, const u8 *from, int len);
struct mdl *allocate_init_mdl(void *virt, ULONG length);
void free_mdl(struct mdl *mdl);
struct driver_object *find_bus_driver(const char *name);
void free_custom_extensions(struct driver_extension *drv_obj_ext);
struct nt_thread *get_current_nt_thread(void);
u64 ticks_1601(void);
int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2);
void wrap_init_timer(struct nt_timer *nt_timer, enum timer_type type,
             struct ndis_mp_block *nmb);
BOOLEAN wrap_set_timer(struct nt_timer *nt_timer, unsigned long expires_hz,
               unsigned long repeat_hz, struct kdpc *kdpc);

LONG InterlockedDecrement(LONG volatile *val) wfastcall;
LONG InterlockedIncrement(LONG volatile *val) wfastcall;
struct nt_list *ExInterlockedInsertHeadList
    (struct nt_list *head, struct nt_list *entry,
     NT_SPIN_LOCK *lock) wfastcall;
struct nt_list *ExInterlockedInsertTailList
    (struct nt_list *head, struct nt_list *entry,
     NT_SPIN_LOCK *lock) wfastcall;
struct nt_list *ExInterlockedRemoveHeadList
    (struct nt_list *head, NT_SPIN_LOCK *lock) wfastcall;
NTSTATUS IofCallDriver(struct device_object *dev_obj, struct irp *irp) wfastcall;
KIRQL KfRaiseIrql(KIRQL newirql) wfastcall;
void KfLowerIrql(KIRQL oldirql) wfastcall;
KIRQL KfAcquireSpinLock(NT_SPIN_LOCK *lock) wfastcall;
void KfReleaseSpinLock(NT_SPIN_LOCK *lock, KIRQL oldirql) wfastcall;
void IofCompleteRequest(struct irp *irp, CHAR prio_boost) wfastcall;
void KefReleaseSpinLockFromDpcLevel(NT_SPIN_LOCK *lock) wfastcall;

LONG ObfReferenceObject(void *object) wfastcall;
void ObfDereferenceObject(void *object) wfastcall;

#define ObReferenceObject(object) ObfReferenceObject(object)
#define ObDereferenceObject(object) ObfDereferenceObject(object)

/* prevent expansion of ExAllocatePoolWithTag macro */
void *(ExAllocatePoolWithTag)(enum pool_type pool_type, SIZE_T size,
                  ULONG tag) wstdcall;

void ExFreePool(void *p) wstdcall;
ULONG MmSizeOfMdl(void *base, ULONG length) wstdcall;
void __iomem *MmMapIoSpace(PHYSICAL_ADDRESS phys_addr, SIZE_T size,
           enum memory_caching_type cache) wstdcall;
void MmUnmapIoSpace(void __iomem *addr, SIZE_T size) wstdcall;
void MmProbeAndLockPages(struct mdl *mdl, KPROCESSOR_MODE access_mode,
             enum lock_operation operation) wstdcall;
void MmUnlockPages(struct mdl *mdl) wstdcall;
void KeInitializeEvent(struct nt_event *nt_event,
               enum event_type type, BOOLEAN state) wstdcall;
LONG KeSetEvent(struct nt_event *nt_event, KPRIORITY incr,
        BOOLEAN wait) wstdcall;
LONG KeResetEvent(struct nt_event *nt_event) wstdcall;
BOOLEAN queue_kdpc(struct kdpc *kdpc);
BOOLEAN dequeue_kdpc(struct kdpc *kdpc);

NTSTATUS IoConnectInterrupt(struct kinterrupt **kinterrupt,
                PKSERVICE_ROUTINE service_routine,
                void *service_context, NT_SPIN_LOCK *lock,
                ULONG vector, KIRQL irql, KIRQL synch_irql,
                enum kinterrupt_mode interrupt_mode,
                BOOLEAN shareable, KAFFINITY processor_enable_mask,
                BOOLEAN floating_save) wstdcall;
void IoDisconnectInterrupt(struct kinterrupt *interrupt) wstdcall;
BOOLEAN KeSynchronizeExecution(struct kinterrupt *interrupt,
                   PKSYNCHRONIZE_ROUTINE synch_routine,
                   void *ctx) wstdcall;

NTSTATUS KeWaitForSingleObject(void *object, KWAIT_REASON reason,
                   KPROCESSOR_MODE waitmode, BOOLEAN alertable,
                   LARGE_INTEGER *timeout) wstdcall;
void MmBuildMdlForNonPagedPool(struct mdl *mdl) wstdcall;
NTSTATUS IoCreateDevice(struct driver_object *driver, ULONG dev_ext_length,
            struct unicode_string *dev_name, DEVICE_TYPE dev_type,
            ULONG dev_chars, BOOLEAN exclusive,
            struct device_object **dev_obj) wstdcall;
NTSTATUS IoCreateSymbolicLink(struct unicode_string *link,
                  struct unicode_string *dev_name) wstdcall;
void IoDeleteDevice(struct device_object *dev) wstdcall;
void IoDetachDevice(struct device_object *topdev) wstdcall;
struct device_object *IoGetAttachedDevice(struct device_object *dev) wstdcall;
struct device_object *IoGetAttachedDeviceReference
    (struct device_object *dev) wstdcall;
NTSTATUS IoAllocateDriverObjectExtension
    (struct driver_object *drv_obj, void *client_id, ULONG extlen,
     void **ext) wstdcall;
void *IoGetDriverObjectExtension(struct driver_object *drv,
                 void *client_id) wstdcall;
struct device_object *IoAttachDeviceToDeviceStack
    (struct device_object *src, struct device_object *dst) wstdcall;
BOOLEAN IoCancelIrp(struct irp *irp) wstdcall;
struct irp *IoBuildSynchronousFsdRequest
    (ULONG major_func, struct device_object *dev_obj, void *buf,
     ULONG length, LARGE_INTEGER *offset, struct nt_event *event,
     struct io_status_block *status) wstdcall;

NTSTATUS IoPassIrpDown(struct device_object *dev_obj, struct irp *irp) wstdcall;
WIN_FUNC_DECL(IoPassIrpDown,2);
NTSTATUS IoSyncForwardIrp(struct device_object *dev_obj,
              struct irp *irp) wstdcall;
NTSTATUS IoAsyncForwardIrp(struct device_object *dev_obj,
               struct irp *irp) wstdcall;
NTSTATUS IoInvalidDeviceRequest(struct device_object *dev_obj,
                struct irp *irp) wstdcall;

void KeInitializeSpinLock(NT_SPIN_LOCK *lock) wstdcall;
void IoAcquireCancelSpinLock(KIRQL *irql) wstdcall;
void IoReleaseCancelSpinLock(KIRQL irql) wstdcall;

NTSTATUS RtlUnicodeStringToAnsiString
    (struct ansi_string *dst, const struct unicode_string *src,
     BOOLEAN dup) wstdcall;
NTSTATUS RtlAnsiStringToUnicodeString
    (struct unicode_string *dst, const struct ansi_string *src,
     BOOLEAN dup) wstdcall;
void RtlInitAnsiString(struct ansi_string *dst, const char *src) wstdcall;
void RtlInitUnicodeString(struct unicode_string *dest,
              const wchar_t *src) wstdcall;
void RtlFreeUnicodeString(struct unicode_string *string) wstdcall;
void RtlFreeAnsiString(struct ansi_string *string) wstdcall;
LONG RtlCompareUnicodeString(const struct unicode_string *s1,
                 const struct unicode_string *s2,
                 BOOLEAN case_insensitive) wstdcall;
NTSTATUS RtlUpcaseUnicodeString(struct unicode_string *dst,
                struct unicode_string *src,
                BOOLEAN alloc) wstdcall;
BOOLEAN KeCancelTimer(struct nt_timer *nt_timer) wstdcall;
void KeInitializeDpc(struct kdpc *kdpc, void *func, void *ctx) wstdcall;

extern spinlock_t ntoskernel_lock;
extern spinlock_t irp_cancel_lock;
extern struct nt_list object_list;
extern CCHAR cpu_count;
#ifdef CONFIG_X86_64
extern struct kuser_shared_data kuser_shared_data;
#endif

#define IoCompleteRequest(irp, prio) IofCompleteRequest(irp, prio)
#define IoCallDriver(dev, irp) IofCallDriver(dev, irp)

#if defined(IO_DEBUG)
#define DUMP_IRP(_irp)                          \
do {                                    \
    struct io_stack_location *_irp_sl;              \
    _irp_sl = IoGetCurrentIrpStackLocation(_irp);           \
    IOTRACE("irp: %p, stack size: %d, cl: %d, sl: %p, dev_obj: %p, " \
        "mj_fn: %d, minor_fn: %d, nt_urb: %p, event: %p",   \
        _irp, _irp->stack_count, (_irp)->current_location,  \
        _irp_sl, _irp_sl->dev_obj, _irp_sl->major_fn,       \
        _irp_sl->minor_fn, IRP_URB(_irp),           \
        (_irp)->user_event);                    \
} while (0)
#else
#define DUMP_IRP(_irp) do { } while (0)
#endif

#endif // _NTOSKERNEL_H_