"Fossies" - the Fresh Open Source Software Archive

Member "drbd-9.0.20-1/drbd/drbd-kernel-compat/cocci_cache/c872f35aa9c483f16194f18edaebd2d7/compat.patch" (10 Oct 2019, 97660 Bytes) of package /linux/misc/drbd-9.0.20-1.tar.gz:


As a special service, "Fossies" has formatted the requested source page as HTML using (guessed) Diff source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively, the uninterpreted source code file can be viewed or downloaded. See also the latest Fossies "Diffs" side-by-side code changes report for "compat.patch": 9.0.20-0rc2_vs_9.0.20-0rc3.
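
The patch below is a coccinelle-generated compatibility patch (note the /tmp/cocci-output-* target files) that adapts the drbd 9.0.20 sources to older kernel APIs: mempool_t and bio_set objects become pointers, bi_opf/bi_status based bio handling reverts to the bi_rw/errno style, and timer callbacks take an "unsigned long data" cookie instead of a "struct timer_list *". As a minimal, hypothetical sketch (names are invented, not taken from the patch), the pre-4.15 timer convention that the converted *_timer_fn() callbacks follow looks roughly like this:

    /* Hypothetical illustration only -- not part of compat.patch.
     * Pre-4.15 kernels pass an opaque "unsigned long data" cookie to
     * timer callbacks; the patch rewrites drbd's from_timer()-style
     * callbacks back to this convention. */
    #include <linux/timer.h>

    struct my_ctx {                 /* stand-in for e.g. drbd_resource */
            struct timer_list timer;
            /* ... */
    };

    static void my_timer_fn(unsigned long data)
    {
            struct my_ctx *ctx = (struct my_ctx *)data;  /* recover context */
            /* ... do the timer work with ctx ... */
    }

    static void my_ctx_init(struct my_ctx *ctx)
    {
            /* old-style API: the third argument is stored in timer->data
             * and handed back to the callback when the timer fires */
            setup_timer(&ctx->timer, my_timer_fn, (unsigned long)ctx);
    }

Kernels 4.15 and later replaced this with timer_setup()/from_timer(), which is why the upstream drbd code uses "struct timer_list *" callbacks that this patch converts back.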

    1 --- ./drbd_int.h
    2 +++ /tmp/cocci-output-19039-649506-drbd_int.h
    3 @@ -1765,8 +1765,8 @@ extern struct kmem_cache *drbd_request_c
    4  extern struct kmem_cache *drbd_ee_cache;   /* peer requests */
    5  extern struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
    6  extern struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
    7 -extern mempool_t drbd_request_mempool;
    8 -extern mempool_t drbd_ee_mempool;
    9 +extern mempool_t *drbd_request_mempool;
   10 +extern mempool_t *drbd_ee_mempool;
   11  
   12  /* drbd's page pool, used to buffer data received from the peer,
   13   * or data requested by the peer.
   14 @@ -1792,16 +1792,16 @@ extern wait_queue_head_t drbd_pp_wait;
   15   * 128 should be plenty, currently we probably can get away with as few as 1.
   16   */
   17  #define DRBD_MIN_POOL_PAGES    128
   18 -extern mempool_t drbd_md_io_page_pool;
   19 +extern mempool_t *drbd_md_io_page_pool;
   20  
   21  /* We also need to make sure we get a bio
   22   * when we need it for housekeeping purposes */
   23 -extern struct bio_set drbd_md_io_bio_set;
   24 +extern struct bio_set * drbd_md_io_bio_set;
   25  /* to allocate from that set */
   26  extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
   27  
   28  /* And a bio_set for cloning */
   29 -extern struct bio_set drbd_io_bio_set;
   30 +extern struct bio_set * drbd_io_bio_set;
   31  
   32  extern struct drbd_peer_device *create_peer_device(struct drbd_device *, struct drbd_connection *);
   33  extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor,
   34 @@ -1826,13 +1826,15 @@ extern void drbd_transport_shutdown(stru
   35  extern void drbd_destroy_connection(struct kref *kref);
   36  extern void conn_free_crypto(struct drbd_connection *connection);
   37  
   38 +extern int drbd_merge_bvec(struct request_queue *, struct bvec_merge_data *,
   39 +              struct bio_vec *);
   40  /* drbd_req */
   41  extern void do_submit(struct work_struct *ws);
   42  #ifndef CONFIG_DRBD_TIMING_STATS
   43  #define __drbd_make_request(d,b,k,j) __drbd_make_request(d,b,j)
   44  #endif
   45  extern void __drbd_make_request(struct drbd_device *, struct bio *, ktime_t, unsigned long);
   46 -extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
   47 +extern int drbd_make_request(struct request_queue *q, struct bio *bio);
   48  
   49  /* drbd_nl.c */
   50  enum suspend_scope {
   51 @@ -1885,7 +1887,7 @@ extern void verify_progress(struct drbd_
   52  extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
   53  extern void drbd_md_put_buffer(struct drbd_device *device);
   54  extern int drbd_md_sync_page_io(struct drbd_device *device,
   55 -       struct drbd_backing_dev *bdev, sector_t sector, int op);
   56 +       struct drbd_backing_dev *bdev, sector_t sector, int rw);
   57  extern void drbd_ov_out_of_sync_found(struct drbd_peer_device *, sector_t, int);
   58  extern void wait_until_done_or_force_detached(struct drbd_device *device,
   59         struct drbd_backing_dev *bdev, unsigned int *done);
   60 @@ -1893,7 +1895,7 @@ extern void drbd_rs_controller_reset(str
   61  extern void drbd_check_peers_new_current_uuid(struct drbd_device *);
   62  extern void drbd_ping_peer(struct drbd_connection *connection);
   63  extern struct drbd_peer_device *peer_device_by_node_id(struct drbd_device *, int);
   64 -extern void repost_up_to_date_fn(struct timer_list *t);
   65 +extern void repost_up_to_date_fn(unsigned long data);
   66  
   67  static inline void ov_out_of_sync_print(struct drbd_peer_device *peer_device)
   68  {
   69 @@ -1931,15 +1933,15 @@ extern int w_restart_disk_io(struct drbd
   70  extern int w_start_resync(struct drbd_work *, int);
   71  extern int w_send_uuids(struct drbd_work *, int);
   72  
   73 -extern void resync_timer_fn(struct timer_list *t);
   74 -extern void start_resync_timer_fn(struct timer_list *t);
   75 +extern void resync_timer_fn(unsigned long data);
   76 +extern void start_resync_timer_fn(unsigned long data);
   77  
   78  extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
   79  
   80  /* bi_end_io handlers */
   81 -extern void drbd_md_endio(struct bio *bio);
   82 -extern void drbd_peer_request_endio(struct bio *bio);
   83 -extern void drbd_request_endio(struct bio *bio);
   84 +extern void drbd_md_endio(struct bio *bio, int error);
   85 +extern void drbd_peer_request_endio(struct bio *bio, int error);
   86 +extern void drbd_request_endio(struct bio *bio, int error);
   87  
   88  void __update_timing_details(
   89         struct drbd_thread_timing_details *tdp,
   90 @@ -2005,8 +2007,9 @@ extern bool drbd_rs_c_min_rate_throttle(
   91  extern bool drbd_rs_should_slow_down(struct drbd_peer_device *, sector_t,
   92                      bool throttle_if_app_is_waiting);
   93  extern int drbd_submit_peer_request(struct drbd_device *,
   94 -                   struct drbd_peer_request *, const unsigned,
   95 -                   const unsigned, const int);
   96 +                   struct drbd_peer_request *,
   97 +                   const unsigned,
   98 +                   const int);
   99  extern void drbd_cleanup_after_failed_submit_peer_request(struct drbd_peer_request *peer_req);
  100  extern void drbd_cleanup_peer_requests_wfa(struct drbd_device *device, struct list_head *cleanup);
  101  extern int drbd_free_peer_reqs(struct drbd_resource *, struct list_head *, bool is_net_ee);
  102 @@ -2020,7 +2023,7 @@ extern void apply_unacked_peer_requests(
  103  extern struct drbd_connection *drbd_connection_by_node_id(struct drbd_resource *, int);
  104  extern struct drbd_connection *drbd_get_connection_by_node_id(struct drbd_resource *, int);
  105  extern void queue_queued_twopc(struct drbd_resource *resource);
  106 -extern void queued_twopc_timer_fn(struct timer_list *t);
  107 +extern void queued_twopc_timer_fn(unsigned long data);
  108  extern bool drbd_have_local_disk(struct drbd_resource *resource);
  109  extern enum drbd_state_rv drbd_support_2pc_resize(struct drbd_resource *resource);
  110  extern enum determine_dev_size
  111 @@ -2053,8 +2056,8 @@ static inline void drbd_generic_make_req
  112     __release(local);
  113  
  114     if (drbd_insert_fault(device, fault_type)) {
  115 -       bio->bi_status = BLK_STS_IOERR;
  116 -       bio_endio(bio);
  117 +       bio_endio(bio,
  118 +             (10 == 0 ? 0 : 10 == 9 ? -ENOMEM : 10 == 1 ? -EOPNOTSUPP : -EIO));
  119     } else {
  120         generic_make_request(bio);
  121     }
  122 @@ -2063,8 +2066,8 @@ static inline void drbd_generic_make_req
  123  void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
  124                   enum write_ordering_e wo);
  125  
  126 -extern void twopc_timer_fn(struct timer_list *t);
  127 -extern void connect_timer_fn(struct timer_list *t);
  128 +extern void twopc_timer_fn(unsigned long data);
  129 +extern void connect_timer_fn(unsigned long data);
  130  
  131  /* drbd_proc.c */
  132  extern struct proc_dir_entry *drbd_proc;
  133 @@ -2673,6 +2676,21 @@ static inline int drbd_queue_order_type(
  134     return QUEUE_ORDERED_NONE;
  135  }
  136  
  137 +static inline void drbd_blk_run_queue(struct request_queue *q)
  138 +{
  139 +   if (q && q->unplug_fn)
  140 +       q->unplug_fn(q);
  141 +
  142 +
  143 +}
  144 +static inline void drbd_kick_lo(struct drbd_device *device)
  145 +{
  146 +   if (get_ldev(device)) {
  147 +       drbd_blk_run_queue(bdev_get_queue(device->ldev->backing_bdev));
  148 +       put_ldev(device);
  149 +   }
  150 +}
  151 +
  152  /* resync bitmap */
  153  /* 128MB sized 'bitmap extent' to track syncer usage */
  154  struct bm_extent {
  155 --- ./drbd_req.h
  156 +++ /tmp/cocci-output-19039-26176c-drbd_req.h
  157 @@ -295,7 +295,7 @@ extern void __req_mod(struct drbd_reques
  158         struct bio_and_error *m);
  159  extern void complete_master_bio(struct drbd_device *device,
  160         struct bio_and_error *m);
  161 -extern void request_timer_fn(struct timer_list *t);
  162 +extern void request_timer_fn(unsigned long data);
  163  extern void tl_walk(struct drbd_connection *connection, enum drbd_req_event what);
  164  extern void _tl_walk(struct drbd_connection *connection, enum drbd_req_event what);
  165  extern void __tl_walk(struct drbd_resource *const resource,
  166 --- lru_cache.c
  167 +++ /tmp/cocci-output-19039-ecfbe1-lru_cache.c
  168 @@ -234,7 +234,7 @@ static struct lc_element *__lc_find(stru
  169  
  170     BUG_ON(!lc);
  171     BUG_ON(!lc->nr_elements);
  172 -   hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
  173 +   for (e = ((lc_hash_slot(lc, enr))->first) ? hlist_entry((lc_hash_slot(lc, enr))->first, typeof(*(e)), colision) : NULL; e; e = ((e)->colision.next) ? hlist_entry((e)->colision.next, typeof(*(e)), colision) : NULL) {
  174         /* "about to be changed" elements, pending transaction commit,
  175          * are hashed by their "new number". "Normal" elements have
  176          * lc_number == lc_new_number. */
  177 --- drbd_receiver.c
  178 +++ /tmp/cocci-output-19039-6726b6-drbd_receiver.c
  179 @@ -336,6 +336,37 @@ static void rs_sectors_came_in(struct dr
  180             &peer_device->connection->sender_work,
  181             &peer_device->resync_work);
  182  }
  183 +/* kick lower level device, if we have more than (arbitrary number)
  184 + * reference counts on it, which typically are locally submitted io
  185 + * requests.  don't use unacked_cnt, so we speed up proto A and B, too. */
  186 +static void maybe_kick_lo(struct drbd_device *device){
  187 +   struct disk_conf *dc;
  188 +   unsigned int watermark = 1000000;
  189 +
  190 +   if (get_ldev(device)) {
  191 +       rcu_read_lock();
  192 +       dc = rcu_dereference(device->ldev->disk_conf);
  193 +       if (dc)
  194 +               min_not_zero(dc->unplug_watermark, watermark);
  195 +       rcu_read_unlock();
  196 +
  197 +       if (atomic_read(&device->local_cnt) >= watermark)
  198 +               drbd_kick_lo(device);
  199 +       put_ldev(device);
  200 +   }
  201 +}
  202 +
  203 +static void conn_maybe_kick_lo(struct drbd_connection *connection)
  204 +{
  205 +   struct drbd_resource *resource = connection->resource;
  206 +   struct drbd_device *device;
  207 +   int vnr;
  208 +
  209 +   rcu_read_lock();
  210 +   idr_for_each_entry (&resource->devices, device, vnr)
  211 +       maybe_kick_lo(device);
  212 +   rcu_read_unlock();
  213 +}
  214  
  215  static void reclaim_finished_net_peer_reqs(struct drbd_connection *connection,
  216                        struct list_head *to_be_freed)
  217 @@ -411,6 +442,7 @@ struct page *drbd_alloc_pages(struct drb
  218  
  219     while (page == NULL) {
  220         prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
  221 +       conn_maybe_kick_lo(connection);
  222  
  223         drbd_reclaim_net_peer_reqs(connection);
  224  
  225 @@ -495,7 +527,7 @@ drbd_alloc_peer_req(struct drbd_peer_dev
  226     if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
  227         return NULL;
  228  
  229 -   peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
  230 +   peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
  231     if (!peer_req) {
  232         if (!(gfp_mask & __GFP_NOWARN))
  233             drbd_err(device, "%s: allocation failed\n", __func__);
  234 @@ -523,7 +555,7 @@ void __drbd_free_peer_req(struct drbd_pe
  235     D_ASSERT(peer_device, atomic_read(&peer_req->pending_bios) == 0);
  236     D_ASSERT(peer_device, drbd_interval_empty(&peer_req->i));
  237     drbd_free_page_chain(&peer_device->connection->transport, &peer_req->page_chain, is_net);
  238 -   mempool_free(peer_req, &drbd_ee_mempool);
  239 +   mempool_free(peer_req, drbd_ee_mempool);
  240  }
  241  
  242  int drbd_free_peer_reqs(struct drbd_resource *resource, struct list_head *list, bool is_net_ee)
  243 @@ -688,9 +720,9 @@ int drbd_connected(struct drbd_peer_devi
  244     return err;
  245  }
  246  
  247 -void connect_timer_fn(struct timer_list *t)
  248 +void connect_timer_fn(unsigned long data)
  249  {
  250 -   struct drbd_connection *connection = from_timer(connection, t, connect_timer);
  251 +   struct drbd_connection *connection = (struct drbd_connection *)data;
  252     struct drbd_resource *resource = connection->resource;
  253     unsigned long irq_flags;
  254  
  255 @@ -883,7 +915,7 @@ start:
  256  
  257     drbd_thread_start(&connection->ack_receiver);
  258     connection->ack_sender =
  259 -       alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
  260 +       create_singlethread_workqueue("drbd_ack_sender");
  261     if (!connection->ack_sender) {
  262         drbd_err(connection, "Failed to create workqueue ack_sender\n");
  263         schedule_timeout_uninterruptible(HZ);
  264 @@ -966,10 +998,20 @@ static int decode_header(struct drbd_con
  265  
  266  static void drbd_unplug_all_devices(struct drbd_connection *connection)
  267  {
  268 -   if (current->plug == &connection->receiver_plug) {
  269 -       blk_finish_plug(&connection->receiver_plug);
  270 -       blk_start_plug(&connection->receiver_plug);
  271 -   } /* else: maybe just schedule() ?? */
  272 +   /* else: maybe just schedule() ?? */
  273 +   struct drbd_resource *resource = connection->resource;
  274 +   struct drbd_device *device;
  275 +   int vnr;
  276 +
  277 +   rcu_read_lock();
  278 +   idr_for_each_entry (&resource->devices, device, vnr) {
  279 +       kref_get(&device->kref);
  280 +       rcu_read_unlock();
  281 +       drbd_kick_lo(device);
  282 +       kref_put(&device->kref, drbd_destroy_device);
  283 +       rcu_read_lock();
  284 +   }
  285 +   rcu_read_unlock();
  286  }
  287  
  288  static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
  289 @@ -1042,16 +1084,16 @@ struct one_flush_context {
  290     struct issue_flush_context *ctx;
  291  };
  292  
  293 -static void one_flush_endio(struct bio *bio)
  294 +static void one_flush_endio(struct bio *bio, int error)
  295  {
  296     struct one_flush_context *octx = bio->bi_private;
  297     struct drbd_device *device = octx->device;
  298     struct issue_flush_context *ctx = octx->ctx;
  299  
  300 -   blk_status_t status = bio->bi_status;
  301 +   u8 status = (error == 0 ? 0 : error == -ENOMEM ? 9 : error == -EOPNOTSUPP ? 1 : 10);
  302  
  303     if (status) {
  304 -       ctx->error = blk_status_to_errno(status);
  305 +       ctx->error = (status == 0 ? 0 : status == 9 ? -ENOMEM : status == 1 ? -EOPNOTSUPP : -EIO);
  306         drbd_info(device, "local disk FLUSH FAILED with status %d\n", status);
  307     }
  308     kfree(octx);
  309 @@ -1088,15 +1130,14 @@ static void submit_one_flush(struct drbd
  310  
  311     octx->device = device;
  312     octx->ctx = ctx;
  313 -   bio_set_dev(bio, device->ldev->backing_bdev);
  314 +   bio->bi_bdev = device->ldev->backing_bdev;
  315     bio->bi_private = octx;
  316     bio->bi_end_io = one_flush_endio;
  317  
  318     device->flush_jif = jiffies;
  319     set_bit(FLUSH_PENDING, &device->flags);
  320     atomic_inc(&ctx->pending);
  321 -   bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
  322 -   submit_bio(bio);
  323 +   submit_bio(WRITE_FLUSH, bio);
  324  }
  325  
  326  static enum finish_epoch drbd_flush_after_epoch(struct drbd_connection *connection, struct drbd_epoch *epoch)
  327 @@ -1527,11 +1568,12 @@ static void drbd_issue_peer_discard_or_z
  328  static void drbd_issue_peer_wsame(struct drbd_device *device,
  329                   struct drbd_peer_request *peer_req)
  330  {
  331 -   struct block_device *bdev = device->ldev->backing_bdev;
  332 -   sector_t s = peer_req->i.sector;
  333 -   sector_t nr = peer_req->i.size >> 9;
  334 -   if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->page_chain.head))
  335 -       peer_req->flags |= EE_WAS_ERROR;
  336 +   
  337 +   /* We should have never received this request!  At least not until we
  338 +    * implement an open-coded write-same equivalent submit loop, and tell
  339 +    * our peer we were write_same_capable. */
  340 +   drbd_err(device, "received unsupported WRITE_SAME request\n");
  341 +   peer_req->flags |= EE_WAS_ERROR;
  342     drbd_endio_write_sec_final(peer_req);
  343  }
  344  
  345 @@ -1578,7 +1620,7 @@ static void conn_wait_ee_empty(struct dr
  346  /* TODO allocate from our own bio_set. */
  347  int drbd_submit_peer_request(struct drbd_device *device,
  348                  struct drbd_peer_request *peer_req,
  349 -                const unsigned op, const unsigned op_flags,
  350 +                const unsigned rw,
  351                  const int fault_type)
  352  {
  353     struct bio *bios = NULL;
  354 @@ -1620,16 +1662,6 @@ int drbd_submit_peer_request(struct drbd
  355      * generated bio, but a bio allocated on behalf of the peer.
  356      */
  357  next_bio:
  358 -   /* REQ_OP_WRITE_SAME, _DISCARD, _WRITE_ZEROES handled above.
  359 -    * REQ_OP_FLUSH (empty flush) not expected,
  360 -    * should have been mapped to a "drbd protocol barrier".
  361 -    * REQ_OP_SECURE_ERASE: I don't see how we could ever support that.
  362 -    */
  363 -   if (!(op == REQ_OP_WRITE || op == REQ_OP_READ)) {
  364 -       drbd_err(device, "Invalid bio op received: 0x%x\n", op);
  365 -       err = -EINVAL;
  366 -       goto fail;
  367 -   }
  368  
  369     bio = bio_alloc(GFP_NOIO, nr_pages);
  370     if (!bio) {
  371 @@ -1637,11 +1669,11 @@ next_bio:
  372         goto fail;
  373     }
  374     /* > peer_req->i.sector, unless this is the first bio */
  375 -   bio->bi_iter.bi_sector = sector;
  376 -   bio_set_dev(bio, device->ldev->backing_bdev);
  377 +   bio->bi_sector = sector;
  378 +   bio->bi_bdev = device->ldev->backing_bdev;
  379     /* we special case some flags in the multi-bio case, see below
  380      * (REQ_PREFLUSH, or BIO_RW_BARRIER in older kernels) */
  381 -   bio->bi_opf = op | op_flags;
  382 +   bio->bi_rw = rw;
  383     bio->bi_private = peer_req;
  384     bio->bi_end_io = drbd_peer_request_endio;
  385  
  386 @@ -1653,7 +1685,7 @@ next_bio:
  387         unsigned off, len;
  388         int res;
  389  
  390 -       if (op == REQ_OP_READ) {
  391 +       if (!(rw & BIO_WRITE)) {
  392             set_page_chain_offset(page, 0);
  393             set_page_chain_size(page, min_t(unsigned, data_size, PAGE_SIZE));
  394         }
  395 @@ -1675,8 +1707,9 @@ next_bio:
  396             if (bio->bi_vcnt == 0) {
  397                 drbd_err(device,
  398                     "bio_add_page(%p, %p, %u, %u): %d (bi_vcnt %u bi_max_vecs %u bi_sector %llu, bi_flags 0x%lx)\n",
  399 -                   bio, page, len, off, res, bio->bi_vcnt, bio->bi_max_vecs, (uint64_t)bio->bi_iter.bi_sector,
  400 -                    (unsigned long)bio->bi_flags);
  401 +                   bio, page, len, off, res, bio->bi_vcnt, bio->bi_max_vecs,
  402 +                   (uint64_t) bio->bi_sector,
  403 +                   (unsigned long)bio->bi_flags);
  404                 err = -ENOSPC;
  405                 goto fail;
  406             }
  407 @@ -1703,8 +1736,9 @@ next_bio:
  408         /* strip off REQ_PREFLUSH,
  409          * unless it is the first or last bio */
  410         if (bios && bios->bi_next)
  411 -           bios->bi_opf &= ~REQ_PREFLUSH;
  412 +           bios->bi_rw &= ~REQ_FLUSH;
  413     } while (bios);
  414 +   maybe_kick_lo(device);
  415     return 0;
  416  
  417  fail:
  418 @@ -1760,7 +1794,8 @@ int w_e_reissue(struct drbd_work *w, int
  419      * get_ldev was done in receive_Data. */
  420  
  421     peer_req->w.cb = e_end_block;
  422 -   err = drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, DRBD_FAULT_DT_WR);
  423 +   err = drbd_submit_peer_request(device, peer_req, WRITE | 0,
  424 +                      DRBD_FAULT_DT_WR);
  425     switch (err) {
  426     case -ENOMEM:
  427         peer_req->w.cb = w_e_reissue;
  428 @@ -2011,8 +2046,8 @@ static int ignore_remaining_packet(struc
  429  static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
  430                sector_t sector, int data_size)
  431  {
  432 -   struct bio_vec bvec;
  433 -   struct bvec_iter iter;
  434 +   struct bio_vec *bvec;
  435 +   int iter;
  436     struct bio *bio;
  437     int digest_size, err, expect;
  438     void *dig_in = peer_device->connection->int_dig_in;
  439 @@ -2032,13 +2067,13 @@ static int recv_dless_read(struct drbd_p
  440     peer_device->recv_cnt += data_size >> 9;
  441  
  442     bio = req->master_bio;
  443 -   D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
  444 +   D_ASSERT(peer_device->device, sector == bio->bi_sector);
  445  
  446     bio_for_each_segment(bvec, bio, iter) {
  447 -       void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
  448 -       expect = min_t(int, data_size, bvec.bv_len);
  449 +       void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
  450 +       expect = min_t(int, data_size, bvec->bv_len);
  451         err = drbd_recv_into(peer_device->connection, mapped, expect);
  452 -       kunmap(bvec.bv_page);
  453 +       kunmap(bvec->bv_page);
  454         if (err)
  455             return err;
  456         data_size -= expect;
  457 @@ -2120,7 +2155,8 @@ static int recv_resync_read(struct drbd_
  458        sync by the sync source with a P_PEERS_IN_SYNC packet soon. */
  459     drbd_set_all_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
  460  
  461 -   err = drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, DRBD_FAULT_RS_WR);
  462 +   err = drbd_submit_peer_request(device, peer_req, WRITE | 0,
  463 +                      DRBD_FAULT_RS_WR);
  464     if (err)
  465         goto out;
  466  
  467 @@ -2529,27 +2565,16 @@ static int wait_for_and_update_peer_seq(
  468  }
  469  
  470  /* see also bio_flags_to_wire() */
  471 -static unsigned long wire_flags_to_bio_flags(struct drbd_connection *connection, u32 dpf)
  472 +static unsigned long wire_flags_to_bio(struct drbd_connection *connection, u32 dpf)
  473  {
  474     if (connection->agreed_pro_version >= 95)
  475 -       return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
  476 -           (dpf & DP_FUA ? REQ_FUA : 0) |
  477 -           (dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
  478 +       return  (dpf & DP_RW_SYNC ? (1UL << BIO_RW_SYNCIO) : 0) |
  479 +           (dpf & DP_FUA ? BIO_FUA : 0) |
  480 +           (dpf & DP_DISCARD ? BIO_DISCARD : 0) |
  481 +           (dpf & DP_FLUSH ? BIO_FLUSH : 0);
  482  
  483     /* else: we used to communicate one bit only in older DRBD */
  484 -   return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
  485 -}
  486 -
  487 -static unsigned long wire_flags_to_bio_op(u32 dpf)
  488 -{
  489 -   if (dpf & DP_ZEROES)
  490 -       return REQ_OP_WRITE_ZEROES;
  491 -   if (dpf & DP_DISCARD)
  492 -       return REQ_OP_DISCARD;
  493 -   if (dpf & DP_WSAME)
  494 -       return REQ_OP_WRITE_SAME;
  495 -   else
  496 -       return REQ_OP_WRITE;
  497 +   return dpf & DP_RW_SYNC ? (1UL << BIO_RW_SYNCIO) : 0;
  498  }
  499  
  500  static void fail_postponed_requests(struct drbd_peer_request *peer_req)
  501 @@ -2781,7 +2806,7 @@ static int receive_Data(struct drbd_conn
  502     struct net_conf *nc;
  503     struct drbd_peer_request *peer_req;
  504     struct drbd_peer_request_details d;
  505 -   int op, op_flags;
  506 +   int rw = WRITE, op_flags;
  507     int err, tp;
  508  
  509     peer_device = conn_peer_device(connection, pi->vnr);
  510 @@ -2832,12 +2857,11 @@ static int receive_Data(struct drbd_conn
  511     peer_req->submit_jif = jiffies;
  512     peer_req->flags |= EE_APPLICATION;
  513  
  514 -   op = wire_flags_to_bio_op(d.dp_flags);
  515 -   op_flags = wire_flags_to_bio_flags(connection, d.dp_flags);
  516 +   rw |= wire_flags_to_bio(connection, d.dp_flags);
  517     if (pi->cmd == P_TRIM) {
  518         D_ASSERT(peer_device, peer_req->i.size > 0);
  519         D_ASSERT(peer_device, d.dp_flags & DP_DISCARD);
  520 -       D_ASSERT(peer_device, op == REQ_OP_DISCARD);
  521 +       D_ASSERT(peer_device, (rw & BIO_DISCARD));
  522         D_ASSERT(peer_device, peer_req->page_chain.head == NULL);
  523         D_ASSERT(peer_device, peer_req->page_chain.nr_pages == 0);
  524         /* need to play safe: an older DRBD sender
  525 @@ -2847,7 +2871,6 @@ static int receive_Data(struct drbd_conn
  526     } else if (pi->cmd == P_ZEROES) {
  527         D_ASSERT(peer_device, peer_req->i.size > 0);
  528         D_ASSERT(peer_device, d.dp_flags & DP_ZEROES);
  529 -       D_ASSERT(peer_device, op == REQ_OP_WRITE_ZEROES);
  530         D_ASSERT(peer_device, peer_req->page_chain.head == NULL);
  531         D_ASSERT(peer_device, peer_req->page_chain.nr_pages == 0);
  532         /* Do (not) pass down BLKDEV_ZERO_NOUNMAP? */
  533 @@ -2855,7 +2878,6 @@ static int receive_Data(struct drbd_conn
  534             peer_req->flags |= EE_TRIM;
  535     } else if (pi->cmd == P_WSAME) {
  536         D_ASSERT(peer_device, peer_req->i.size > 0);
  537 -       D_ASSERT(peer_device, op == REQ_OP_WRITE_SAME);
  538         D_ASSERT(peer_device, peer_req->page_chain.head != NULL);
  539     } else if (peer_req->page_chain.head == NULL) {
  540         /* Actually, this must not happen anymore,
  541 @@ -2866,12 +2888,21 @@ static int receive_Data(struct drbd_conn
  542         D_ASSERT(device, d.dp_flags & DP_FLUSH);
  543     } else {
  544         D_ASSERT(peer_device, peer_req->i.size > 0);
  545 -       D_ASSERT(peer_device, op == REQ_OP_WRITE);
  546 +       D_ASSERT(peer_device, (rw & BIO_WRITE));
  547     }
  548  
  549     if (d.dp_flags & DP_MAY_SET_IN_SYNC)
  550         peer_req->flags |= EE_MAY_SET_IN_SYNC;
  551  
  552 +   /* last "fixes" to rw flags.
  553 +    * Strip off BIO_RW_BARRIER unconditionally,
  554 +    * it is not supposed to be here anyways.
  555 +    * (Was FUA or FLUSH on the peer,
  556 +    * and got translated to BARRIER on this side).
  557 +    * Note that the epoch handling code below
  558 +    * may add it again, though.
  559 +    */
  560 +   op_flags &= ~(1UL << BIO_RW_BARRIER);
  561     spin_lock(&connection->epoch_lock);
  562     peer_req->epoch = connection->current_epoch;
  563     atomic_inc(&peer_req->epoch->epoch_size);
  564 @@ -2888,14 +2919,14 @@ static int receive_Data(struct drbd_conn
  565         epoch = list_entry(peer_req->epoch->list.prev, struct drbd_epoch, list);
  566         if (epoch == peer_req->epoch) {
  567             set_bit(DE_CONTAINS_A_BARRIER, &peer_req->epoch->flags);
  568 -           op_flags |= REQ_PREFLUSH | REQ_FUA;
  569 +           rw |= BIO_FLUSH | BIO_FUA;
  570             peer_req->flags |= EE_IS_BARRIER;
  571         } else {
  572             if (atomic_read(&epoch->epoch_size) > 1 ||
  573                 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
  574                 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
  575                 set_bit(DE_CONTAINS_A_BARRIER, &peer_req->epoch->flags);
  576 -               op_flags |= REQ_PREFLUSH | REQ_FUA;
  577 +               rw |= BIO_FLUSH | BIO_FUA;
  578                 peer_req->flags |= EE_IS_BARRIER;
  579             }
  580         }
  581 @@ -2971,7 +3002,7 @@ static int receive_Data(struct drbd_conn
  582     /* If we would need to block on the activity log,
  583      * we may queue this request for the submitter workqueue.
  584      * Remember the op_flags. */
  585 -   peer_req->op_flags = op_flags;
  586 +   peer_req->op_flags = rw;
  587  
  588     err = prepare_activity_log(peer_req);
  589     if (err == DRBD_PAL_DISCONNECTED)
  590 @@ -2998,7 +3029,7 @@ static int receive_Data(struct drbd_conn
  591         return 0;
  592     }
  593  
  594 -   err = drbd_submit_peer_request(device, peer_req, op, op_flags, DRBD_FAULT_DT_WR);
  595 +   err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
  596     if (!err)
  597         return 0;
  598  
  599 @@ -3416,7 +3447,7 @@ submit_for_resync:
  600  submit:
  601     update_receiver_timing_details(connection, drbd_submit_peer_request);
  602     inc_unacked(peer_device);
  603 -   if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, fault_type) == 0)
  604 +   if (drbd_submit_peer_request(device, peer_req, READ | 0, fault_type) == 0)
  605         return 0;
  606  
  607     /* don't care for the reason here */
  608 @@ -5765,9 +5796,9 @@ int abort_nested_twopc_work(struct drbd_
  609     return 0;
  610  }
  611  
  612 -void twopc_timer_fn(struct timer_list *t)
  613 +void twopc_timer_fn(unsigned long data)
  614  {
  615 -   struct drbd_resource *resource = from_timer(resource, t, twopc_timer);
  616 +   struct drbd_resource *resource = (struct drbd_resource *)data;
  617     unsigned long irq_flags;
  618  
  619     spin_lock_irqsave(&resource->req_lock, irq_flags);
  620 @@ -6023,9 +6054,9 @@ static int queued_twopc_work(struct drbd
  621     return 0;
  622  }
  623  
  624 -void queued_twopc_timer_fn(struct timer_list *t)
  625 +void queued_twopc_timer_fn(unsigned long data)
  626  {
  627 -   struct drbd_resource *resource = from_timer(resource, t, queued_twopc_timer);
  628 +   struct drbd_resource *resource = (struct drbd_resource *)data;
  629     struct queued_twopc *q;
  630     unsigned long irq_flags;
  631     unsigned long time = twopc_timeout(resource) / 4;
  632 @@ -7579,8 +7610,9 @@ static int receive_rs_deallocated(struct
  633         spin_unlock_irq(&device->resource->req_lock);
  634  
  635         atomic_add(pi->size >> 9, &device->rs_sect_ev);
  636 -       err = drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE_ZEROES,
  637 -               0, DRBD_FAULT_RS_WR);
  638 +       err = drbd_submit_peer_request(device, peer_req,
  639 +                          (-3)/* WRITE_ZEROES not supported on this kernel */,
  640 +                          DRBD_FAULT_RS_WR);
  641  
  642         if (err) {
  643             spin_lock_irq(&device->resource->req_lock);
  644 @@ -7730,7 +7762,7 @@ static void cleanup_resync_leftovers(str
  645     wake_up(&peer_device->device->misc_wait);
  646  
  647     del_timer_sync(&peer_device->resync_timer);
  648 -   resync_timer_fn(&peer_device->resync_timer);
  649 +   resync_timer_fn((unsigned long)peer_device);
  650     del_timer_sync(&peer_device->start_resync_timer);
  651  }
  652  
  653 @@ -8942,7 +8974,7 @@ static void destroy_peer_ack_req(struct
  654         container_of(kref, struct drbd_request, kref);
  655  
  656     list_del(&req->tl_requests);
  657 -   mempool_free(req, &drbd_request_mempool);
  658 +   mempool_free(req, drbd_request_mempool);
  659  }
  660  
  661  static void cleanup_peer_ack_list(struct drbd_connection *connection)
  662 --- drbd_bitmap.c
  663 +++ /tmp/cocci-output-19039-0c9ab8-drbd_bitmap.c
  664 @@ -464,23 +464,26 @@ static inline unsigned long bit_to_page_
  665     return word32_to_page(interleaved_word32(bitmap, bitmap_index, bit));
  666  }
  667  
  668 -static void *bm_map(struct drbd_bitmap *bitmap, unsigned int page)
  669 +static void *bm_map(struct drbd_bitmap *bitmap, unsigned int page,
  670 +           enum km_type km_type)
  671  {
  672     if (!(bitmap->bm_flags & BM_ON_DAX_PMEM))
  673 -       return kmap_atomic(bitmap->bm_pages[page]);
  674 +       return kmap_atomic(bitmap->bm_pages[page], km_type);
  675  
  676     return ((unsigned char *)bitmap->bm_on_pmem) + (unsigned long)page * PAGE_SIZE;
  677  }
  678  
  679 -static void bm_unmap(struct drbd_bitmap *bitmap, void *addr)
  680 +static void bm_unmap(struct drbd_bitmap *bitmap, void *addr,
  681 +            enum km_type km_type)
  682  {
  683     if (!(bitmap->bm_flags & BM_ON_DAX_PMEM))
  684 -       kunmap_atomic(addr);
  685 +       kunmap_atomic(addr, km_type);
  686  }
  687  
  688  static __always_inline unsigned long
  689 -____bm_op(struct drbd_device *device, unsigned int bitmap_index, unsigned long start, unsigned long end,
  690 -    enum bitmap_operations op, __le32 *buffer)
  691 +____bm_op(struct drbd_device *device, unsigned int bitmap_index,
  692 +     unsigned long start, unsigned long end, enum bitmap_operations op,
  693 +     __le32 *buffer, enum km_type km_type)
  694  {
  695     struct drbd_bitmap *bitmap = device->bitmap;
  696     unsigned int word32_skip = 32 * bitmap->bm_max_peers;
  697 @@ -499,7 +502,7 @@ ____bm_op(struct drbd_device *device, un
  698         unsigned int count = 0;
  699         void *addr;
  700  
  701 -       addr = bm_map(bitmap, page);
  702 +       addr = bm_map(bitmap, page, km_type);
  703         if (((start & 31) && (start | 31) <= end) || op == BM_OP_TEST) {
  704             unsigned int last = bit_in_page | 31;
  705  
  706 @@ -521,7 +524,8 @@ ____bm_op(struct drbd_device *device, un
  707                         break;
  708                     case BM_OP_TEST:
  709                         total = !!test_bit_le(bit_in_page, addr);
  710 -                       bm_unmap(bitmap, addr);
  711 +                       bm_unmap(bitmap, addr,
  712 +                            km_type);
  713                         return total;
  714                     default:
  715                         break;
  716 @@ -663,7 +667,7 @@ ____bm_op(struct drbd_device *device, un
  717         }
  718  
  719         next_page:
  720 -       bm_unmap(bitmap, addr);
  721 +       bm_unmap(bitmap, addr, km_type);
  722         bit_in_page -= BITS_PER_PAGE;
  723         switch(op) {
  724         case BM_OP_CLEAR:
  725 @@ -685,7 +689,7 @@ ____bm_op(struct drbd_device *device, un
  726         continue;
  727  
  728         found:
  729 -       bm_unmap(bitmap, addr);
  730 +       bm_unmap(bitmap, addr, km_type);
  731         return start + count - bit_in_page;
  732     }
  733     switch(op) {
  734 @@ -745,7 +749,8 @@ __bm_op(struct drbd_device *device, unsi
  735             break;
  736         }
  737     }
  738 -   return ____bm_op(device, bitmap_index, start, end, op, buffer);
  739 +   return ____bm_op(device, bitmap_index, start, end, op, buffer,
  740 +            KM_IRQ1);
  741  }
  742  
  743  static __always_inline unsigned long
  744 @@ -781,16 +786,16 @@ bm_op(struct drbd_device *device, unsign
  745  #endif
  746  
  747  #ifdef BITMAP_DEBUG
  748 -#define ___bm_op(device, bitmap_index, start, end, op, buffer) \
  749 +#define ___bm_op(device,bitmap_index,start,end,op,buffer,km_type)  \
  750     ({ unsigned long ret; \
  751        drbd_info(device, "%s: ___bm_op(..., %u, %lu, %lu, %u, %p)\n", \
  752              __func__, bitmap_index, start, end, op, buffer); \
  753 -      ret = ____bm_op(device, bitmap_index, start, end, op, buffer); \
  754 +      ret = ____bm_op(device, bitmap_index, start, end, op, buffer, km_type); \
  755        drbd_info(device, "= %lu\n", ret); \
  756        ret; })
  757  #else
  758 -#define ___bm_op(device, bitmap_index, start, end, op, buffer) \
  759 -   ____bm_op(device, bitmap_index, start, end, op, buffer)
  760 +#define ___bm_op(device,bitmap_index,start,end,op,buffer,km_type)  \
  761 +   ____bm_op(device, bitmap_index, start, end, op, buffer, km_type)
  762  #endif
  763  
  764  /* you better not modify the bitmap while this is running,
  765 @@ -807,7 +812,8 @@ static void bm_count_bits(struct drbd_de
  766         while (bit < bitmap->bm_bits) {
  767             unsigned long last_bit = last_bit_on_page(bitmap, bitmap_index, bit);
  768  
  769 -           bits_set += ___bm_op(device, bitmap_index, bit, last_bit, BM_OP_COUNT, NULL);
  770 +           bits_set += ___bm_op(device, bitmap_index, bit, last_bit, BM_OP_COUNT,
  771 +                        NULL, KM_USER0);
  772             bit = last_bit + 1;
  773             cond_resched();
  774         }
  775 @@ -946,11 +952,13 @@ int drbd_bm_resize(struct drbd_device *d
  776             unsigned long bm_set = b->bm_set[bitmap_index];
  777  
  778             if (set_new_bits) {
  779 -               ___bm_op(device, bitmap_index, obits, -1UL, BM_OP_SET, NULL);
  780 +               ___bm_op(device, bitmap_index, obits, -1UL, BM_OP_SET,
  781 +                    NULL, KM_IRQ1);
  782                 bm_set += bits - obits;
  783             }
  784             else
  785 -               ___bm_op(device, bitmap_index, obits, -1UL, BM_OP_CLEAR, NULL);
  786 +               ___bm_op(device, bitmap_index, obits, -1UL, BM_OP_CLEAR,
  787 +                    NULL, KM_IRQ1);
  788  
  789             b->bm_set[bitmap_index] = bm_set;
  790         }
  791 @@ -1076,14 +1084,14 @@ static void drbd_bm_aio_ctx_destroy(stru
  792  }
  793  
  794  /* bv_page may be a copy, or may be the original */
  795 -static void drbd_bm_endio(struct bio *bio)
  796 +static void drbd_bm_endio(struct bio *bio, int error)
  797  {
  798     struct drbd_bm_aio_ctx *ctx = bio->bi_private;
  799     struct drbd_device *device = ctx->device;
  800     struct drbd_bitmap *b = device->bitmap;
  801     unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
  802  
  803 -   blk_status_t status = bio->bi_status;
  804 +   u8 status = (error == 0 ? 0 : error == -ENOMEM ? 9 : error == -EOPNOTSUPP ? 1 : 10);
  805  
  806     if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
  807         !bm_test_page_unchanged(b->bm_pages[idx]))
  808 @@ -1092,7 +1100,7 @@ static void drbd_bm_endio(struct bio *bi
  809     if (status) {
  810         /* ctx error will hold the completed-last non-zero error code,
  811          * in case error codes differ. */
  812 -       ctx->error = blk_status_to_errno(status);
  813 +       ctx->error = (status == 0 ? 0 : status == 9 ? -ENOMEM : status == 1 ? -EOPNOTSUPP : -EIO);
  814         bm_set_page_io_err(b->bm_pages[idx]);
  815         /* Not identical to on disk version of it.
  816          * Is BM_PAGE_IO_ERROR enough? */
  817 @@ -1107,7 +1115,7 @@ static void drbd_bm_endio(struct bio *bi
  818     bm_page_unlock_io(device, idx);
  819  
  820     if (ctx->flags & BM_AIO_COPY_PAGES)
  821 -       mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);
  822 +       mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
  823  
  824     bio_put(bio);
  825  
  826 @@ -1125,7 +1133,7 @@ static void bm_page_io_async(struct drbd
  827     struct drbd_bitmap *b = device->bitmap;
  828     struct page *page;
  829     unsigned int len;
  830 -   unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
  831 +   unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE;
  832  
  833     sector_t on_disk_sector =
  834         device->ldev->md.md_offset + device->ldev->md.bm_offset;
  835 @@ -1144,26 +1152,26 @@ static void bm_page_io_async(struct drbd
  836     bm_set_page_unchanged(b->bm_pages[page_nr]);
  837  
  838     if (ctx->flags & BM_AIO_COPY_PAGES) {
  839 -       page = mempool_alloc(&drbd_md_io_page_pool,
  840 -               GFP_NOIO | __GFP_HIGHMEM);
  841 +       page = mempool_alloc(drbd_md_io_page_pool,
  842 +                    GFP_NOIO | __GFP_HIGHMEM);
  843         copy_highpage(page, b->bm_pages[page_nr]);
  844         bm_store_page_idx(page, page_nr);
  845     } else
  846         page = b->bm_pages[page_nr];
  847 -   bio_set_dev(bio, device->ldev->md_bdev);
  848 -   bio->bi_iter.bi_sector = on_disk_sector;
  849 +   bio->bi_bdev = device->ldev->md_bdev;
  850 +   bio->bi_sector = on_disk_sector;
  851     /* bio_add_page of a single page to an empty bio will always succeed,
  852      * according to api.  Do we want to assert that? */
  853     bio_add_page(bio, page, len, 0);
  854     bio->bi_private = ctx;
  855     bio->bi_end_io = drbd_bm_endio;
  856 -   bio->bi_opf = op;
  857 +   bio->bi_rw = rw;
  858  
  859 -   if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
  860 -       bio->bi_status = BLK_STS_IOERR;
  861 -       bio_endio(bio);
  862 +   if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
  863 +       bio_endio(bio,
  864 +             (10 == 0 ? 0 : 10 == 9 ? -ENOMEM : 10 == 1 ? -EOPNOTSUPP : -EIO));
  865     } else {
  866 -       submit_bio(bio);
  867 +       submit_bio(rw, bio);
  868         /* this should not count as user activity and cause the
  869          * resync to throttle -- see drbd_rs_should_slow_down(). */
  870         atomic_add(len >> 9, &device->rs_sect_ev);
  871 @@ -1309,6 +1317,7 @@ static int bm_rw_range(struct drbd_devic
  872      * "in_flight reached zero, all done" event.
  873      */
  874     if (!atomic_dec_and_test(&ctx->in_flight)) {
  875 +       drbd_blk_run_queue(bdev_get_queue(device->ldev->md_bdev));
  876         wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
  877     } else
  878         kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
  879 @@ -1468,7 +1477,7 @@ unsigned long _drbd_bm_find_next(struct
  880  {
  881     /* WARN_ON(!(device->b->bm_flags & BM_LOCK_SET)); */
  882     return ____bm_op(peer_device->device, peer_device->bitmap_index, start, -1UL,
  883 -           BM_OP_FIND_BIT, NULL);
  884 +           BM_OP_FIND_BIT, NULL, KM_USER0);
  885  }
  886  
  887  unsigned long _drbd_bm_find_next_zero(struct drbd_peer_device *peer_device, unsigned long start)
  888 @@ -1476,7 +1485,7 @@ unsigned long _drbd_bm_find_next_zero(st
  889  {
  890     /* WARN_ON(!(device->b->bm_flags & BM_LOCK_SET)); */
  891     return ____bm_op(peer_device->device, peer_device->bitmap_index, start, -1UL,
  892 -           BM_OP_FIND_ZERO_BIT, NULL);
  893 +           BM_OP_FIND_ZERO_BIT, NULL, KM_USER0);
  894  }
  895  
  896  unsigned int drbd_bm_set_bits(struct drbd_device *device, unsigned int bitmap_index,
  897 @@ -1607,7 +1616,7 @@ void drbd_bm_copy_slot(struct drbd_devic
  898  
  899     bitmap->bm_set[to_index] = 0;
  900     current_page_nr = 0;
  901 -   addr = bm_map(bitmap, current_page_nr);
  902 +   addr = bm_map(bitmap, current_page_nr, KM_IRQ1);
  903     for (word_nr = 0; word_nr < bitmap->bm_words; word_nr += bitmap->bm_max_peers) {
  904         from_word_nr = word_nr + from_index;
  905         from_page_nr = word32_to_page(from_word_nr);
  906 @@ -1615,14 +1624,14 @@ void drbd_bm_copy_slot(struct drbd_devic
  907         to_page_nr = word32_to_page(to_word_nr);
  908  
  909         if (current_page_nr != from_page_nr) {
  910 -           bm_unmap(bitmap, addr);
  911 +           bm_unmap(bitmap, addr, KM_IRQ1);
  912             if (need_resched()) {
  913                 spin_unlock_irq(&bitmap->bm_lock);
  914                 cond_resched();
  915                 spin_lock_irq(&bitmap->bm_lock);
  916             }
  917             current_page_nr = from_page_nr;
  918 -           addr = bm_map(bitmap, current_page_nr);
  919 +           addr = bm_map(bitmap, current_page_nr, KM_IRQ1);
  920         }
  921         data_word = addr[word32_in_page(from_word_nr)];
  922  
  923 @@ -1633,9 +1642,9 @@ void drbd_bm_copy_slot(struct drbd_devic
  924         }
  925  
  926         if (current_page_nr != to_page_nr) {
  927 -           bm_unmap(bitmap, addr);
  928 +           bm_unmap(bitmap, addr, KM_IRQ1);
  929             current_page_nr = to_page_nr;
  930 -           addr = bm_map(bitmap, current_page_nr);
  931 +           addr = bm_map(bitmap, current_page_nr, KM_IRQ1);
  932         }
  933  
  934         if (addr[word32_in_page(to_word_nr)] != data_word)
  935 @@ -1643,7 +1652,7 @@ void drbd_bm_copy_slot(struct drbd_devic
  936         addr[word32_in_page(to_word_nr)] = data_word;
  937         bitmap->bm_set[to_index] += hweight32(data_word);
  938     }
  939 -   bm_unmap(bitmap, addr);
  940 +   bm_unmap(bitmap, addr, KM_IRQ1);
  941  
  942     spin_unlock_irq(&bitmap->bm_lock);
  943  }
  944 --- drbd_sender.c
  945 +++ /tmp/cocci-output-19039-56adca-drbd_sender.c
  946 @@ -53,14 +53,14 @@ struct mutex resources_mutex;
  947  /* used for synchronous meta data and bitmap IO
  948   * submitted by drbd_md_sync_page_io()
  949   */
  950 -void drbd_md_endio(struct bio *bio)
  951 +void drbd_md_endio(struct bio *bio, int error)
  952  {
  953     struct drbd_device *device;
  954  
  955 -   blk_status_t status = bio->bi_status;
  956 +   u8 status = (error == 0 ? 0 : error == -ENOMEM ? 9 : error == -EOPNOTSUPP ? 1 : 10);
  957  
  958     device = bio->bi_private;
  959 -   device->md_io.error = blk_status_to_errno(status);
  960 +   device->md_io.error = (status == 0 ? 0 : status == 9 ? -ENOMEM : status == 1 ? -EOPNOTSUPP : -EIO);
  961  
  962     /* special case: drbd_md_read() during drbd_adm_attach() */
  963     if (device->ldev)
  964 @@ -195,15 +195,15 @@ void drbd_endio_write_sec_final(struct d
  965  /* writes on behalf of the partner, or resync writes,
  966   * "submitted" by the receiver.
  967   */
  968 -void drbd_peer_request_endio(struct bio *bio)
  969 +void drbd_peer_request_endio(struct bio *bio, int error)
  970  {
  971     struct drbd_peer_request *peer_req = bio->bi_private;
  972     struct drbd_device *device = peer_req->peer_device->device;
  973     bool is_write = bio_data_dir(bio) == WRITE;
  974 -   bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
  975 -             bio_op(bio) == REQ_OP_DISCARD;
  976 +   bool is_discard = (false)/* WRITE_ZEROES not supported on this kernel */ ||
  977 +             (bio->bi_rw & BIO_DISCARD);
  978  
  979 -   blk_status_t status = bio->bi_status;
  980 +   u8 status = (error == 0 ? 0 : error == -ENOMEM ? 9 : error == -EOPNOTSUPP ? 1 : 10);
  981  
  982     if (status && drbd_ratelimit())
  983         drbd_warn(device, "%s: error=%d s=%llus\n",
  984 @@ -233,7 +233,7 @@ void drbd_panic_after_delayed_completion
  985  
  986  /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
  987   */
  988 -void drbd_request_endio(struct bio *bio)
  989 +void drbd_request_endio(struct bio *bio, int error)
  990  {
  991     unsigned long flags;
  992     struct drbd_request *req = bio->bi_private;
  993 @@ -241,7 +241,7 @@ void drbd_request_endio(struct bio *bio)
  994     struct bio_and_error m;
  995     enum drbd_req_event what;
  996  
  997 -   blk_status_t status = bio->bi_status;
  998 +   u8 status = (error == 0 ? 0 : error == -ENOMEM ? 9 : error == -EOPNOTSUPP ? 1 : 10);
  999  
 1000     /* If this request was aborted locally before,
 1001      * but now was completed "successfully",
 1002 @@ -281,14 +281,13 @@ void drbd_request_endio(struct bio *bio)
 1003  
 1004     /* to avoid recursion in __req_mod */
 1005     if (unlikely(status)) {
 1006 -       unsigned int op = bio_op(bio);
 1007 -       if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
 1008 -           if (status == BLK_STS_NOTSUPP)
 1009 +       if ((bio->bi_rw & BIO_DISCARD) || (false)/* WRITE_ZEROES not supported on this kernel */) {
 1010 +           if (status == 1)
 1011                 what = DISCARD_COMPLETED_NOTSUPP;
 1012             else
 1013                 what = DISCARD_COMPLETED_WITH_ERROR;
 1014 -       } else if (op == REQ_OP_READ) {
 1015 -           if (bio->bi_opf & REQ_RAHEAD)
 1016 +       } else if (!(bio->bi_rw & BIO_WRITE)) {
 1017 +           if ((false)/* RAHEAD not supported on this kernel */)
 1018                 what = READ_AHEAD_COMPLETED_WITH_ERROR;
 1019             else
 1020                 what = READ_COMPLETED_WITH_ERROR;
 1021 @@ -300,7 +299,7 @@ void drbd_request_endio(struct bio *bio)
 1022     }
 1023  
 1024     bio_put(req->private_bio);
 1025 -   req->private_bio = ERR_PTR(blk_status_to_errno(status));
 1026 +   req->private_bio = ERR_PTR((status == 0 ? 0 : status == 9 ? -ENOMEM : status == 1 ? -EOPNOTSUPP : -EIO));
 1027  
 1028     /* not req_mod(), we need irqsave here! */
 1029     spin_lock_irqsave(&device->resource->req_lock, flags);
 1030 @@ -325,9 +324,9 @@ void drbd_csum_pages(struct crypto_shash
 1031         unsigned off = page_chain_offset(page);
 1032         unsigned len = page_chain_size(page);
 1033         u8 *src;
 1034 -       src = kmap_atomic(page);
 1035 +       src = kmap_atomic(page, KM_USER1);
 1036         crypto_shash_update(desc, src + off, len);
 1037 -       kunmap_atomic(src);
 1038 +       kunmap_atomic(src, KM_USER1);
 1039     }
 1040     crypto_shash_final(desc, digest);
 1041     shash_desc_zero(desc);
 1042 @@ -336,8 +335,8 @@ void drbd_csum_pages(struct crypto_shash
 1043  void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
 1044  /* kmap compat: KM_USER1 */
 1045  {
 1046 -   struct bio_vec bvec;
 1047 -   struct bvec_iter iter;
 1048 +   struct bio_vec *bvec;
 1049 +   int iter;
 1050     SHASH_DESC_ON_STACK(desc, tfm);
 1051  
 1052     desc->tfm = tfm;
 1053 @@ -346,12 +345,12 @@ void drbd_csum_bio(struct crypto_shash *
 1054  
 1055     bio_for_each_segment(bvec, bio, iter) {
 1056         u8 *src;
 1057 -       src = kmap_atomic(bvec.bv_page);
 1058 -       crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
 1059 -       kunmap_atomic(src);
 1060 +       src = kmap_atomic(bvec->bv_page, KM_USER1);
 1061 +       crypto_shash_update(desc, src + bvec->bv_offset, bvec->bv_len);
 1062 +       kunmap_atomic(src, KM_USER1);
 1063         /* WRITE_SAME has only one segment,
 1064          * checksum the payload only once. */
 1065 -       if (bio_op(bio) == REQ_OP_WRITE_SAME)
 1066 +       if ((false)/* WRITE_SAME not supported on this kernel */)
 1067             break;
 1068     }
 1069     crypto_shash_final(desc, digest);
 1070 @@ -428,7 +427,7 @@ static int read_for_csum(struct drbd_pee
 1071     spin_unlock_irq(&device->resource->req_lock);
 1072  
 1073     atomic_add(size >> 9, &device->rs_sect_ev);
 1074 -   if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, DRBD_FAULT_RS_RD) == 0)
 1075 +   if (drbd_submit_peer_request(device, peer_req, READ | 0, DRBD_FAULT_RS_RD) == 0)
 1076         return 0;
 1077  
 1078     /* If it failed because of ENOMEM, retry should help.  If it failed
 1079 @@ -484,9 +483,9 @@ int w_send_uuids(struct drbd_work *w, in
 1080     return 0;
 1081  }
 1082  
 1083 -void resync_timer_fn(struct timer_list *t)
 1084 +void resync_timer_fn(unsigned long data)
 1085  {
 1086 -   struct drbd_peer_device *peer_device = from_timer(peer_device, t, resync_timer);
 1087 +   struct drbd_peer_device *peer_device = (struct drbd_peer_device *)data;
 1088  
 1089     if (test_bit(SYNC_TARGET_TO_BEHIND, &peer_device->flags))
 1090         return;
 1091 @@ -1069,6 +1068,7 @@ int drbd_resync_finished(struct drbd_pee
 1092          * queue (or even the read operations for those packets
 1093          * is not finished by now).   Retry in 100ms. */
 1094  
 1095 +       drbd_kick_lo(device);
 1096         schedule_timeout_interruptible(HZ / 10);
 1097     queue_on_sender_workq:
 1098         rfw = kmalloc(sizeof(*rfw), GFP_ATOMIC);
 1099 @@ -1325,14 +1325,14 @@ static bool all_zero(struct drbd_peer_re
 1100         unsigned int i, words = l / sizeof(long);
 1101         unsigned long *d;
 1102  
 1103 -       d = kmap_atomic(page);
 1104 +       d = kmap_atomic(page, KM_USER1);
 1105         for (i = 0; i < words; i++) {
 1106             if (d[i]) {
 1107 -               kunmap_atomic(d);
 1108 +               kunmap_atomic(d, KM_USER1);
 1109                 return false;
 1110             }
 1111         }
 1112 -       kunmap_atomic(d);
 1113 +       kunmap_atomic(d, KM_USER1);
 1114         len -= l;
 1115     }
 1116  
 1117 @@ -1865,9 +1865,9 @@ void drbd_rs_controller_reset(struct drb
 1118     rcu_read_unlock();
 1119  }
 1120  
 1121 -void start_resync_timer_fn(struct timer_list *t)
 1122 +void start_resync_timer_fn(unsigned long data)
 1123  {
 1124 -   struct drbd_peer_device *peer_device = from_timer(peer_device, t, start_resync_timer);
 1125 +   struct drbd_peer_device *peer_device = (struct drbd_peer_device *)data;
 1126     drbd_peer_device_post_work(peer_device, RS_START);
 1127  }
 1128  
 1129 @@ -2226,9 +2226,9 @@ static int do_md_sync(struct drbd_device
 1130     return 0;
 1131  }
 1132  
 1133 -void repost_up_to_date_fn(struct timer_list *t)
 1134 +void repost_up_to_date_fn(unsigned long data)
 1135  {
 1136 -   struct drbd_resource *resource = from_timer(resource, t, repost_up_to_date_timer);
 1137 +   struct drbd_resource *resource = (struct drbd_resource *)data;
 1138     drbd_post_work(resource, TRY_BECOME_UP_TO_DATE);
 1139  }
 1140  
 1141 --- drbd_transport_tcp.c
 1142 +++ /tmp/cocci-output-19039-043d59-drbd_transport_tcp.c
 1143 @@ -429,7 +429,8 @@ static int dtt_try_connect(struct drbd_t
 1144     peer_addr = path->path.peer_addr;
 1145  
 1146     what = "sock_create_kern";
 1147 -   err = sock_create_kern(&init_net, my_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &socket);
 1148 +   err = sock_create_kern(my_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
 1149 +                  &socket);
 1150     if (err < 0) {
 1151         socket = NULL;
 1152         goto out;
 1153 @@ -609,7 +610,7 @@ static int dtt_wait_for_connect(struct d
 1154     rcu_read_unlock();
 1155  
 1156     timeo = connect_int * HZ;
 1157 -   timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
 1158 +   timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
 1159  
 1160  retry:
 1161     timeo = wait_event_interruptible_timeout(listener->wait,
 1162 @@ -625,6 +626,7 @@ retry:
 1163         list_del(&socket_c->list);
 1164         kfree(socket_c);
 1165     } else if (listener->listener.pending_accepts > 0) {
 1166 +       int ___addr_len;
 1167         listener->listener.pending_accepts--;
 1168         spin_unlock_bh(&listener->listener.waiters_lock);
 1169  
 1170 @@ -637,7 +639,8 @@ retry:
 1171            from the listening socket. */
 1172         unregister_state_change(s_estab->sk, listener);
 1173  
 1174 -       s_estab->ops->getname(s_estab, (struct sockaddr *)&peer_addr, 2);
 1175 +       s_estab->ops->getname(s_estab, (struct sockaddr *)&peer_addr,
 1176 +                     &___addr_len, 2);
 1177  
 1178         spin_lock_bh(&listener->listener.waiters_lock);
 1179         drbd_path2 = drbd_find_path_by_addr(&listener->listener, &peer_addr);
 1180 @@ -773,7 +776,8 @@ static int dtt_init_listener(struct drbd
 1181  
 1182     my_addr = *(struct sockaddr_storage *)addr;
 1183  
 1184 -   err = sock_create_kern(&init_net, my_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &s_listen);
 1185 +   err = sock_create_kern(my_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
 1186 +                  &s_listen);
 1187     if (err) {
 1188         s_listen = NULL;
 1189         what = "sock_create_kern";
 1190 @@ -1014,7 +1018,7 @@ retry:
 1191                 kernel_sock_shutdown(s, SHUT_RDWR);
 1192                 sock_release(s);
 1193  randomize:
 1194 -               if (prandom_u32() & 1)
 1195 +               if (random32() & 1)
 1196                     goto retry;
 1197             }
 1198         }
 1199 @@ -1184,19 +1188,19 @@ static int dtt_send_page(struct drbd_tra
 1200  
 1201  static int dtt_send_zc_bio(struct drbd_transport *transport, struct bio *bio)
 1202  {
 1203 -   struct bio_vec bvec;
 1204 -   struct bvec_iter iter;
 1205 +   struct bio_vec *bvec;
 1206 +   int iter;
 1207  
 1208     bio_for_each_segment(bvec, bio, iter) {
 1209         int err;
 1210  
 1211 -       err = dtt_send_page(transport, DATA_STREAM, bvec.bv_page,
 1212 -                     bvec.bv_offset, bvec.bv_len,
 1213 -                     bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
 1214 +       err = dtt_send_page(transport, DATA_STREAM, bvec->bv_page,
 1215 +                     bvec->bv_offset, bvec->bv_len,
 1216 +                     ((iter) == bio->bi_vcnt - 1) ? 0 : MSG_MORE);
 1217         if (err)
 1218             return err;
 1219  
 1220 -       if (bio_op(bio) == REQ_OP_WRITE_SAME)
 1221 +       if ((false)/* WRITE_SAME not supported on this kernel */)
 1222             break;
 1223     }
 1224     return 0;
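
The drbd_transport_tcp.c hunks above fall back to the four-argument sock_create_kern() (no network-namespace parameter), getname() with an explicit address-length out-parameter, random32() instead of prandom_u32(), and the pre-immutable-biovec segment walk used in dtt_send_zc_bio(). A minimal sketch of that older bvec iteration, with the actual send call elided:

        /* Older kernels: bio_for_each_segment() yields a struct bio_vec *
         * and an integer segment index instead of a bvec_iter. */
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment(bvec, bio, i) {
                /* send bvec->bv_page at bvec->bv_offset for bvec->bv_len;
                 * pass MSG_MORE unless i == bio->bi_vcnt - 1 (last segment) */
        }
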
 1225 --- drbd_nla.c
 1226 +++ /tmp/cocci-output-19039-323b9b-drbd_nla.c
 1227 @@ -35,8 +35,7 @@ int drbd_nla_parse_nested(struct nlattr
 1228  
 1229     err = drbd_nla_check_mandatory(maxtype, nla);
 1230     if (!err)
 1231 -       err = nla_parse_nested_deprecated(tb, maxtype, nla, policy,
 1232 -                         NULL);
 1233 +       err = nla_parse_nested(tb, maxtype, nla, policy, NULL);
 1234  
 1235     return err;
 1236  }
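
In the drbd_nla.c hunk the _deprecated suffix is simply dropped: on the kernels this compat patch targets, plain nla_parse_nested() still has the permissive validation semantics that newer kernels expose as nla_parse_nested_deprecated(), so the five-argument form with a NULL extack is kept:

        /* older naming, same behaviour as the *_deprecated() variant */
        err = nla_parse_nested(tb, maxtype, nla, policy, NULL);
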
 1237 --- drbd_main.c
 1238 +++ /tmp/cocci-output-19039-985ee9-drbd_main.c
 1239 @@ -54,8 +54,8 @@
 1240  #include "drbd_dax_pmem.h"
 1241  
 1242  static int drbd_open(struct block_device *bdev, fmode_t mode);
 1243 -static void drbd_release(struct gendisk *gd, fmode_t mode);
 1244 -static void md_sync_timer_fn(struct timer_list *t);
 1245 +static int drbd_release(struct gendisk *gd, fmode_t mode);
 1246 +static void md_sync_timer_fn(unsigned long data);
 1247  static int w_bitmap_io(struct drbd_work *w, int unused);
 1248  static int flush_send_buffer(struct drbd_connection *connection, enum drbd_stream drbd_stream);
 1249  static u64 __set_bitmap_slots(struct drbd_device *device, u64 bitmap_uuid, u64 do_nodes) __must_hold(local);
 1250 @@ -121,11 +121,6 @@ static int param_set_drbd_protocol_versi
 1251  #define param_check_drbd_protocol_version  param_check_uint
 1252  #define param_get_drbd_protocol_version        param_get_uint
 1253  
 1254 -const struct kernel_param_ops param_ops_drbd_protocol_version = {
 1255 -   .set = param_set_drbd_protocol_version,
 1256 -   .get = param_get_drbd_protocol_version,
 1257 -};
 1258 -
 1259  unsigned int drbd_protocol_version_min = PRO_VERSION_MIN;
 1260  module_param_named(protocol_version_min, drbd_protocol_version_min, drbd_protocol_version, 0644);
 1261  
 1262 @@ -140,11 +135,11 @@ struct kmem_cache *drbd_request_cache;
 1263  struct kmem_cache *drbd_ee_cache;  /* peer requests */
 1264  struct kmem_cache *drbd_bm_ext_cache;  /* bitmap extents */
 1265  struct kmem_cache *drbd_al_ext_cache;  /* activity log extents */
 1266 -mempool_t drbd_request_mempool;
 1267 -mempool_t drbd_ee_mempool;
 1268 -mempool_t drbd_md_io_page_pool;
 1269 -struct bio_set drbd_md_io_bio_set;
 1270 -struct bio_set drbd_io_bio_set;
 1271 +mempool_t *drbd_request_mempool;
 1272 +mempool_t *drbd_ee_mempool;
 1273 +mempool_t *drbd_md_io_page_pool;
 1274 +struct bio_set * drbd_md_io_bio_set;
 1275 +struct bio_set * drbd_io_bio_set;
 1276  
 1277  /* I do not use a standard mempool, because:
 1278     1) I want to hand out the pre-allocated objects first.
 1279 @@ -163,12 +158,22 @@ static const struct block_device_operati
 1280     .release = drbd_release,
 1281  };
 1282  
 1283 +static void ___bio_destructor_drbd(struct bio *bio)
 1284 +{
 1285 +   bio_free(bio, drbd_md_io_bio_set);
 1286 +}
 1287 +
 1288  struct bio *bio_alloc_drbd(gfp_t gfp_mask)
 1289  {
 1290 -   if (!bioset_initialized(&drbd_md_io_bio_set))
 1291 +   struct bio *___bio;
 1292 +   if (!(drbd_md_io_bio_set != NULL))
 1293         return bio_alloc(gfp_mask, 1);
 1294  
 1295 -   return bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
 1296 +   ___bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
 1297 +   if (!___bio)
 1298 +       return NULL;
 1299 +   ___bio->bi_destructor = ___bio_destructor_drbd;
 1300 +   return ___bio;
 1301  }
 1302  
 1303  #ifdef __CHECKER__
 1304 @@ -589,8 +594,8 @@ static int drbd_thread_setup(void *arg)
 1305     unsigned long flags;
 1306     int retval;
 1307  
 1308 -   allow_kernel_signal(DRBD_SIGKILL);
 1309 -   allow_kernel_signal(SIGXCPU);
 1310 +   allow_signal(DRBD_SIGKILL);
 1311 +   allow_signal(SIGXCPU);
 1312  
 1313     if (connection)
 1314         kref_get(&connection->kref);
 1315 @@ -1553,7 +1558,7 @@ static void assign_p_sizes_qlim(struct d
 1316         p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
 1317         p->qlim->discard_enabled = blk_queue_discard(q);
 1318         p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
 1319 -       p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
 1320 +       p->qlim->write_same_capable = 0;
 1321     } else {
 1322         q = device->rq_queue;
 1323         p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
 1324 @@ -2113,9 +2118,9 @@ static int _drbd_no_send_page(struct drb
 1325     int err;
 1326  
 1327     buffer2 = alloc_send_buffer(connection, size, DATA_STREAM);
 1328 -   from_base = kmap_atomic(page);
 1329 +   from_base = kmap_atomic(page, KM_USER0);
 1330     memcpy(buffer2, from_base + offset, size);
 1331 -   kunmap_atomic(from_base);
 1332 +   kunmap_atomic(from_base, KM_USER0);
 1333  
 1334     if (msg_flags & MSG_MORE) {
 1335         sbuf->pos += sbuf->allocated_size;
 1336 @@ -2131,8 +2136,8 @@ static int _drbd_no_send_page(struct drb
 1337  static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
 1338  {
 1339     struct drbd_connection *connection = peer_device->connection;
 1340 -   struct bio_vec bvec;
 1341 -   struct bvec_iter iter;
 1342 +   struct bio_vec *bvec;
 1343 +   int iter;
 1344  
 1345     /* Flush send buffer and make sure PAGE_SIZE is available... */
 1346     alloc_send_buffer(connection, PAGE_SIZE, DATA_STREAM);
 1347 @@ -2142,24 +2147,24 @@ static int _drbd_send_bio(struct drbd_pe
 1348     bio_for_each_segment(bvec, bio, iter) {
 1349         int err;
 1350  
 1351 -       err = _drbd_no_send_page(peer_device, bvec.bv_page,
 1352 -                    bvec.bv_offset, bvec.bv_len,
 1353 -                    bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
 1354 +       err = _drbd_no_send_page(peer_device, bvec->bv_page,
 1355 +                    bvec->bv_offset, bvec->bv_len,
 1356 +                    ((iter) == bio->bi_vcnt - 1) ? 0 : MSG_MORE);
 1357         if (err)
 1358             return err;
 1359         /* WRITE_SAME has only one segment */
 1360 -       if (bio_op(bio) == REQ_OP_WRITE_SAME)
 1361 +       if ((false)/* WRITE_SAME not supported on this kernel */)
 1362             break;
 1363  
 1364 -       peer_device->send_cnt += bvec.bv_len >> 9;
 1365 +       peer_device->send_cnt += bvec->bv_len >> 9;
 1366     }
 1367     return 0;
 1368  }
 1369  
 1370  static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
 1371  {
 1372 -   struct bio_vec bvec;
 1373 -   struct bvec_iter iter;
 1374 +   struct bio_vec *bvec;
 1375 +   int iter;
 1376     bool no_zc = drbd_disable_sendpage;
 1377  
 1378     /* e.g. XFS meta- & log-data is in slab pages, which have a
 1379 @@ -2170,7 +2175,7 @@ static int _drbd_send_zc_bio(struct drbd
 1380      * by someone, leading to some obscure delayed Oops somewhere else. */
 1381     if (!no_zc)
 1382         bio_for_each_segment(bvec, bio, iter) {
 1383 -           struct page *page = bvec.bv_page;
 1384 +           struct page *page = bvec->bv_page;
 1385  
 1386             if (page_count(page) < 1 || PageSlab(page)) {
 1387                 no_zc = true;
 1388 @@ -2190,7 +2195,7 @@ static int _drbd_send_zc_bio(struct drbd
 1389  
 1390         err = tr_ops->send_zc_bio(transport, bio);
 1391         if (!err)
 1392 -           peer_device->send_cnt += bio->bi_iter.bi_size >> 9;
 1393 +           peer_device->send_cnt += bio->bi_size >> 9;
 1394  
 1395         return err;
 1396     }
 1397 @@ -2226,19 +2231,19 @@ static int _drbd_send_zc_ee(struct drbd_
 1398  static u32 bio_flags_to_wire(struct drbd_connection *connection, struct bio *bio)
 1399  {
 1400     if (connection->agreed_pro_version >= 95)
 1401 -       return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
 1402 -           (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
 1403 -           (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
 1404 -           (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
 1405 -           (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
 1406 -           (bio_op(bio) == REQ_OP_WRITE_ZEROES ?
 1407 +       return  (bio->bi_rw & (1UL << BIO_RW_SYNCIO) ? DP_RW_SYNC : 0) |
 1408 +           (bio->bi_rw & (1UL << BIO_RW_FUA) ? DP_FUA : 0) |
 1409 +           (bio->bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 1410 +           ((false)/* WRITE_SAME not supported on this kernel */ ? DP_WSAME : 0) |
 1411 +           ((bio->bi_rw & BIO_DISCARD) ? DP_DISCARD : 0) |
 1412 +           ((false)/* WRITE_ZEROES not supported on this kernel */ ?
 1413               ((connection->agreed_features & DRBD_FF_WZEROES) ?
 1414 -              (DP_ZEROES |(!(bio->bi_opf & REQ_NOUNMAP) ? DP_DISCARD : 0))
 1415 +              (DP_ZEROES |(!(false)/* NOUNMAP not supported on this kernel */ ? DP_DISCARD : 0))
 1416                : DP_DISCARD)
 1417             : 0);
 1418  
 1419     /* else: we used to communicate one bit only in older DRBD */
 1420 -   return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
 1421 +   return bio->bi_rw & (1UL << BIO_RW_SYNCIO) ? DP_RW_SYNC : 0;
 1422  }
 1423  
 1424  /* Used to send write or TRIM aka REQ_OP_DISCARD requests
 1425 @@ -2257,9 +2262,8 @@ int drbd_send_dblock(struct drbd_peer_de
 1426     int digest_size = 0;
 1427     int err;
 1428     const unsigned s = drbd_req_state_by_peer_device(req, peer_device);
 1429 -   const int op = bio_op(req->master_bio);
 1430  
 1431 -   if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
 1432 +   if ((req->master_bio->bi_rw & BIO_DISCARD) || (false)/* WRITE_ZEROES not supported on this kernel */) {
 1433         trim = drbd_prepare_command(peer_device, sizeof(*trim), DATA_STREAM);
 1434         if (!trim)
 1435             return -EIO;
 1436 @@ -2269,7 +2273,7 @@ int drbd_send_dblock(struct drbd_peer_de
 1437         if (peer_device->connection->integrity_tfm)
 1438             digest_size = crypto_shash_digestsize(peer_device->connection->integrity_tfm);
 1439  
 1440 -       if (op == REQ_OP_WRITE_SAME) {
 1441 +       if ((false)/* WRITE_SAME not supported on this kernel */) {
 1442             wsame = drbd_prepare_command(peer_device, sizeof(*wsame) + digest_size, DATA_STREAM);
 1443             if (!wsame)
 1444                 return -EIO;
 1445 @@ -2312,7 +2316,7 @@ int drbd_send_dblock(struct drbd_peer_de
 1446  
 1447     if (wsame) {
 1448         additional_size_command(peer_device->connection, DATA_STREAM,
 1449 -                   bio_iovec(req->master_bio).bv_len);
 1450 +                   bio_iovec(req->master_bio)->bv_len);
 1451         err = __send_command(peer_device->connection, device->vnr, P_WSAME, DATA_STREAM);
 1452     } else {
 1453         additional_size_command(peer_device->connection, DATA_STREAM, req->i.size);
 1454 @@ -2669,7 +2673,7 @@ void drbd_open_counts(struct drbd_resour
 1455     *ro_count_ptr = ro_count;
 1456  }
 1457  
 1458 -static void drbd_release(struct gendisk *gd, fmode_t mode)
 1459 +static int drbd_release(struct gendisk *gd, fmode_t mode)
 1460  {
 1461     struct drbd_device *device = gd->private_data;
 1462     struct drbd_resource *resource = device->resource;
 1463 @@ -2713,6 +2717,7 @@ static void drbd_release(struct gendisk
 1464  
 1465     kref_debug_put(&device->kref_debug, 3);
 1466     kref_put(&device->kref, drbd_destroy_device);  /* might destroy the resource as well */
 1467 +   return 0;
 1468  }
 1469  
 1470  /* need to hold resource->req_lock */
 1471 @@ -2732,6 +2737,23 @@ void drbd_queue_unplug(struct drbd_devic
 1472     }
 1473  }
 1474  
 1475 +static void drbd_unplug_fn(struct request_queue *q)
 1476 +{
 1477 +   struct drbd_device *device = q->queuedata;
 1478 +   struct drbd_resource *resource = device->resource;
 1479 +
 1480 +   /* unplug FIRST */
 1481 +   /* note: q->queue_lock == resource->req_lock */
 1482 +   spin_lock_irq(&resource->req_lock);
 1483 +   blk_remove_plug(q);
 1484 +
 1485 +   /* only if connected */
 1486 +   drbd_queue_unplug(device);
 1487 +   spin_unlock_irq(&resource->req_lock);
 1488 +
 1489 +   drbd_kick_lo(device);
 1490 +}
 1491 +
 1492  static void drbd_set_defaults(struct drbd_device *device)
 1493  {
 1494     device->disk_state[NOW] = D_DISKLESS;
 1495 @@ -2769,11 +2791,26 @@ static void drbd_destroy_mempools(void)
 1496  
 1497     /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
 1498  
 1499 -   bioset_exit(&drbd_io_bio_set);
 1500 -   bioset_exit(&drbd_md_io_bio_set);
 1501 -   mempool_exit(&drbd_md_io_page_pool);
 1502 -   mempool_exit(&drbd_ee_mempool);
 1503 -   mempool_exit(&drbd_request_mempool);
 1504 +   if (drbd_io_bio_set) {
 1505 +       bioset_free(drbd_io_bio_set);
 1506 +       drbd_io_bio_set = NULL;
 1507 +   }
 1508 +   if (drbd_md_io_bio_set) {
 1509 +       bioset_free(drbd_md_io_bio_set);
 1510 +       drbd_md_io_bio_set = NULL;
 1511 +   }
 1512 +   if (drbd_md_io_page_pool) {
 1513 +       mempool_destroy(drbd_md_io_page_pool);
 1514 +       drbd_md_io_page_pool = NULL;
 1515 +   }
 1516 +   if (drbd_ee_mempool) {
 1517 +       mempool_destroy(drbd_ee_mempool);
 1518 +       drbd_ee_mempool = NULL;
 1519 +   }
 1520 +   if (drbd_request_mempool) {
 1521 +       mempool_destroy(drbd_request_mempool);
 1522 +       drbd_request_mempool = NULL;
 1523 +   }
 1524     if (drbd_ee_cache)
 1525         kmem_cache_destroy(drbd_ee_cache);
 1526     if (drbd_request_cache)
 1527 @@ -2819,25 +2856,23 @@ static int drbd_create_mempools(void)
 1528         goto Enomem;
 1529  
 1530     /* mempools */
 1531 -   ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
 1532 -   if (ret)
 1533 +   drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
 1534 +   if (drbd_io_bio_set == NULL)
 1535         goto Enomem;
 1536  
 1537 -   ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
 1538 -             BIOSET_NEED_BVECS);
 1539 -   if (ret)
 1540 +   drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0, 0);
 1541 +   if (drbd_md_io_bio_set == NULL)
 1542         goto Enomem;
 1543  
 1544 -   ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
 1545 +   ret = ((drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0)) == NULL ? -ENOMEM : 0);
 1546     if (ret)
 1547         goto Enomem;
 1548  
 1549 -   ret = mempool_init_slab_pool(&drbd_request_mempool, number,
 1550 -                    drbd_request_cache);
 1551 +   ret = ((drbd_request_mempool = mempool_create_slab_pool(number, drbd_request_cache)) == NULL ? -ENOMEM : 0);
 1552     if (ret)
 1553         goto Enomem;
 1554  
 1555 -   ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
 1556 +   ret = ((drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache)) == NULL ? -ENOMEM : 0);
 1557     if (ret)
 1558         goto Enomem;
 1559  
 1560 @@ -2947,7 +2982,7 @@ void drbd_reclaim_resource(struct rcu_he
 1561         kref_debug_put(&connection->kref_debug, 9);
 1562         kref_put(&connection->kref, drbd_destroy_connection);
 1563     }
 1564 -   mempool_free(resource->peer_ack_req, &drbd_request_mempool);
 1565 +   mempool_free(resource->peer_ack_req, drbd_request_mempool);
 1566     kref_debug_put(&resource->kref_debug, 8);
 1567     kref_put(&resource->kref, drbd_destroy_resource);
 1568  }
 1569 @@ -3091,14 +3126,14 @@ static int drbd_congested(void *congeste
 1570     }
 1571  
 1572     if (test_bit(CALLBACK_PENDING, &device->resource->flags)) {
 1573 -       r |= (1 << WB_async_congested);
 1574 +       r |= (1 << BDI_async_congested);
 1575         /* Without good local data, we would need to read from remote,
 1576          * and that would need the worker thread as well, which is
 1577          * currently blocked waiting for that usermode helper to
 1578          * finish.
 1579          */
 1580         if (!get_ldev_if_state(device, D_UP_TO_DATE))
 1581 -           r |= (1 << WB_sync_congested);
 1582 +           r |= (1 << BDI_sync_congested);
 1583         else
 1584             put_ldev(device);
 1585         r &= bdi_bits;
 1586 @@ -3111,13 +3146,13 @@ static int drbd_congested(void *congeste
 1587         put_ldev(device);
 1588     }
 1589  
 1590 -   if (bdi_bits & (1 << WB_async_congested)) {
 1591 +   if (bdi_bits & (1 << BDI_async_congested)) {
 1592         struct drbd_peer_device *peer_device;
 1593  
 1594         rcu_read_lock();
 1595         for_each_peer_device_rcu(peer_device, device) {
 1596             if (test_bit(NET_CONGESTED, &peer_device->connection->transport.flags)) {
 1597 -               r |= (1 << WB_async_congested);
 1598 +               r |= (1 << BDI_async_congested);
 1599                 break;
 1600             }
 1601         }
 1602 @@ -3232,9 +3267,9 @@ void drbd_flush_peer_acks(struct drbd_re
 1603     spin_unlock_irq(&resource->req_lock);
 1604  }
 1605  
 1606 -static void peer_ack_timer_fn(struct timer_list *t)
 1607 +static void peer_ack_timer_fn(unsigned long data)
 1608  {
 1609 -   struct drbd_resource *resource = from_timer(resource, t, peer_ack_timer);
 1610 +   struct drbd_resource *resource = (struct drbd_resource *)data;
 1611  
 1612     drbd_flush_peer_acks(resource);
 1613  }
 1614 @@ -3371,8 +3406,10 @@ struct drbd_resource *drbd_create_resour
 1615     INIT_LIST_HEAD(&resource->connections);
 1616     INIT_LIST_HEAD(&resource->transfer_log);
 1617     INIT_LIST_HEAD(&resource->peer_ack_list);
 1618 -   timer_setup(&resource->peer_ack_timer, peer_ack_timer_fn, 0);
 1619 -   timer_setup(&resource->repost_up_to_date_timer, repost_up_to_date_fn, 0);
 1620 +   setup_timer(&resource->peer_ack_timer, peer_ack_timer_fn,
 1621 +           (unsigned long)resource);
 1622 +   setup_timer(&resource->repost_up_to_date_timer, repost_up_to_date_fn,
 1623 +           (unsigned long)resource);
 1624     sema_init(&resource->state_sem, 1);
 1625     resource->role[NOW] = R_SECONDARY;
 1626     if (set_resource_options(resource, res_opts))
 1627 @@ -3389,11 +3426,13 @@ struct drbd_resource *drbd_create_resour
 1628     init_waitqueue_head(&resource->twopc_wait);
 1629     init_waitqueue_head(&resource->barrier_wait);
 1630     INIT_LIST_HEAD(&resource->twopc_parents);
 1631 -   timer_setup(&resource->twopc_timer, twopc_timer_fn, 0);
 1632 +   setup_timer(&resource->twopc_timer, twopc_timer_fn,
 1633 +           (unsigned long)resource);
 1634     INIT_LIST_HEAD(&resource->twopc_work.list);
 1635     INIT_LIST_HEAD(&resource->queued_twopc);
 1636     spin_lock_init(&resource->queued_twopc_lock);
 1637 -   timer_setup(&resource->queued_twopc_timer, queued_twopc_timer_fn, 0);
 1638 +   setup_timer(&resource->queued_twopc_timer, queued_twopc_timer_fn,
 1639 +           (unsigned long)resource);
 1640     drbd_init_workqueue(&resource->work);
 1641     drbd_thread_init(resource, &resource->worker, drbd_worker, "worker");
 1642     drbd_thread_start(&resource->worker);
 1643 @@ -3454,7 +3493,8 @@ struct drbd_connection *drbd_create_conn
 1644     mutex_init(&connection->mutex[CONTROL_STREAM]);
 1645  
 1646     INIT_LIST_HEAD(&connection->connect_timer_work.list);
 1647 -   timer_setup(&connection->connect_timer, connect_timer_fn, 0);
 1648 +   setup_timer(&connection->connect_timer, connect_timer_fn,
 1649 +           (unsigned long)connection);
 1650  
 1651     drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
 1652     connection->receiver.connection = connection;
 1653 @@ -3566,11 +3606,13 @@ struct drbd_peer_device *create_peer_dev
 1654         return NULL;
 1655     }
 1656  
 1657 -   timer_setup(&peer_device->start_resync_timer, start_resync_timer_fn, 0);
 1658 +   setup_timer(&peer_device->start_resync_timer, start_resync_timer_fn,
 1659 +           (unsigned long)peer_device);
 1660  
 1661     INIT_LIST_HEAD(&peer_device->resync_work.list);
 1662     peer_device->resync_work.cb  = w_resync_timer;
 1663 -   timer_setup(&peer_device->resync_timer, resync_timer_fn, 0);
 1664 +   setup_timer(&peer_device->resync_timer, resync_timer_fn,
 1665 +           (unsigned long)peer_device);
 1666  
 1667     INIT_LIST_HEAD(&peer_device->propagate_uuids_work.list);
 1668     peer_device->propagate_uuids_work.cb = w_send_uuids;
 1669 @@ -3594,7 +3636,7 @@ static int init_submitter(struct drbd_de
 1670     /* opencoded create_singlethread_workqueue(),
 1671      * to be able to use format string arguments */
 1672     device->submit.wq =
 1673 -       alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
 1674 +       create_singlethread_workqueue("drbd_submit");
 1675     if (!device->submit.wq)
 1676         return -ENOMEM;
 1677     INIT_WORK(&device->submit.worker, do_submit);
 1678 @@ -3667,8 +3709,10 @@ enum drbd_ret_code drbd_create_device(st
 1679     spin_lock_init(&device->pending_bitmap_work.q_lock);
 1680     INIT_LIST_HEAD(&device->pending_bitmap_work.q);
 1681  
 1682 -   timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
 1683 -   timer_setup(&device->request_timer, request_timer_fn, 0);
 1684 +   setup_timer(&device->md_sync_timer, md_sync_timer_fn,
 1685 +           (unsigned long)device);
 1686 +   setup_timer(&device->request_timer, request_timer_fn,
 1687 +           (unsigned long)device);
 1688  
 1689     init_waitqueue_head(&device->misc_wait);
 1690     init_waitqueue_head(&device->al_wait);
 1691 @@ -3700,6 +3744,10 @@ enum drbd_ret_code drbd_create_device(st
 1692  
 1693     blk_queue_make_request(q, drbd_make_request);
 1694     blk_queue_write_cache(q, true, true);
 1695 +   q->queue_lock = &resource->req_lock;/* needed since we use */
 1696 +   /* plugging on a queue, that actually has no requests! */
 1697 +   q->unplug_fn = drbd_unplug_fn;
 1698 +   blk_queue_merge_bvec(q, drbd_merge_bvec);
 1699  
 1700     device->md_io.page = alloc_page(GFP_KERNEL);
 1701     if (!device->md_io.page)
 1702 @@ -4115,7 +4163,7 @@ int drbd_md_write(struct drbd_device *de
 1703     D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
 1704     sector = device->ldev->md.md_offset;
 1705  
 1706 -   err = drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE);
 1707 +   err = drbd_md_sync_page_io(device, device->ldev, sector, WRITE);
 1708     if (err) {
 1709         drbd_err(device, "meta data update failed!\n");
 1710         drbd_chk_io_error(device, err, DRBD_META_IO_ERROR);
 1711 @@ -4443,8 +4491,7 @@ int drbd_md_read(struct drbd_device *dev
 1712     if (!buffer)
 1713         return ERR_NOMEM;
 1714  
 1715 -   if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
 1716 -                REQ_OP_READ)) {
 1717 +   if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
 1718         /* NOTE: can't do normal error processing here as this is
 1719            called BEFORE disk is attached */
 1720         drbd_err(device, "Error while reading metadata.\n");
 1721 @@ -5488,9 +5535,9 @@ bool drbd_md_test_peer_flag(struct drbd_
 1722     return md->peers[peer_device->node_id].flags & flag;
 1723  }
 1724  
 1725 -static void md_sync_timer_fn(struct timer_list *t)
 1726 +static void md_sync_timer_fn(unsigned long data)
 1727  {
 1728 -   struct drbd_device *device = from_timer(device, t, md_sync_timer);
 1729 +   struct drbd_device *device = (struct drbd_device *)data;
 1730     drbd_device_post_work(device, MD_SYNC);
 1731  }
 1732  
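
Most of the drbd_main.c changes above revert to older kernel interfaces: embedded mempools and bio_sets become pointers managed with *_create()/*_destroy(), bios allocated from a bio_set get an explicit bi_destructor, kmap_atomic() takes a KM_USER0 slot, congestion bits use the BDI_* names, and timers use the unsigned-long-cookie callback convention. A minimal sketch of that older timer pattern, reusing names from the patch:

        /* pre-timer_list-callback API: the callback receives an opaque cookie */
        static void md_sync_timer_fn(unsigned long data)
        {
                struct drbd_device *device = (struct drbd_device *)data;
                drbd_device_post_work(device, MD_SYNC);
        }

        /* at device creation time: */
        setup_timer(&device->md_sync_timer, md_sync_timer_fn,
                    (unsigned long)device);
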
 1733 --- drbd_debugfs.c
 1734 +++ /tmp/cocci-output-19039-ce059e-drbd_debugfs.c
 1735 @@ -594,12 +594,12 @@ static int drbd_single_open(struct file
 1736     if (!parent || !parent->d_inode)
 1737         goto out;
 1738     /* serialize with d_delete() */
 1739 -   inode_lock(d_inode(parent));
 1740 +   mutex_lock(&parent->d_inode->i_mutex);
 1741     /* Make sure the object is still alive */
 1742     if (simple_positive(file->f_path.dentry)
 1743     && kref_get_unless_zero(kref))
 1744         ret = 0;
 1745 -   inode_unlock(d_inode(parent));
 1746 +   mutex_unlock(&parent->d_inode->i_mutex);
 1747     if (!ret) {
 1748         ret = single_open(file, show, data);
 1749         if (ret)
 1750 @@ -1147,7 +1147,7 @@ static int device_req_timing_show(struct
 1751  static ssize_t device_req_timing_write(struct file *file, const char __user *ubuf,
 1752                        size_t cnt, loff_t *ppos)
 1753  {
 1754 -   struct drbd_device *device = file_inode(file)->i_private;
 1755 +   struct drbd_device *device = file->f_dentry->d_inode->i_private;
 1756     char buffer;
 1757  
 1758     if (copy_from_user(&buffer, ubuf, 1))
 1759 @@ -1299,7 +1299,7 @@ static int drbd_single_open_peer_device(
 1760     parent = file->f_path.dentry->d_parent;
 1761     if (!parent || !parent->d_inode)
 1762         goto out;
 1763 -   inode_lock(d_inode(parent));
 1764 +   mutex_lock(&parent->d_inode->i_mutex);
 1765     if (!simple_positive(file->f_path.dentry))
 1766         goto out_unlock;
 1767  
 1768 @@ -1308,7 +1308,7 @@ static int drbd_single_open_peer_device(
 1769  
 1770     if (got_connection && got_device) {
 1771         int ret;
 1772 -       inode_unlock(d_inode(parent));
 1773 +       mutex_unlock(&parent->d_inode->i_mutex);
 1774         ret = single_open(file, show, peer_device);
 1775         if (ret) {
 1776             kref_put(&connection->kref, drbd_destroy_connection);
 1777 @@ -1322,7 +1322,7 @@ static int drbd_single_open_peer_device(
 1778     if (got_device)
 1779         kref_put(&device->kref, drbd_destroy_device);
 1780  out_unlock:
 1781 -   inode_unlock(d_inode(parent));
 1782 +   mutex_unlock(&parent->d_inode->i_mutex);
 1783  out:
 1784     return -ESTALE;
 1785  }
 1786 @@ -1716,6 +1716,59 @@ static const struct file_operations drbd
 1787  
 1788  static int drbd_compat_show(struct seq_file *m, void *ignored)
 1789  {
 1790 +   seq_puts(m, "block_device_operations_release__no_is_void\n");
 1791 +   seq_puts(m, "claim_disk__no_link__yes_claim\n");
 1792 +   seq_puts(m, "timer_setup__no_present\n");
 1793 +   seq_puts(m, "bio_bi_bdev__yes_present\n");
 1794 +   seq_puts(m, "refcount_inc__no_present\n");
 1795 +   seq_puts(m, "netlink_cb_portid__no_present\n");
 1796 +   seq_puts(m, "prandom_u32__no_present\n");
 1797 +   seq_puts(m, "struct_bvec_iter__no_present\n");
 1798 +   seq_puts(m, "rdma_create_id__no_has_net_ns\n");
 1799 +   seq_puts(m, "ib_query_device__no_has_3_params\n");
 1800 +   seq_puts(m, "ib_alloc_pd__no_has_2_params\n");
 1801 +   seq_puts(m, "req_hardbarrier__yes_present\n");
 1802 +   seq_puts(m, "make_request__no_is_blk_qc_t__no_is_void\n");
 1803 +   seq_puts(m, "blkdev_get_by_path__no_present\n");
 1804 +   seq_puts(m, "bio__no_bi_status__no_bi_error\n");
 1805 +   seq_puts(m, "bio__no_bi_status\n");
 1806 +   seq_puts(m, "kernel_read__yes_before_4_13\n");
 1807 +   seq_puts(m, "sock_ops__no_returns_addr_len\n");
 1808 +   seq_puts(m, "hlist_for_each_entry__no_has_three_parameters\n");
 1809 +   seq_puts(m, "idr_is_empty__no_present\n");
 1810 +   seq_puts(m, "sock_create_kern__no_has_five_parameters\n");
 1811 +   seq_puts(m, "wb_congested_enum__no_present\n");
 1812 +   seq_puts(m, "time64_to_tm__no_present\n");
 1813 +   seq_puts(m, "ktime_to_timespec64__no_present\n");
 1814 +   seq_puts(m, "file_inode__no_present\n");
 1815 +   seq_puts(m, "d_inode__no_present\n");
 1816 +   seq_puts(m, "inode_lock__no_present\n");
 1817 +   seq_puts(m, "ratelimit_state_init__no_present\n");
 1818 +   seq_puts(m, "bioset_init__no_present\n");
 1819 +   seq_puts(m, "bioset_init__no_present__no_bio_clone_fast\n");
 1820 +   seq_puts(m, "bioset_init__no_present__no_need_bvecs\n");
 1821 +   seq_puts(m, "bio_free__yes_present\n");
 1822 +   seq_puts(m, "genl_policy__yes_in_ops\n");
 1823 +   seq_puts(m, "blk_queue_merge_bvec__yes_present\n");
 1824 +   seq_puts(m, "blk_queue_split__no_present\n");
 1825 +   seq_puts(m, "blk_queue_flag_set__no_present\n");
 1826 +   seq_puts(m, "req_nounmap__no_present\n");
 1827 +   seq_puts(m, "write_same__no_capable\n");
 1828 +   seq_puts(m, "bio_rw__yes_present\n");
 1829 +   seq_puts(m, "req_op_write_zeroes__no_present\n");
 1830 +   seq_puts(m, "blk_check_plugged__no_present\n");
 1831 +   seq_puts(m, "kmap_atomic__no_page_only\n");
 1832 +   seq_puts(m, "blk_queue_plugged__yes_present\n");
 1833 +   seq_puts(m, "alloc_workqueue__no_takes_fmt\n");
 1834 +   seq_puts(m, "struct_kernel_param_ops__no_present\n");
 1835 +   seq_puts(m, "req_prio__no_present\n");
 1836 +   seq_puts(m, "req_preflush__no_present\n");
 1837 +   seq_puts(m, "bio_bi_opf__no_present\n");
 1838 +   seq_puts(m, "bio_flush__yes_present\n");
 1839 +   seq_puts(m, "req_noidle__yes_present\n");
 1840 +   seq_puts(m, "nla_nest_start_noflag__no_present\n");
 1841 +   seq_puts(m, "nla_parse_deprecated__no_present\n");
 1842 +   seq_puts(m, "allow_kernel_signal__no_present\n");
 1843     return 0;
 1844  }
 1845  
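
The drbd_debugfs.c hunks replace the file_inode()/d_inode()/inode_lock() helpers with their older spellings; assuming a kernel where inodes are still serialized by i_mutex, the pattern used in drbd_single_open() becomes:

        struct inode *inode = file->f_dentry->d_inode;  /* instead of file_inode(file) */

        mutex_lock(&parent->d_inode->i_mutex);          /* instead of inode_lock(d_inode(parent)) */
        /* ... check the dentry is still positive, take the kref ... */
        mutex_unlock(&parent->d_inode->i_mutex);
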
 1846 --- drbd_actlog.c
 1847 +++ /tmp/cocci-output-19039-7943e0-drbd_actlog.c
 1848 @@ -77,32 +77,32 @@ void wait_until_done_or_force_detached(s
 1849  
 1850  static int _drbd_md_sync_page_io(struct drbd_device *device,
 1851                  struct drbd_backing_dev *bdev,
 1852 -                sector_t sector, int op)
 1853 +                sector_t sector, int rw)
 1854  {
 1855     struct bio *bio;
 1856     /* we do all our meta data IO in aligned 4k blocks. */
 1857     const int size = 4096;
 1858 -   int err, op_flags = 0;
 1859 +   int err;
 1860  
 1861 -   if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
 1862 -       op_flags |= REQ_FUA | REQ_PREFLUSH;
 1863 -   op_flags |= REQ_META | REQ_SYNC | REQ_PRIO;
 1864 +   if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
 1865 +       rw |= BIO_FLUSH | BIO_FUA;
 1866 +   rw |= REQ_META | (1UL << BIO_RW_SYNCIO);
 1867  
 1868     device->md_io.done = 0;
 1869     device->md_io.error = -ENODEV;
 1870  
 1871     bio = bio_alloc_drbd(GFP_NOIO);
 1872 -   bio_set_dev(bio, bdev->md_bdev);
 1873 -   bio->bi_iter.bi_sector = sector;
 1874 +   bio->bi_bdev = bdev->md_bdev;
 1875 +   bio->bi_sector = sector;
 1876     err = -EIO;
 1877     if (bio_add_page(bio, device->md_io.page, size, 0) != size)
 1878         goto out;
 1879     bio->bi_private = device;
 1880     bio->bi_end_io = drbd_md_endio;
 1881  
 1882 -   bio->bi_opf = op | op_flags;
 1883 +   bio->bi_rw = rw;
 1884  
 1885 -   if (op != REQ_OP_WRITE && device->disk_state[NOW] == D_DISKLESS && device->ldev == NULL)
 1886 +   if (!(rw & WRITE) && device->disk_state[NOW] == D_DISKLESS && device->ldev == NULL)
 1887         /* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
 1888         ;
 1889     else if (!get_ldev_if_state(device, D_ATTACHING)) {
 1890 @@ -115,11 +115,11 @@ static int _drbd_md_sync_page_io(struct
 1891     bio_get(bio); /* one bio_put() is in the completion handler */
 1892     atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
 1893     device->md_io.submit_jif = jiffies;
 1894 -   if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
 1895 -       bio->bi_status = BLK_STS_IOERR;
 1896 -       bio_endio(bio);
 1897 +   if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
 1898 +       bio_endio(bio,
 1899 +             (10 == 0 ? 0 : 10 == 9 ? -ENOMEM : 10 == 1 ? -EOPNOTSUPP : -EIO));
 1900     } else {
 1901 -       submit_bio(bio);
 1902 +       submit_bio(rw, bio);
 1903     }
 1904     wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
 1905     err = device->md_io.error;
 1906 @@ -129,7 +129,7 @@ static int _drbd_md_sync_page_io(struct
 1907  }
 1908  
 1909  int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
 1910 -            sector_t sector, int op)
 1911 +            sector_t sector, int rw)
 1912  {
 1913     int err;
 1914     D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
 1915 @@ -142,7 +142,7 @@ int drbd_md_sync_page_io(struct drbd_dev
 1916  
 1917     drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
 1918          current->comm, current->pid, __func__,
 1919 -        (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
 1920 +        (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
 1921          (void*)_RET_IP_ );
 1922  
 1923     if (sector < drbd_md_first_sector(bdev) ||
 1924 @@ -150,13 +150,13 @@ int drbd_md_sync_page_io(struct drbd_dev
 1925         drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
 1926              current->comm, current->pid, __func__,
 1927              (unsigned long long)sector,
 1928 -            (op == REQ_OP_WRITE) ? "WRITE" : "READ");
 1929 +            (rw & WRITE) ? "WRITE" : "READ");
 1930  
 1931 -   err = _drbd_md_sync_page_io(device, bdev, sector, op);
 1932 +   err = _drbd_md_sync_page_io(device, bdev, sector, rw);
 1933     if (err) {
 1934         drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
 1935             (unsigned long long)sector,
 1936 -           (op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
 1937 +           (rw & WRITE) ? "WRITE" : "READ", err);
 1938     }
 1939     return err;
 1940  }
 1941 @@ -435,7 +435,7 @@ static int __al_write_transaction(struct
 1942         rcu_read_unlock();
 1943         if (write_al_updates) {
 1944             ktime_aggregate_delta(device, start_kt, al_mid_kt);
 1945 -           if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
 1946 +           if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
 1947                 err = -EIO;
 1948                 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
 1949             } else {
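
The drbd_actlog.c hunks go back to the single rw/flags word: request type and flags live in bio->bi_rw (and are passed again to submit_bio()), sector and device are set through bi_sector/bi_bdev, and completion errors travel through bio_endio()'s second argument. A condensed sketch of that path inside _drbd_md_sync_page_io(), with the fault-injection branch reduced to an illustrative condition:

        bio->bi_bdev = bdev->md_bdev;
        bio->bi_sector = sector;
        bio->bi_rw = rw;                /* READ/WRITE plus BIO_FLUSH/BIO_FUA etc. */
        if (fault_injected)             /* illustrative condition */
                bio_endio(bio, -EIO);
        else
                submit_bio(rw, bio);
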
 1950 --- kref_debug.c
 1951 +++ /tmp/cocci-output-19039-2a2c80-kref_debug.c
 1952 @@ -108,7 +108,7 @@ void print_kref_debug_info(struct seq_fi
 1953         char obj_name[80];
 1954  
 1955         debug_refs = number_of_debug_refs(debug_info);
 1956 -       refs = refcount_read(&debug_info->kref->refcount);
 1957 +       refs = atomic_read(&debug_info->kref->refcount);
 1958         debug_info->class->get_object_name(debug_info, obj_name);
 1959  
 1960         seq_printf(seq, "class: %s, name: %s, refs: %d, dr: %d\n",
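
The kref_debug.c change assumes a struct kref whose refcount is still a plain atomic_t, so print_kref_debug_info() reads it with atomic_read() rather than refcount_read():

        int refs = atomic_read(&debug_info->kref->refcount);
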
 1961 --- drbd_nl.c
 1962 +++ /tmp/cocci-output-19039-2c5269-drbd_nl.c
 1963 @@ -111,7 +111,7 @@ static int drbd_msg_put_info(struct sk_b
 1964     if (!info || !info[0])
 1965         return 0;
 1966  
 1967 -   nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
 1968 +   nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
 1969     if (!nla)
 1970         return err;
 1971  
 1972 @@ -138,7 +138,7 @@ static int drbd_msg_sprintf_info(struct
 1973     int aligned_len;
 1974     char *msg_buf;
 1975  
 1976 -   nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
 1977 +   nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
 1978     if (!nla)
 1979         return err;
 1980  
 1981 @@ -1249,15 +1249,16 @@ static void opener_info(struct drbd_reso
 1982     mutex_lock(&resource->open_release);
 1983  
 1984     idr_for_each_entry(&resource->devices, device, i) {
 1985 -       struct timespec64 ts;
 1986 +       struct timespec ts;
 1987         struct tm tm;
 1988  
 1989         o = list_first_entry_or_null(&device->openers.list, struct opener, list);
 1990         if (!o)
 1991             continue;
 1992  
 1993 -       ts = ktime_to_timespec64(o->opened);
 1994 -       time64_to_tm(ts.tv_sec, -sys_tz.tz_minuteswest * 60, &tm);
 1995 +       ts = ktime_to_timespec(o->opened);
 1996 +       time_to_tm((time_t)ts.tv_sec, -sys_tz.tz_minuteswest * 60,
 1997 +              &tm);
 1998  
 1999         drbd_msg_sprintf_info(reply_skb,
 2000                       "/dev/drbd%d opened by %s (pid %d) "
 2001 @@ -1966,9 +1967,19 @@ static void decide_on_discard_support(st
 2002          * topology on all peers. */
 2003         blk_queue_discard_granularity(q, 512);
 2004         q->limits.max_discard_sectors = drbd_max_discard_sectors(device->resource);
 2005 -       blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 2006 +       {
 2007 +           unsigned long ____flags0;
 2008 +           spin_lock_irqsave(q->queue_lock, ____flags0);
 2009 +           queue_flag_set(QUEUE_FLAG_DISCARD, q);
 2010 +           spin_unlock_irqrestore(q->queue_lock, ____flags0);
 2011 +       }
 2012     } else {
 2013 -       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 2014 +       {
 2015 +           unsigned long ____flags1;
 2016 +           spin_lock_irqsave(q->queue_lock, ____flags1);
 2017 +           queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 2018 +           spin_unlock_irqrestore(q->queue_lock, ____flags1);
 2019 +       }
 2020         blk_queue_discard_granularity(q, 0);
 2021         q->limits.max_discard_sectors = 0;
 2022     }
 2023 @@ -1986,82 +1997,13 @@ static void fixup_discard_if_not_support
 2024     }
 2025  }
 2026  
 2027 -static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
 2028 -{
 2029 -   /* Fixup max_write_zeroes_sectors after blk_queue_stack_limits():
 2030 -    * if we can handle "zeroes" efficiently on the protocol,
 2031 -    * we want to do that, even if our backend does not announce
 2032 -    * max_write_zeroes_sectors itself. */
 2033 -
 2034 -   /* If all peers announce WZEROES support, use it.  Otherwise, rather
 2035 -    * send explicit zeroes than rely on some discard-zeroes-data magic. */
 2036 -   if (common_connection_features(device->resource) & DRBD_FF_WZEROES)
 2037 -       q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
 2038 -   else
 2039 -       q->limits.max_write_zeroes_sectors = 0;
 2040 -}
 2041 -
 2042  static void decide_on_write_same_support(struct drbd_device *device,
 2043             struct request_queue *q,
 2044             struct request_queue *b, struct o_qlim *o,
 2045             bool disable_write_same)
 2046  {
 2047 -   bool can_do = b ? b->limits.max_write_same_sectors : true;
 2048 -
 2049 -   if (can_do && disable_write_same) {
 2050 -       can_do = false;
 2051 -       drbd_info(device, "WRITE_SAME disabled by config\n");
 2052 -   }
 2053 -
 2054 -   if (can_do && !(common_connection_features(device->resource) & DRBD_FF_WSAME)) {
 2055 -       can_do = false;
 2056 -       drbd_info(device, "peer does not support WRITE_SAME\n");
 2057 -   }
 2058 -
 2059 -   if (o) {
 2060 -       /* logical block size; queue_logical_block_size(NULL) is 512 */
 2061 -       unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
 2062 -       unsigned int me_lbs_b = queue_logical_block_size(b);
 2063 -       unsigned int me_lbs = queue_logical_block_size(q);
 2064 -
 2065 -       if (me_lbs_b != me_lbs) {
 2066 -           drbd_warn(device,
 2067 -               "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
 2068 -               me_lbs, me_lbs_b);
 2069 -           /* rather disable write same than trigger some BUG_ON later in the scsi layer. */
 2070 -           can_do = false;
 2071 -       }
 2072 -       if (me_lbs_b != peer_lbs) {
 2073 -           drbd_warn(device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
 2074 -               me_lbs, peer_lbs);
 2075 -           if (can_do) {
 2076 -               drbd_dbg(device, "logical block size mismatch: WRITE_SAME disabled.\n");
 2077 -               can_do = false;
 2078 -           }
 2079 -           me_lbs = max(me_lbs, me_lbs_b);
 2080 -           /* We cannot change the logical block size of an in-use queue.
 2081 -            * We can only hope that access happens to be properly aligned.
 2082 -            * If not, the peer will likely produce an IO error, and detach. */
 2083 -           if (peer_lbs > me_lbs) {
 2084 -               if (device->resource->role[NOW] != R_PRIMARY) {
 2085 -                   blk_queue_logical_block_size(q, peer_lbs);
 2086 -                   drbd_warn(device, "logical block size set to %u\n", peer_lbs);
 2087 -               } else {
 2088 -                   drbd_warn(device,
 2089 -                       "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
 2090 -                       me_lbs, peer_lbs);
 2091 -               }
 2092 -           }
 2093 -       }
 2094 -       if (can_do && !o->write_same_capable) {
 2095 -           /* If we introduce an open-coded write-same loop on the receiving side,
 2096 -            * the peer would present itself as "capable". */
 2097 -           drbd_dbg(device, "WRITE_SAME disabled (peer device not capable)\n");
 2098 -           can_do = false;
 2099 -       }
 2100 -   }
 2101 -
 2102 -   blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
 2103 +   
 2104 +   drbd_dbg(device, "This kernel is too old, no WRITE_SAME support.\n");
 2105  }
 2106  
 2107  static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
 2108 @@ -2098,7 +2040,6 @@ static void drbd_setup_queue_param(struc
 2109         adjust_ra_pages(q, b);
 2110     }
 2111     fixup_discard_if_not_supported(q);
 2112 -   fixup_write_zeroes(device, q);
 2113  }
 2114  
 2115  void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
 2116 @@ -2569,8 +2510,9 @@ static struct block_device *open_backing
 2117     struct block_device *bdev;
 2118     int err = 0;
 2119  
 2120 -   bdev = blkdev_get_by_path(bdev_path,
 2121 -                 FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
 2122 +   bdev = open_bdev_exclusive(bdev_path,
 2123 +                  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 2124 +                  claim_ptr);
 2125     if (IS_ERR(bdev)) {
 2126         drbd_err(device, "open(\"%s\") failed with %ld\n",
 2127                 bdev_path, PTR_ERR(bdev));
 2128 @@ -2580,9 +2522,9 @@ static struct block_device *open_backing
 2129     if (!do_bd_link)
 2130         return bdev;
 2131  
 2132 -   err = bd_link_disk_holder(bdev, device->vdisk);
 2133 +   err = bd_claim_by_disk(bdev, claim_ptr, device->vdisk);
 2134     if (err) {
 2135 -       blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 2136 +       ({ close_bdev_exclusive(bdev, (FMODE_READ | FMODE_WRITE | FMODE_EXCL)); 0; });
 2137         drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
 2138                 bdev_path, err);
 2139         bdev = ERR_PTR(err);
 2140 @@ -2629,8 +2571,8 @@ static void close_backing_dev(struct drb
 2141     if (!bdev)
 2142         return;
 2143     if (do_bd_unlink)
 2144 -       bd_unlink_disk_holder(bdev, device->vdisk);
 2145 -   blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 2146 +       bd_release_from_disk(bdev, device->vdisk);
 2147 +   ({ close_bdev_exclusive(bdev, (FMODE_READ | FMODE_WRITE | FMODE_EXCL)); 0; });
 2148  }
 2149  
 2150  void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
 2151 @@ -4928,7 +4870,7 @@ static int nla_put_drbd_cfg_context(stru
 2152                     struct drbd_path *path)
 2153  {
 2154     struct nlattr *nla;
 2155 -   nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
 2156 +   nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
 2157     if (!nla)
 2158         goto nla_put_failure;
 2159     if (device)
 2160 @@ -5002,9 +4944,9 @@ found_resource:
 2161     goto out;
 2162  
 2163  put_result:
 2164 -   dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 2165 -           cb->nlh->nlmsg_seq, &drbd_genl_family,
 2166 -           NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
 2167 +   dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
 2168 +            cb->nlh->nlmsg_seq, &drbd_genl_family,
 2169 +            NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
 2170     err = -ENOMEM;
 2171     if (!dh)
 2172         goto out;
 2173 @@ -5058,8 +5000,8 @@ static void device_to_statistics(struct
 2174         q = bdev_get_queue(device->ldev->backing_bdev);
 2175         s->dev_lower_blocked =
 2176             bdi_congested(q->backing_dev_info,
 2177 -                     (1 << WB_async_congested) |
 2178 -                     (1 << WB_sync_congested));
 2179 +                     (1 << BDI_async_congested) |
 2180 +                     (1 << BDI_sync_congested));
 2181         put_ldev(device);
 2182     }
 2183     s->dev_size = drbd_get_capacity(device->this_bdev);
 2184 @@ -5131,9 +5073,9 @@ int drbd_adm_dump_devices(struct sk_buff
 2185     goto out;  /* no more devices */
 2186  
 2187  put_result:
 2188 -   dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 2189 -           cb->nlh->nlmsg_seq, &drbd_genl_family,
 2190 -           NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
 2191 +   dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
 2192 +            cb->nlh->nlmsg_seq, &drbd_genl_family,
 2193 +            NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
 2194     err = -ENOMEM;
 2195     if (!dh)
 2196         goto out;
 2197 @@ -5185,7 +5127,7 @@ int drbd_adm_dump_connections_done(struc
 2198  static int connection_paths_to_skb(struct sk_buff *skb, struct drbd_connection *connection)
 2199  {
 2200     struct drbd_path *path;
 2201 -   struct nlattr *tla = nla_nest_start_noflag(skb, DRBD_NLA_PATH_PARMS);
 2202 +   struct nlattr *tla = nla_nest_start(skb, DRBD_NLA_PATH_PARMS);
 2203     if (!tla)
 2204         goto nla_put_failure;
 2205  
 2206 @@ -5292,9 +5234,9 @@ found_resource:
 2207     goto out;  /* no more resources */
 2208  
 2209  put_result:
 2210 -   dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 2211 -           cb->nlh->nlmsg_seq, &drbd_genl_family,
 2212 -           NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
 2213 +   dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
 2214 +            cb->nlh->nlmsg_seq, &drbd_genl_family,
 2215 +            NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
 2216     err = -ENOMEM;
 2217     if (!dh)
 2218         goto out;
 2219 @@ -5465,9 +5407,9 @@ found_peer_device:
 2220     goto next_device;
 2221  
 2222  put_result:
 2223 -   dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 2224 -           cb->nlh->nlmsg_seq, &drbd_genl_family,
 2225 -           NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
 2226 +   dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
 2227 +            cb->nlh->nlmsg_seq, &drbd_genl_family,
 2228 +            NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
 2229     err = -ENOMEM;
 2230     if (!dh)
 2231         goto out;
 2232 @@ -5940,7 +5882,7 @@ static int adm_del_resource(struct drbd_
 2233     if (!list_empty(&resource->connections))
 2234         goto out;
 2235     err = ERR_RES_IN_USE;
 2236 -   if (!idr_is_empty(&resource->devices))
 2237 +   if (!({ int id = 0; idr_get_next(& resource -> devices, &id) == NULL; }))
 2238         goto out;
 2239  
 2240     set_bit(R_UNREGISTERED, &resource->flags);
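
The drbd_nl.c hunks above switch to the older netlink spellings (nla_nest_start() without the _noflag suffix, NETLINK_CB(cb->skb).pid instead of .portid), open the backing device via open_bdev_exclusive()/bd_claim_by_disk(), and toggle request-queue flags under the queue lock because blk_queue_flag_set()/blk_queue_flag_clear() do not exist yet. That last pattern, as open-coded in decide_on_discard_support():

        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        queue_flag_set(QUEUE_FLAG_DISCARD, q);  /* or queue_flag_clear() */
        spin_unlock_irqrestore(q->queue_lock, flags);
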
 2241 --- drbd_req.c
 2242 +++ /tmp/cocci-output-19039-ec71df-drbd_req.c
 2243 @@ -41,7 +41,7 @@ static struct drbd_request *drbd_req_new
 2244  {
 2245     struct drbd_request *req;
 2246  
 2247 -   req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
 2248 +   req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
 2249     if (!req)
 2250         return NULL;
 2251  
 2252 @@ -55,8 +55,8 @@ static struct drbd_request *drbd_req_new
 2253     req->epoch = 0;
 2254  
 2255     drbd_clear_interval(&req->i);
 2256 -   req->i.sector = bio_src->bi_iter.bi_sector;
 2257 -   req->i.size = bio_src->bi_iter.bi_size;
 2258 +   req->i.sector = bio_src->bi_sector;
 2259 +   req->i.size = bio_src->bi_size;
 2260     req->i.local = true;
 2261     req->i.waiting = false;
 2262  
 2263 @@ -70,9 +70,9 @@ static struct drbd_request *drbd_req_new
 2264     kref_init(&req->kref);
 2265  
 2266     req->local_rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
 2267 -                 | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
 2268 -                 | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
 2269 -                 | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
 2270 +                 | ((false)/* WRITE_SAME not supported on this kernel */ ? RQ_WSAME : 0)
 2271 +                 | ((false)/* WRITE_ZEROES not supported on this kernel */ ? RQ_ZEROES : 0)
 2272 +                 | ((bio_src->bi_rw & BIO_DISCARD) ? RQ_UNMAP : 0);
 2273  
 2274     return req;
 2275  }
 2276 @@ -80,7 +80,7 @@ static struct drbd_request *drbd_req_new
 2277  static void req_destroy_no_send_peer_ack(struct kref *kref)
 2278  {
 2279     struct drbd_request *req = container_of(kref, struct drbd_request, kref);
 2280 -   mempool_free(req, &drbd_request_mempool);
 2281 +   mempool_free(req, drbd_request_mempool);
 2282  }
 2283  
 2284  void drbd_queue_peer_ack(struct drbd_resource *resource, struct drbd_request *req)
 2285 @@ -88,7 +88,7 @@ void drbd_queue_peer_ack(struct drbd_res
 2286     struct drbd_connection *connection;
 2287     bool queued = false;
 2288  
 2289 -   refcount_set(&req->kref.refcount, 1); /* was 0, instead of kref_get() */
 2290 +   atomic_set(&req->kref.refcount, 1); /* was 0, instead of kref_get() */
 2291     rcu_read_lock();
 2292     for_each_connection_rcu(connection, resource) {
 2293         unsigned int node_id = connection->peer_node_id;
 2294 @@ -285,7 +285,8 @@ void drbd_req_destroy(struct kref *kref)
 2295                 drbd_queue_peer_ack(resource, peer_ack_req);
 2296                 peer_ack_req = NULL;
 2297             } else
 2298 -               mempool_free(peer_ack_req, &drbd_request_mempool);
 2299 +               mempool_free(peer_ack_req,
 2300 +                        drbd_request_mempool);
 2301         }
 2302         req->device = NULL;
 2303         resource->peer_ack_req = req;
 2304 @@ -295,7 +296,7 @@ void drbd_req_destroy(struct kref *kref)
 2305         if (!peer_ack_req)
 2306             resource->last_peer_acked_dagtag = req->dagtag_sector;
 2307     } else
 2308 -       mempool_free(req, &drbd_request_mempool);
 2309 +       mempool_free(req, drbd_request_mempool);
 2310  
 2311     /* In both branches of the if above, the reference to device gets released */
 2312     kref_debug_put(&device->kref_debug, 6);
 2313 @@ -308,7 +309,7 @@ void drbd_req_destroy(struct kref *kref)
 2314      */
 2315     if (destroy_next) {
 2316         req = destroy_next;
 2317 -       if (refcount_dec_and_test(&req->kref.refcount))
 2318 +       if (atomic_dec_and_test(&req->kref.refcount))
 2319             goto tail_recursion;
 2320     }
 2321  }
 2322 @@ -341,8 +342,8 @@ void complete_master_bio(struct drbd_dev
 2323         struct bio_and_error *m)
 2324  {
 2325     int rw = bio_data_dir(m->bio);
 2326 -   m->bio->bi_status = errno_to_blk_status(m->error);
 2327 -   bio_endio(m->bio);
 2328 +   bio_endio(m->bio,
 2329 +         ((m->error == 0 ? 0 : m->error == -ENOMEM ? 9 : m->error == -EOPNOTSUPP ? 1 : 10) == 0 ? 0 : (m->error == 0 ? 0 : m->error == -ENOMEM ? 9 : m->error == -EOPNOTSUPP ? 1 : 10) == 9 ? -ENOMEM : (m->error == 0 ? 0 : m->error == -ENOMEM ? 9 : m->error == -EOPNOTSUPP ? 1 : 10) == 1 ? -EOPNOTSUPP : -EIO));
 2330     dec_ap_bio(device, rw);
 2331  }
 2332  
 2333 @@ -447,9 +448,7 @@ void drbd_req_complete(struct drbd_reque
 2334      *
 2335      * WRITE should have used all available paths already.
 2336      */
 2337 -   if (!ok &&
 2338 -       bio_op(req->master_bio) == REQ_OP_READ &&
 2339 -       !(req->master_bio->bi_opf & REQ_RAHEAD) &&
 2340 +   if ((!ok && !(req->master_bio->bi_rw & WRITE)) &&
 2341         !list_empty(&req->tl_requests))
 2342         req->local_rq_state |= RQ_POSTPONED;
 2343  
 2344 @@ -1460,8 +1459,8 @@ static void drbd_process_discard_or_zero
 2345  {
 2346     int err = drbd_issue_discard_or_zero_out(req->device,
 2347                 req->i.sector, req->i.size >> 9, flags);
 2348 -   req->private_bio->bi_status = err ? BLK_STS_IOERR : BLK_STS_OK;
 2349 -   bio_endio(req->private_bio);
 2350 +   bio_endio(req->private_bio,
 2351 +         ((err ? 10 : 0) == 0 ? 0 : (err ? 10 : 0) == 9 ? -ENOMEM : (err ? 10 : 0) == 1 ? -EOPNOTSUPP : -EIO));
 2352  }
 2353  
 2354  static void
 2355 @@ -1471,14 +1470,14 @@ drbd_submit_req_private_bio(struct drbd_
 2356     struct bio *bio = req->private_bio;
 2357     unsigned int type;
 2358  
 2359 -   if (bio_op(bio) != REQ_OP_READ)
 2360 +   if ((bio->bi_rw & BIO_WRITE))
 2361         type = DRBD_FAULT_DT_WR;
 2362 -   else if (bio->bi_opf & REQ_RAHEAD)
 2363 +   else if ((false)/* RAHEAD not supported on this kernel */)
 2364         type = DRBD_FAULT_DT_RA;
 2365     else
 2366         type = DRBD_FAULT_DT_RD;
 2367  
 2368 -   bio_set_dev(bio, device->ldev->backing_bdev);
 2369 +   bio->bi_bdev = device->ldev->backing_bdev;
 2370  
 2371     /* State may have changed since we grabbed our reference on the
 2372      * device->ldev member. Double check, and short-circuit to endio.
 2373 @@ -1487,20 +1486,20 @@ drbd_submit_req_private_bio(struct drbd_
 2374      * this bio. */
 2375     if (get_ldev(device)) {
 2376         if (drbd_insert_fault(device, type)) {
 2377 -           bio->bi_status = BLK_STS_IOERR;
 2378 -           bio_endio(bio);
 2379 -       } else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) {
 2380 +           bio_endio(bio,
 2381 +                 (10 == 0 ? 0 : 10 == 9 ? -ENOMEM : 10 == 1 ? -EOPNOTSUPP : -EIO));
 2382 +       } else if ((false)/* WRITE_ZEROES not supported on this kernel */) {
 2383             drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT |
 2384 -               ((bio->bi_opf & REQ_NOUNMAP) ? 0 : EE_TRIM));
 2385 -       } else if (bio_op(bio) == REQ_OP_DISCARD) {
 2386 +               ((false)/* NOUNMAP not supported on this kernel */ ? 0 : EE_TRIM));
 2387 +       } else if ((bio->bi_rw & BIO_DISCARD)) {
 2388             drbd_process_discard_or_zeroes_req(req, EE_TRIM);
 2389         } else {
 2390             generic_make_request(bio);
 2391         }
 2392         put_ldev(device);
 2393     } else {
 2394 -       bio->bi_status = BLK_STS_IOERR;
 2395 -       bio_endio(bio);
 2396 +       bio_endio(bio,
 2397 +             (10 == 0 ? 0 : 10 == 9 ? -ENOMEM : 10 == 1 ? -EOPNOTSUPP : -EIO));
 2398     }
 2399   }
 2400  
 2401 @@ -1521,7 +1520,7 @@ static void drbd_queue_write(struct drbd
 2402  static void req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
 2403  {
 2404     struct bio *bio;
 2405 -   bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
 2406 +   bio = bio_clone(bio_src, GFP_NOIO);
 2407  
 2408     req->private_bio = bio;
 2409  
 2410 @@ -1560,8 +1559,8 @@ drbd_request_prepare(struct drbd_device
 2411         /* only pass the error to the upper layers.
 2412          * if user cannot handle io errors, that's not our business. */
 2413         drbd_err(device, "could not kmalloc() req\n");
 2414 -       bio->bi_status = BLK_STS_RESOURCE;
 2415 -       bio_endio(bio);
 2416 +       bio_endio(bio,
 2417 +             (9 == 0 ? 0 : 9 == 9 ? -ENOMEM : 9 == 1 ? -EOPNOTSUPP : -EIO));
 2418         return ERR_PTR(-ENOMEM);
 2419     }
 2420     if (get_ldev(device))
 2421 @@ -1587,8 +1586,8 @@ drbd_request_prepare(struct drbd_device
 2422         atomic_add(interval_to_al_extents(&req->i), &device->wait_for_actlog_ecnt);
 2423  
 2424     /* process discards always from our submitter thread */
 2425 -   if ((bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
 2426 -       (bio_op(bio) == REQ_OP_DISCARD))
 2427 +   if ((false)/* WRITE_ZEROES not supported on this kernel */ ||
 2428 +       (bio->bi_rw & BIO_DISCARD))
 2429         goto queue_for_submitter_thread;
 2430  
 2431     if (req->private_bio && !test_bit(AL_SUSPENDED, &device->flags)) {
 2432 @@ -1629,57 +1628,6 @@ static bool may_do_writes(struct drbd_de
 2433     return false;
 2434  }
 2435  
 2436 -struct drbd_plug_cb {
 2437 -   struct blk_plug_cb cb;
 2438 -   struct drbd_request *most_recent_req;
 2439 -   /* do we need more? */
 2440 -};
 2441 -
 2442 -static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
 2443 -{
 2444 -   struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
 2445 -   struct drbd_resource *resource = plug->cb.data;
 2446 -   struct drbd_request *req = plug->most_recent_req;
 2447 -
 2448 -   kfree(cb);
 2449 -   if (!req)
 2450 -       return;
 2451 -
 2452 -   spin_lock_irq(&resource->req_lock);
 2453 -   /* In case the sender did not process it yet, raise the flag to
 2454 -    * have it followed with P_UNPLUG_REMOTE just after. */
 2455 -   req->local_rq_state |= RQ_UNPLUG;
 2456 -   /* but also queue a generic unplug */
 2457 -   drbd_queue_unplug(req->device);
 2458 -   kref_put(&req->kref, drbd_req_destroy);
 2459 -   spin_unlock_irq(&resource->req_lock);
 2460 -}
 2461 -
 2462 -static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
 2463 -{
 2464 -   /* A lot of text to say
 2465 -    * return (struct drbd_plug_cb*)blk_check_plugged(); */
 2466 -   struct drbd_plug_cb *plug;
 2467 -   struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));
 2468 -
 2469 -   if (cb)
 2470 -       plug = container_of(cb, struct drbd_plug_cb, cb);
 2471 -   else
 2472 -       plug = NULL;
 2473 -   return plug;
 2474 -}
 2475 -
 2476 -static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
 2477 -{
 2478 -   struct drbd_request *tmp = plug->most_recent_req;
 2479 -   /* Will be sent to some peer.
 2480 -    * Remember to tag it with UNPLUG_REMOTE on unplug */
 2481 -   kref_get(&req->kref);
 2482 -   plug->most_recent_req = req;
 2483 -   if (tmp)
 2484 -       kref_put(&tmp->kref, drbd_req_destroy);
 2485 -}
 2486 -
 2487  static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
 2488  {
 2489     struct drbd_resource *resource = device->resource;
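
The helpers removed above all hang off blk_check_plugged(), which this target kernel evidently does not provide, so the compat patch drops the per-plug batching of P_UNPLUG_REMOTE and falls back to plain submission. Where blk_check_plugged() is available, the usage the removed code implemented looks roughly like this (names as in the removed lines):

    	struct blk_plug_cb *cb =
    		blk_check_plugged(drbd_unplug, resource, sizeof(struct drbd_plug_cb));
    	if (cb) {
    		struct drbd_plug_cb *plug =
    			container_of(cb, struct drbd_plug_cb, cb);
    		drbd_update_plug(plug, req);
    	}
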
 2490 @@ -1765,7 +1713,8 @@ static void drbd_send_and_submit(struct
 2491          * replicating, in which case there is no point. */
 2492         if (unlikely(req->i.size == 0)) {
 2493             /* The only size==0 bios we expect are empty flushes. */
 2494 -           D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
 2495 +           D_ASSERT(device,
 2496 +                req->master_bio->bi_rw & REQ_FLUSH);
 2497             _req_mod(req, QUEUE_AS_DRBD_BARRIER, NULL);
 2498         } else if (!drbd_process_write_request(req))
 2499             no_remote = true;
 2500 @@ -1779,12 +1728,6 @@ static void drbd_send_and_submit(struct
 2501             no_remote = true;
 2502     }
 2503  
 2504 -   if (no_remote == false) {
 2505 -       struct drbd_plug_cb *plug = drbd_check_plugged(resource);
 2506 -       if (plug)
 2507 -           drbd_update_plug(plug, req);
 2508 -   }
 2509 -
 2510     /* If it took the fast path in drbd_request_prepare, add it here.
 2511      * The slow path has added it already. */
 2512     if (list_empty(&req->req_pending_master_completion))
 2513 @@ -1936,7 +1879,8 @@ static void __drbd_submit_peer_request(s
 2514     list_del_init(&peer_req->wait_for_actlog);
 2515  
 2516     err = drbd_submit_peer_request(device, peer_req,
 2517 -           REQ_OP_WRITE, peer_req->op_flags, DRBD_FAULT_DT_WR);
 2518 +                      WRITE | peer_req->op_flags,
 2519 +                      DRBD_FAULT_DT_WR);
 2520  
 2521     if (err)
 2522         drbd_cleanup_after_failed_submit_peer_request(peer_req);
 2523 @@ -2129,6 +2073,7 @@ void do_submit(struct work_struct *ws)
 2524             made_progress = prepare_al_transaction_nonblock(device, &wfa);
 2525             if (made_progress)
 2526                 break;
 2527 +           drbd_kick_lo(device);
 2528  
 2529             schedule();
 2530  
 2531 @@ -2192,9 +2137,10 @@ void do_submit(struct work_struct *ws)
 2532  
 2533         send_and_submit_pending(device, &wfa);
 2534     }
 2535 +   drbd_kick_lo(device);
 2536  }
 2537  
 2538 -blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
 2539 +int drbd_make_request(struct request_queue *q, struct bio *bio)
 2540  {
 2541     struct drbd_device *device = (struct drbd_device *) q->queuedata;
 2542  #ifdef CONFIG_DRBD_TIMING_STATS
 2543 @@ -2202,12 +2148,20 @@ blk_qc_t drbd_make_request(struct reques
 2544  #endif
 2545     unsigned long start_jif;
 2546  
 2547 -   blk_queue_split(q, &bio);
 2548 +   /* We never supported BIO_RW_BARRIER.
 2549 +    * We don't need to, anymore, either: starting with kernel 2.6.36,
 2550 +    * we have REQ_FUA and REQ_PREFLUSH, which will be handled transparently
 2551 +    * by the block layer. */
 2552 +   if (unlikely(bio->bi_rw & (1UL << BIO_RW_BARRIER))) {
 2553 +       bio_endio(bio,
 2554 +             (1 == 0 ? 0 : 1 == 9 ? -ENOMEM : 1 == 1 ? -EOPNOTSUPP : -EIO));
 2555 +       return 0;
 2556 +   }
 2557  
 2558     if (device->cached_err_io) {
 2559 -       bio->bi_status = BLK_STS_IOERR;
 2560 -       bio_endio(bio);
 2561 -       return BLK_QC_T_NONE;
 2562 +       bio_endio(bio,
 2563 +             (10 == 0 ? 0 : 10 == 9 ? -ENOMEM : 10 == 1 ? -EOPNOTSUPP : -EIO));
 2564 +       return 0;
 2565     }
 2566  
 2567     ktime_get_accounting(start_kt);
 2568 @@ -2215,7 +2169,40 @@ blk_qc_t drbd_make_request(struct reques
 2569  
 2570     __drbd_make_request(device, bio, start_kt, start_jif);
 2571  
 2572 -   return BLK_QC_T_NONE;
 2573 +   return 0;
 2574 +}
 2575 +
 2576 +/* This is called by bio_add_page().
 2577 + *
 2578 + * q->max_hw_sectors and other global limits are already enforced there.
 2579 + *
 2580 + * We need to call down to our lower level device,
 2581 + * in case it has special restrictions.
 2582 + *
 2583 + * As long as the BIO is empty we have to allow at least one bvec,
 2584 + * regardless of size and offset, so no need to ask lower levels.
 2585 + */
 2586 +int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm,
 2587 +           struct bio_vec *bvec)
 2588 +{
 2589 +   struct drbd_device *device = (struct drbd_device *)q->queuedata;
 2590 +   unsigned int bio_size = bvm->bi_size;
 2591 +   int limit = DRBD_MAX_BIO_SIZE;
 2592 +   int backing_limit;
 2593 +
 2594 +   if (bio_size && get_ldev(device)) {
 2595 +       unsigned int max_hw_sectors = queue_max_hw_sectors(q);
 2596 +       struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
 2597 +       if (b->merge_bvec_fn) {
 2598 +           bvm->bi_bdev = device->ldev->backing_bdev;
 2599 +           backing_limit = b->merge_bvec_fn(b, bvm, bvec);
 2600 +           limit = min(limit, backing_limit);
 2601 +       }
 2602 +       put_ldev(device);
 2603 +       if ((limit >> 9) > max_hw_sectors)
 2604 +           limit = max_hw_sectors << 9;
 2605 +   }
 2606 +   return limit;
 2607  }
 2608  
 2609  static unsigned long time_min_in_future(unsigned long now,
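
This kernel predates blk_queue_split(), so the driver cannot let the block layer split oversized bios after the fact; instead bio_add_page() consults the legacy merge_bvec_fn hook added above while a bio is still being built, and drbd_merge_bvec() forwards the question to the backing device. The callback still has to be attached to the queue; a minimal registration sketch, assuming the usual legacy helper (the queue setup site is not part of this hunk):

    	/* legacy API: ask drbd_merge_bvec() before growing a bio beyond
    	 * what the stacked backing device can accept */
    	blk_queue_merge_bvec(q, drbd_merge_bvec);
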
 2610 @@ -2301,9 +2288,9 @@ static bool net_timeout_reached(struct d
 2611   * to expire twice (worst case) to become effective. Good enough.
 2612   */
 2613  
 2614 -void request_timer_fn(struct timer_list *t)
 2615 +void request_timer_fn(unsigned long data)
 2616  {
 2617 -   struct drbd_device *device = from_timer(device, t, request_timer);
 2618 +   struct drbd_device *device = (struct drbd_device *)data;
 2619     struct drbd_connection *connection;
 2620     struct drbd_request *req_read, *req_write;
 2621     unsigned long oldest_submit_jif;
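
The request_timer_fn() conversion above targets the pre-timer_setup() API: the callback receives an opaque unsigned long rather than a struct timer_list pointer, so the device must be handed over as the timer's data cookie when the timer is armed (that setup lives outside this hunk). A hedged sketch of the two styles, assuming the timer field name used above:

    	/* modern API, matching the unpatched callback */
    	timer_setup(&device->request_timer, request_timer_fn, 0);

    	/* legacy API, matching the patched callback signature */
    	setup_timer(&device->request_timer, request_timer_fn,
    		    (unsigned long)device);
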
 2622 --- drbd_state.c
 2623 +++ /tmp/cocci-output-19039-f79148-drbd_state.c
 2624 @@ -3966,7 +3966,7 @@ long twopc_retry_timeout(struct drbd_res
 2625             retries = 5;
 2626         timeout = resource->res_opts.twopc_retry_timeout *
 2627               HZ / 10 * connections * (1 << retries);
 2628 -       timeout = prandom_u32() % timeout;
 2629 +       timeout = random32() % timeout;
 2630     }
 2631     return timeout;
 2632  }
 2633 @@ -4130,7 +4130,7 @@ change_cluster_wide_state(bool (*change)
 2634     }
 2635  
 2636     do
 2637 -       reply->tid = prandom_u32();
 2638 +       reply->tid = random32();
 2639     while (!reply->tid);
 2640  
 2641     request.tid = cpu_to_be32(reply->tid);
 2642 @@ -4341,7 +4341,7 @@ retry:
 2643     reach_immediately = directly_connected_nodes(resource, NOW);
 2644  
 2645     do
 2646 -       reply->tid = prandom_u32();
 2647 +       reply->tid = random32();
 2648     while (!reply->tid);
 2649  
 2650     request.tid = cpu_to_be32(reply->tid);
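
All three call sites above move from prandom_u32() to random32(), the name the same pseudo-random helper carried before it was renamed. Instead of rewriting every caller, a single compat shim would also do, roughly (the guard macro name is hypothetical):

    #ifndef COMPAT_HAVE_PRANDOM_U32
    #define prandom_u32()	random32()
    #endif
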
 2651 --- drbd-headers/linux/genl_magic_func.h
 2652 +++ drbd-headers/linux/genl_magic_func.h
 2653 @@ -233,6 +233,7 @@ static const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
 2654  {                              \
 2655     handler                         \
 2656     .cmd = op_name,                     \
 2657 +   .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy),   \
 2658  },
 2659  
 2660  #define ZZZ_genl_ops       CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
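
Older kernels have no .policy member in struct genl_family, so the attribute policy has to be attached to each operation instead: this hunk adds the per-op .policy to the macro that expands the operation table, and the following hunk drops the family-wide field. With GENL_MAGIC_FAMILY set to drbd, an expanded entry ends up roughly like this (command and handler names are illustrative only):

    static struct genl_ops drbd_genl_ops[] = {
    	{
    		.cmd    = DRBD_ADM_GET_STATUS,	/* example command */
    		.doit   = drbd_adm_get_status,	/* example handler */
    		.policy = drbd_tla_nl_policy,	/* per-op policy from the macro above */
    	},
    	/* ... one entry per DRBD netlink command ... */
    };
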
 2661 @@ -291,7 +292,6 @@ static struct genl_family ZZZ_genl_family __read_mostly = {
 2662  #ifdef COMPAT_HAVE_GENL_FAMILY_PARALLEL_OPS
 2663     .parallel_ops = true,
 2664  #endif
 2665 -   .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy),
 2666  };
 2667  
 2668  /*