summaryrefslogtreecommitdiff
path: root/drivers/block
diff options
context:
space:
mode:
authorPhilipp Reisner <philipp.reisner@linbit.com>2009-05-05 17:27:51 +0200
committerPhilipp Reisner <philipp.reisner@linbit.com>2009-07-29 10:31:49 +0200
commit38544ea16e86b0bac2a00e76de6cc15c826d7a10 (patch)
treea11863c207d57661ce6a269e6ecfbbff11152590 /drivers/block
parent80c5b619dce23d1d1b55f203b5538cbe5edf001d (diff)
Tracking DRBD mainline (all cleanups done there)
Improving a comment Call drbd_rs_cancel_all() and reset rs_pending when aborting resync due to detach. (Bugz 223) Allow detach of a SyncTarget node. (Bugz 221) rename inc_local -> get_ldev; inc_net -> get_net_conf; and corresponding dec_* -> put_* replace AL with activity log in comments drbd_md_sync_page_io: prepare for rw being more than just r/w direction remove outdated comment add comment: page_address allowed, preallocated with GFP_KERNEL renaming a few constants: _SECT -> _SECTOR_SIZE, _SIZE_B -> _SHIFT ... remove quite a few 'inline's from .c files replacing __attribute__((packed)) with __packed micro: comment spelling fix Allow pass-through options to drbdsetup also for the syncer subcommand dmsetup: ERR_PACKET_NR error message was missing Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com> Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/drbd/drbd_actlog.c134
-rw-r--r--drivers/block/drbd/drbd_bitmap.c20
-rw-r--r--drivers/block/drbd/drbd_buildtag.c4
-rw-r--r--drivers/block/drbd/drbd_int.h122
-rw-r--r--drivers/block/drbd/drbd_main.c137
-rw-r--r--drivers/block/drbd/drbd_nl.c88
-rw-r--r--drivers/block/drbd/drbd_proc.c4
-rw-r--r--drivers/block/drbd/drbd_receiver.c117
-rw-r--r--drivers/block/drbd/drbd_req.c39
-rw-r--r--drivers/block/drbd/drbd_req.h26
-rw-r--r--drivers/block/drbd/drbd_tracing.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c44
-rw-r--r--drivers/block/drbd/drbd_wrappers.h3
-rw-r--r--drivers/block/drbd/lru_cache.c4
14 files changed, 378 insertions, 366 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index c894b4fa6af0..f1318e57f375 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -29,14 +29,14 @@
#include "drbd_tracing.h"
#include "drbd_wrappers.h"
-/* I do not believe that all storage medias can guarantee atomic
- * 512 byte write operations. When the journal is read, only
- * transactions with correct xor_sums are considered.
- * sizeof() = 512 byte */
-struct __attribute__((packed)) al_transaction {
+/* We maintain a trivial check sum in our on disk activity log.
+ * With that we can ensure correct operation even when the storage
 * device might do a partial (last) sector write while losing power.
+ */
+struct __packed al_transaction {
u32 magic;
u32 tr_number;
- struct __attribute__((packed)) {
+ struct __packed {
u32 pos;
u32 extent; } updates[1 + AL_EXTENTS_PT];
u32 xor_sum;
@@ -133,7 +133,7 @@ STATIC int _drbd_md_sync_page_io(struct drbd_conf *mdev,
int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
sector_t sector, int rw)
{
- int hardsect, mask, ok;
+ int hardsect_size, mask, ok;
int offset = 0;
struct page *iop = mdev->md_io_page;
@@ -141,34 +141,36 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
BUG_ON(!bdev->md_bdev);
- hardsect = drbd_get_hardsect(bdev->md_bdev);
- if (hardsect == 0)
- hardsect = MD_HARDSECT;
+ hardsect_size = drbd_get_hardsect_size(bdev->md_bdev);
+ if (hardsect_size == 0)
+ hardsect_size = MD_SECTOR_SIZE;
- /* in case hardsect != 512 [ s390 only? ] */
- if (hardsect != MD_HARDSECT) {
- mask = (hardsect / MD_HARDSECT) - 1;
+ /* in case hardsect_size != 512 [ s390 only? ] */
+ if (hardsect_size != MD_SECTOR_SIZE) {
+ mask = (hardsect_size / MD_SECTOR_SIZE) - 1;
D_ASSERT(mask == 1 || mask == 3 || mask == 7);
- D_ASSERT(hardsect == (mask+1) * MD_HARDSECT);
+ D_ASSERT(hardsect_size == (mask+1) * MD_SECTOR_SIZE);
offset = sector & mask;
sector = sector & ~mask;
iop = mdev->md_io_tmpp;
- if (rw == WRITE) {
+ if (rw & WRITE) {
+ /* these are GFP_KERNEL pages, preallocated
+ * on device initialization */
void *p = page_address(mdev->md_io_page);
void *hp = page_address(mdev->md_io_tmpp);
- ok = _drbd_md_sync_page_io(mdev, bdev, iop,
- sector, READ, hardsect);
+ ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
+ READ, hardsect_size);
if (unlikely(!ok)) {
dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
- "READ [hardsect!=512]) failed!\n",
+ "READ [hardsect_size!=512]) failed!\n",
(unsigned long long)sector);
return 0;
}
- memcpy(hp + offset*MD_HARDSECT , p, MD_HARDSECT);
+ memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
}
}
@@ -176,27 +178,26 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
sector > drbd_md_last_sector(bdev))
dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
- (unsigned long long)sector, rw ? "WRITE" : "READ");
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
- ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, hardsect);
+ ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, hardsect_size);
if (unlikely(!ok)) {
dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
- (unsigned long long)sector, rw ? "WRITE" : "READ");
+ (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
return 0;
}
- if (hardsect != MD_HARDSECT && rw == READ) {
+ if (hardsect_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
void *p = page_address(mdev->md_io_page);
void *hp = page_address(mdev->md_io_tmpp);
- memcpy(p, hp + offset*MD_HARDSECT, MD_HARDSECT);
+ memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
}
return ok;
}
-static inline
-struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
+static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
struct lc_element *al_ext;
struct bm_extent *bm_ext;
@@ -229,7 +230,7 @@ struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
{
- unsigned int enr = (sector >> (AL_EXTENT_SIZE_B-9));
+ unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
struct lc_element *al_ext;
struct update_al_work al_work;
@@ -241,10 +242,11 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
if (al_ext->lc_number != enr) {
/* drbd_al_write_transaction(mdev,al_ext,enr);
- generic_make_request() are serialized on the
- current->bio_tail list now. Therefore we have
- to deligate writing something to AL to the
- worker thread. */
+ * recurses into generic_make_request(), which
 * disallows recursion, bios being serialized on the
+ * current->bio_tail list now.
+ * we have to delegate updates to the activity log
+ * to the worker thread. */
init_completion(&al_work.event);
al_work.al_ext = al_ext;
al_work.enr = enr;
@@ -264,7 +266,7 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
{
- unsigned int enr = (sector >> (AL_EXTENT_SIZE_B-9));
+ unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
struct lc_element *extent;
unsigned long flags;
@@ -293,15 +295,14 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
struct lc_element *updated = aw->al_ext;
const unsigned int new_enr = aw->enr;
const unsigned int evicted = aw->old_enr;
-
struct al_transaction *buffer;
sector_t sector;
int i, n, mx;
unsigned int extent_nr;
u32 xor_sum = 0;
- if (!inc_local(mdev)) {
- dev_err(DEV, "inc_local() failed in w_al_write_transaction\n");
+ if (!get_ldev(mdev)) {
+ dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
complete(&((struct update_al_work *)w)->event);
return 1;
}
@@ -363,13 +364,13 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
mutex_unlock(&mdev->md_io_mutex);
complete(&((struct update_al_work *)w)->event);
- dec_local(mdev);
+ put_ldev(mdev);
return 1;
}
/**
- * drbd_al_read_tr: Reads a single transaction record form the
+ * drbd_al_read_tr: Reads a single transaction record from the
* on disk activity log.
* Returns -1 on IO error, 0 on checksum error and 1 if it is a valid
* record.
@@ -543,10 +544,10 @@ STATIC void atodb_endio(struct bio *bio, int error)
put_page(page);
bio_put(bio);
mdev->bm_writ_cnt++;
- dec_local(mdev);
+ put_ldev(mdev);
}
-#define S2W(s) ((s)<<(BM_EXT_SIZE_B-BM_BLOCK_SIZE_B-LN2_BPL))
+#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* activity log to on disk bitmap -- prepare bio unless that sector
* is already covered by previously prepared bios */
STATIC int atodb_prepare_unless_covered(struct drbd_conf *mdev,
@@ -606,12 +607,12 @@ STATIC int atodb_prepare_unless_covered(struct drbd_conf *mdev,
bio->bi_bdev = mdev->bc->md_bdev;
bio->bi_sector = on_disk_sector;
- if (bio_add_page(bio, page, MD_HARDSECT, page_offset) != MD_HARDSECT)
+ if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE)
goto out_put_page;
atomic_inc(&wc->count);
/* we already know that we may do this...
- * inc_local_if_state(mdev,D_ATTACHING);
+ * get_ldev_if_state(mdev,D_ATTACHING);
* just get the extra reference, so that the local_cnt reflects
* the number of pending IO requests DRBD at its backing device.
*/
@@ -631,7 +632,8 @@ out_bio_put:
/**
* drbd_al_to_on_disk_bm:
- * Writes the areas of the bitmap which are covered by the AL.
+ * Writes the areas of the bitmap which are covered by the
+ * currently active extents of the activity log.
* called when we detach (unconfigure) local storage,
* or when we go from R_PRIMARY to R_SECONDARY state.
*/
@@ -642,7 +644,7 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
struct bio **bios;
struct drbd_atodb_wait wc;
- ERR_IF (!inc_local_if_state(mdev, D_ATTACHING))
+ ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
return; /* sorry, I don't have any act_log etc... */
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
@@ -699,7 +701,7 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
if (atomic_read(&wc.count))
wait_for_completion(&wc.io_done);
- dec_local(mdev);
+ put_ldev(mdev);
if (wc.error)
drbd_io_error(mdev, TRUE);
@@ -727,12 +729,12 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
- dec_local(mdev);
+ put_ldev(mdev);
}
/**
- * drbd_al_apply_to_bm: Sets the bits in the bitmap that are described
- * by the active extents of the AL.
+ * drbd_al_apply_to_bm: Sets the bits in the in-memory bitmap
+ * which are described by the active extents of the activity log.
*/
void drbd_al_apply_to_bm(struct drbd_conf *mdev)
{
@@ -757,7 +759,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
ppsize(ppb, Bit2KB(add)));
}
-static inline int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
+static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
int rv;
@@ -771,8 +773,8 @@ static inline int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
}
/**
- * drbd_al_shrink: Removes all active extents form the AL. (but does not
- * write any transactions)
+ * drbd_al_shrink: Removes all active extents from the activity log.
+ * (but does not write any transactions)
* You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
*/
void drbd_al_shrink(struct drbd_conf *mdev)
@@ -796,14 +798,14 @@ STATIC int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
{
struct update_odbm_work *udw = (struct update_odbm_work *)w;
- if (!inc_local(mdev)) {
+ if (!get_ldev(mdev)) {
if (__ratelimit(&drbd_ratelimit_state))
dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
return 1;
}
drbd_bm_write_sect(mdev, udw->enr);
- dec_local(mdev);
+ put_ldev(mdev);
kfree(udw);
@@ -825,7 +827,7 @@ STATIC int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
/* ATTENTION. The AL's extents are 4MB each, while the extents in the
* resync LRU-cache are 16MB each.
- * The caller of this function has to hold an inc_local() reference.
+ * The caller of this function has to hold a get_ldev() reference.
*
* TODO will be obsoleted once we have a caching lru of the on disk bitmap
*/
@@ -977,9 +979,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
mdev->rs_mark_left = drbd_bm_total_weight(mdev);
}
}
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
- dec_local(mdev);
+ put_ldev(mdev);
}
/* just wake_up unconditional now, various lc_chaged(),
* lc_put() in drbd_try_clear_on_disk_bm(). */
@@ -1012,7 +1014,7 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
return;
}
- if (!inc_local(mdev))
+ if (!get_ldev(mdev))
return; /* no disk, no metadata, no bitmap to set bits in */
nr_sectors = drbd_get_capacity(mdev->this_bdev);
@@ -1046,10 +1048,10 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
spin_unlock_irqrestore(&mdev->al_lock, flags);
out:
- dec_local(mdev);
+ put_ldev(mdev);
}
-static inline
+static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
struct bm_extent *bm_ext;
@@ -1088,7 +1090,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
return bm_ext;
}
-static inline int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
+static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
struct lc_element *al_ext;
int rv = 0;
@@ -1330,9 +1332,9 @@ void drbd_rs_cancel_all(struct drbd_conf *mdev)
spin_lock_irq(&mdev->al_lock);
- if (inc_local_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
+ if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
lc_reset(mdev->resync);
- dec_local(mdev);
+ put_ldev(mdev);
}
mdev->resync_locked = 0;
mdev->resync_wenr = LC_FREE;
@@ -1355,7 +1357,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
spin_lock_irq(&mdev->al_lock);
- if (inc_local_if_state(mdev, D_FAILED)) {
+ if (get_ldev_if_state(mdev, D_FAILED)) {
/* ok, ->resync is there. */
for (i = 0; i < mdev->resync->nr_elements; i++) {
bm_ext = (struct bm_extent *) lc_entry(mdev->resync, i);
@@ -1374,7 +1376,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
if (bm_ext->lce.refcnt != 0) {
dev_info(DEV, "Retrying drbd_rs_del_all() later. "
"refcnt=%d\n", bm_ext->lce.refcnt);
- dec_local(mdev);
+ put_ldev(mdev);
spin_unlock_irq(&mdev->al_lock);
return -EAGAIN;
}
@@ -1383,7 +1385,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
lc_del(mdev->resync, &bm_ext->lce);
}
D_ASSERT(mdev->resync->used == 0);
- dec_local(mdev);
+ put_ldev(mdev);
}
spin_unlock_irq(&mdev->al_lock);
@@ -1443,9 +1445,9 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
if (count) {
mdev->rs_failed += count;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
- dec_local(mdev);
+ put_ldev(mdev);
}
/* just wake_up unconditional now, various lc_chaged(),
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index c160f7ab9e01..213fa12da121 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -81,7 +81,7 @@ struct drbd_bitmap {
#define BM_LOCKED 0
#define BM_MD_IO_ERROR 1
-static inline int bm_is_locked(struct drbd_bitmap *b)
+static int bm_is_locked(struct drbd_bitmap *b)
{
return test_bit(BM_LOCKED, &b->bm_flags);
}
@@ -178,7 +178,7 @@ void bm_unmap(unsigned long *p_addr)
}
/* long word offset of _bitmap_ sector */
-#define S2W(s) ((s)<<(BM_EXT_SIZE_B-BM_BLOCK_SIZE_B-LN2_BPL))
+#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* word offset from start of bitmap to word number _in_page_
* modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
@@ -384,12 +384,12 @@ STATIC unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endia
return bits;
}
-static inline unsigned long bm_count_bits(struct drbd_bitmap *b)
+static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
return __bm_count_bits(b, 0);
}
-static inline unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b)
+static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b)
{
return __bm_count_bits(b, 1);
}
@@ -498,9 +498,9 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
*/
words = ALIGN(bits, 64) >> LN2_BPL;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
D_ASSERT((u64)bits <= (((u64)mdev->bc->md.md_size_sect-MD_BM_OFFSET) << 12));
- dec_local(mdev);
+ put_ldev(mdev);
}
/* one extra long to catch off by one errors */
@@ -580,7 +580,7 @@ unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
unsigned long flags;
/* if I don't have a disk, I don't know about out-of-sync status */
- if (!inc_local_if_state(mdev, D_NEGOTIATING))
+ if (!get_ldev_if_state(mdev, D_NEGOTIATING))
return 0;
ERR_IF(!b) return 0;
@@ -590,7 +590,7 @@ unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
s = b->bm_set;
spin_unlock_irqrestore(&b->bm_lock, flags);
- dec_local(mdev);
+ put_ldev(mdev);
return s;
}
@@ -864,7 +864,7 @@ STATIC int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
now = b->bm_set;
dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
- ppsize(ppb, now << (BM_BLOCK_SIZE_B-10)), now);
+ ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
return err;
}
@@ -908,7 +908,7 @@ int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(lo
offset = S2W(enr); /* word offset into bitmap */
num_words = min(S2W(1), bm_words - offset);
if (num_words < S2W(1))
- memset(page_address(mdev->md_io_page), 0, MD_HARDSECT);
+ memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE);
drbd_bm_get_lel(mdev, offset, num_words,
page_address(mdev->md_io_page));
if (!drbd_md_sync_page_io(mdev, mdev->bc, on_disk_sector, WRITE)) {
diff --git a/drivers/block/drbd/drbd_buildtag.c b/drivers/block/drbd/drbd_buildtag.c
index a58ad76078a5..213234342e70 100644
--- a/drivers/block/drbd/drbd_buildtag.c
+++ b/drivers/block/drbd/drbd_buildtag.c
@@ -2,6 +2,6 @@
#include <linux/drbd_config.h>
const char *drbd_buildtag(void)
{
- return "GIT-hash: 29ef4c01e46b0a269d7bec39d5178be06097fead drbd/Kconfig drbd/Makefile drbd/Makefile-2.6 drbd/drbd_actlog.c drbd/drbd_bitmap.c drbd/drbd_int.h drbd/drbd_main.c drbd/drbd_nl.c drbd/drbd_proc.c drbd/drbd_receiver.c drbd/drbd_req.c drbd/drbd_req.h drbd/drbd_tracing.c drbd/drbd_tracing.h drbd/drbd_worker.c drbd/drbd_wrappers.h drbd/linux/drbd_config.h"
- " build by phil@fat-tyre, 2009-04-29 15:43:41";
+ return "GIT-hash: c522e740ae3163f5a5ff83c0c58d9f2801299961 drbd/drbd_int.h"
+ " build by phil@fat-tyre, 2009-05-05 17:15:39";
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 175de11d31e4..dcc35bf67eea 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -38,7 +38,7 @@
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <linux/bio.h>
+#include <linux/genhd.h>
#include <net/tcp.h>
#include "lru_cache.h"
@@ -131,7 +131,7 @@ struct drbd_conf;
#define ERR_IF(exp) if (({ \
int _b = (exp) != 0; \
- if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \
+ if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \
__func__, #exp, __FILE__, __LINE__); \
_b; \
}))
@@ -350,7 +350,7 @@ struct p_header {
u16 command;
u16 length; /* bytes of data after this header */
u8 payload[0];
-} __attribute((packed));
+} __packed;
/* 8 bytes. packet FIXED for the next century! */
/*
@@ -380,7 +380,7 @@ struct p_data {
u64 block_id; /* to identify the request in protocol B&C */
u32 seq_num;
u32 dp_flags;
-} __attribute((packed));
+} __packed;
/*
* commands which share a struct:
@@ -396,7 +396,7 @@ struct p_block_ack {
u64 block_id;
u32 blksize;
u32 seq_num;
-} __attribute((packed));
+} __packed;
struct p_block_req {
@@ -405,7 +405,7 @@ struct p_block_req {
u64 block_id;
u32 blksize;
u32 pad; /* to multiple of 8 Byte */
-} __attribute((packed));
+} __packed;
/*
* commands with their own struct for additional fields:
@@ -428,20 +428,20 @@ struct p_handshake {
u32 _pad;
u64 reserverd[7];
-} __attribute((packed));
+} __packed;
/* 80 bytes, FIXED for the next century */
struct p_barrier {
struct p_header head;
u32 barrier; /* barrier number _handle_ only */
u32 pad; /* to multiple of 8 Byte */
-} __attribute((packed));
+} __packed;
struct p_barrier_ack {
struct p_header head;
u32 barrier;
u32 set_size;
-} __attribute((packed));
+} __packed;
struct p_rs_param {
struct p_header head;
@@ -449,7 +449,7 @@ struct p_rs_param {
/* Since protocol version 88 and higher. */
char verify_alg[0];
-} __attribute((packed));
+} __packed;
struct p_rs_param_89 {
struct p_header head;
@@ -457,7 +457,7 @@ struct p_rs_param_89 {
/* protocol version 89: */
char verify_alg[SHARED_SECRET_MAX];
char csums_alg[SHARED_SECRET_MAX];
-} __attribute((packed));
+} __packed;
struct p_protocol {
struct p_header head;
@@ -471,17 +471,17 @@ struct p_protocol {
/* Since protocol version 87 and higher. */
char integrity_alg[0];
-} __attribute((packed));
+} __packed;
struct p_uuids {
struct p_header head;
u64 uuid[UI_EXTENDED_SIZE];
-} __attribute((packed));
+} __packed;
struct p_rs_uuid {
struct p_header head;
u64 uuid;
-} __attribute((packed));
+} __packed;
struct p_sizes {
struct p_header head;
@@ -490,23 +490,23 @@ struct p_sizes {
u64 c_size; /* current exported size */
u32 max_segment_size; /* Maximal size of a BIO */
u32 queue_order_type;
-} __attribute((packed));
+} __packed;
struct p_state {
struct p_header head;
u32 state;
-} __attribute((packed));
+} __packed;
struct p_req_state {
struct p_header head;
u32 mask;
u32 val;
-} __attribute((packed));
+} __packed;
struct p_req_state_reply {
struct p_header head;
u32 retcode;
-} __attribute((packed));
+} __packed;
struct p_drbd06_param {
u64 size;
@@ -516,14 +516,14 @@ struct p_drbd06_param {
u32 version;
u32 gen_cnt[5];
u32 bit_map_gen[5];
-} __attribute((packed));
+} __packed;
struct p_discard {
struct p_header head;
u64 block_id;
u32 seq_num;
u32 pad;
-} __attribute((packed));
+} __packed;
/* Valid values for the encoding field.
* Bump proto version when changing this. */
@@ -544,7 +544,7 @@ struct p_compressed_bm {
u8 encoding;
u8 code[0];
-} __attribute((packed));
+} __packed;
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
@@ -612,7 +612,7 @@ union p_polymorph {
struct p_req_state req_state;
struct p_req_state_reply req_state_reply;
struct p_block_req block_req;
-} __attribute((packed));
+} __packed;
/**********************************************************************/
enum drbd_thread_state {
@@ -889,7 +889,7 @@ struct drbd_conf {
unsigned long flags;
/* configured by drbdsetup */
- struct net_conf *net_conf; /* protected by inc_net() and dec_net() */
+ struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
struct syncer_conf sync_conf;
struct drbd_backing_dev *bc __protected_by(local);
@@ -994,7 +994,7 @@ struct drbd_conf {
atomic_t pp_in_use;
wait_queue_head_t ee_wait;
struct page *md_io_page; /* one page buffer for md_io */
- struct page *md_io_tmpp; /* for hardsect != 512 [s390 only?] */
+ struct page *md_io_tmpp; /* for hardsect_size != 512 [s390 only?] */
struct mutex md_io_mutex; /* protects the md_io_buffer */
spinlock_t al_lock;
wait_queue_head_t al_wait;
@@ -1187,13 +1187,13 @@ extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf
#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
/* Since the smalles IO unit is usually 512 byte */
-#define MD_HARDSECT_B 9
-#define MD_HARDSECT (1<<MD_HARDSECT_B)
+#define MD_SECTOR_SHIFT 9
+#define MD_SECTOR_SIZE (1<<MD_SECTOR_SHIFT)
/* activity log */
-#define AL_EXTENTS_PT ((MD_HARDSECT-12)/8-1) /* 61 ; Extents per 512B sector */
-#define AL_EXTENT_SIZE_B 22 /* One extent represents 4M Storage */
-#define AL_EXTENT_SIZE (1<<AL_EXTENT_SIZE_B)
+#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1) /* 61 ; Extents per 512B sector */
+#define AL_EXTENT_SHIFT 22 /* One extent represents 4M Storage */
+#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
#if BITS_PER_LONG == 32
#define LN2_BPL 5
@@ -1227,38 +1227,38 @@ struct bm_extent {
* Bit 1 ==> local node thinks this block needs to be synced.
*/
-#define BM_BLOCK_SIZE_B 12 /* 4k per bit */
-#define BM_BLOCK_SIZE (1<<BM_BLOCK_SIZE_B)
+#define BM_BLOCK_SHIFT 12 /* 4k per bit */
+#define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
* per sector of on disk bitmap */
-#define BM_EXT_SIZE_B (BM_BLOCK_SIZE_B + MD_HARDSECT_B + 3) /* = 24 */
-#define BM_EXT_SIZE (1<<BM_EXT_SIZE_B)
+#define BM_EXT_SHIFT (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3) /* = 24 */
+#define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
-#if (BM_EXT_SIZE_B != 24) || (BM_BLOCK_SIZE_B != 12)
+#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif
/* thus many _storage_ sectors are described by one bit */
-#define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SIZE_B-9))
-#define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SIZE_B-9))
+#define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
+#define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
/* bit to represented kilo byte conversion */
-#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SIZE_B-10))
+#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
/* in which _bitmap_ extent (resp. sector) the bit for a certain
* _storage_ sector is located in */
-#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SIZE_B-9))
+#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
/* how much _storage_ sectors we have per bitmap sector */
-#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SIZE_B-9))
+#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
#define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
/* in one sector of the bitmap, we have this many activity_log extents. */
-#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SIZE_B - AL_EXTENT_SIZE_B))
-#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SIZE_B-BM_BLOCK_SIZE_B-LN2_BPL))
+#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
+#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
-#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SIZE_B - BM_BLOCK_SIZE_B)
+#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
#define BM_BLOCKS_PER_BM_EXT_MASK ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
/* the extent in "PER_EXTENT" below is an activity log extent
@@ -1277,7 +1277,7 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
#define DRBD_MAX_SECTORS_BM \
- ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SIZE_B-9)))
+ ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
@@ -1902,35 +1902,35 @@ static inline void inc_unacked(struct drbd_conf *mdev)
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
-static inline void dec_net(struct drbd_conf *mdev)
+static inline void put_net_conf(struct drbd_conf *mdev)
{
if (atomic_dec_and_test(&mdev->net_cnt))
wake_up(&mdev->misc_wait);
}
/**
- * inc_net: Returns TRUE when it is ok to access mdev->net_conf. You
- * should call dec_net() when finished looking at mdev->net_conf.
+ * get_net_conf: Returns TRUE when it is ok to access mdev->net_conf. You
+ * should call put_net_conf() when finished looking at mdev->net_conf.
*/
-static inline int inc_net(struct drbd_conf *mdev)
+static inline int get_net_conf(struct drbd_conf *mdev)
{
int have_net_conf;
atomic_inc(&mdev->net_cnt);
have_net_conf = mdev->state.conn >= C_UNCONNECTED;
if (!have_net_conf)
- dec_net(mdev);
+ put_net_conf(mdev);
return have_net_conf;
}
/**
- * inc_local: Returns TRUE when local IO is possible. If it returns
- * TRUE you should call dec_local() after IO is completed.
+ * get_ldev: Returns TRUE when local IO is possible. If it returns
+ * TRUE you should call put_ldev() after IO is completed.
*/
-#define inc_local_if_state(M,MINS) __cond_lock(local, _inc_local_if_state(M,MINS))
-#define inc_local(M) __cond_lock(local, _inc_local_if_state(M,D_INCONSISTENT))
+#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
+#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
-static inline void dec_local(struct drbd_conf *mdev)
+static inline void put_ldev(struct drbd_conf *mdev)
{
__release(local);
if (atomic_dec_and_test(&mdev->local_cnt))
@@ -1939,21 +1939,21 @@ static inline void dec_local(struct drbd_conf *mdev)
}
#ifndef __CHECKER__
-static inline int _inc_local_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
int io_allowed;
atomic_inc(&mdev->local_cnt);
io_allowed = (mdev->state.disk >= mins);
if (!io_allowed)
- dec_local(mdev);
+ put_ldev(mdev);
return io_allowed;
}
#else
-extern int _inc_local_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
+extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
#endif
-/* you must have an "inc_local" reference */
+/* you must have a "get_ldev" reference */
static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
unsigned long *bits_left, unsigned int *per_mil_done)
{
@@ -1997,9 +1997,9 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
int mxb = 1000000; /* arbitrary limit on open requests */
- if (inc_net(mdev)) {
+ if (get_net_conf(mdev)) {
mxb = mdev->net_conf->max_buffers;
- dec_net(mdev);
+ put_net_conf(mdev);
}
return mxb;
}
@@ -2196,9 +2196,9 @@ static inline void drbd_blk_run_queue(struct request_queue *q)
static inline void drbd_kick_lo(struct drbd_conf *mdev)
{
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
drbd_blk_run_queue(bdev_get_queue(mdev->bc->backing_bdev));
- dec_local(mdev);
+ put_ldev(mdev);
}
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 4a2593ce1d37..3c377d326570 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -173,7 +173,7 @@ STATIC struct block_device_operations drbd_ops = {
/* When checking with sparse, and this is an inline function, sparse will
give tons of false positives. When this is a real functions sparse works.
*/
-int _inc_local_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
int io_allowed;
@@ -400,9 +400,9 @@ int drbd_io_error(struct drbd_conf *mdev, int forcedetach)
int ok = 1;
eh = EP_PASS_ON;
- if (inc_local_if_state(mdev, D_FAILED)) {
+ if (get_ldev_if_state(mdev, D_FAILED)) {
eh = mdev->bc->dc.on_io_error;
- dec_local(mdev);
+ put_ldev(mdev);
}
if (!forcedetach && eh == EP_PASS_ON)
@@ -480,11 +480,13 @@ void drbd_force_state(struct drbd_conf *mdev,
drbd_change_state(mdev, CS_HARD, mask, val);
}
-int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
-int is_valid_state_transition(struct drbd_conf *,
- union drbd_state, union drbd_state);
+STATIC int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
+STATIC int is_valid_state_transition(struct drbd_conf *,
+ union drbd_state, union drbd_state);
+STATIC union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, int *warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
- union drbd_state, union drbd_state);
+ union drbd_state, union drbd_state);
STATIC enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
union drbd_state mask, union drbd_state val)
@@ -503,6 +505,8 @@ STATIC enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
spin_lock_irqsave(&mdev->req_lock, flags);
os = mdev->state;
ns.i = (os.i & ~mask.i) | val.i;
+ ns = sanitize_state(mdev, os, ns, NULL);
+
if (!cl_wide_st_chg(mdev, os, ns))
rv = SS_CW_NO_NEED;
if (!rv) {
@@ -541,6 +545,7 @@ STATIC int drbd_req_state(struct drbd_conf *mdev,
spin_lock_irqsave(&mdev->req_lock, flags);
os = mdev->state;
ns.i = (os.i & ~mask.i) | val.i;
+ ns = sanitize_state(mdev, os, ns, NULL);
if (cl_wide_st_chg(mdev, os, ns)) {
rv = is_valid_state(mdev, ns);
@@ -567,7 +572,6 @@ STATIC int drbd_req_state(struct drbd_conf *mdev,
(rv = _req_st_cond(mdev, mask, val)));
if (rv < SS_SUCCESS) {
- /* nearly dead code. */
drbd_state_unlock(mdev);
if (f & CS_VERBOSE)
print_st_err(mdev, os, ns, rv);
@@ -655,7 +659,7 @@ void print_st_err(struct drbd_conf *mdev,
A##s_to_name(ns.A)); \
} })
-int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+STATIC int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
/* See drbd_state_sw_errors in drbd_strings.c */
@@ -663,16 +667,16 @@ int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
int rv = SS_SUCCESS;
fp = FP_DONT_CARE;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
fp = mdev->bc->dc.fencing;
- dec_local(mdev);
+ put_ldev(mdev);
}
- if (inc_net(mdev)) {
+ if (get_net_conf(mdev)) {
if (!mdev->net_conf->two_primaries &&
ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
rv = SS_TWO_PRIMARIES;
- dec_net(mdev);
+ put_net_conf(mdev);
}
if (rv <= 0)
@@ -714,8 +718,8 @@ int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
return rv;
}
-int is_valid_state_transition(struct drbd_conf *mdev,
- union drbd_state ns, union drbd_state os)
+STATIC int is_valid_state_transition(struct drbd_conf *mdev,
+ union drbd_state ns, union drbd_state os)
{
int rv = SS_SUCCESS;
@@ -755,27 +759,17 @@ int is_valid_state_transition(struct drbd_conf *mdev,
return rv;
}
-int __drbd_set_state(struct drbd_conf *mdev,
- union drbd_state ns, enum chg_state_flags flags,
- struct completion *done)
+STATIC union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
+ union drbd_state ns, int *warn_sync_abort)
{
- union drbd_state os;
- int rv = SS_SUCCESS;
- int warn_sync_abort = 0;
enum drbd_fencing_p fp;
- struct after_state_chg_work *ascw;
-
-
- os = mdev->state;
fp = FP_DONT_CARE;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
fp = mdev->bc->dc.fencing;
- dec_local(mdev);
+ put_ldev(mdev);
}
- /* Early state sanitising. */
-
/* Dissalow Network errors to configure a device's network part */
if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
os.conn <= C_DISCONNECTING)
@@ -804,9 +798,11 @@ int __drbd_set_state(struct drbd_conf *mdev,
if (ns.conn <= C_DISCONNECTING && ns.disk == D_DISKLESS)
ns.pdsk = D_UNKNOWN;
+ /* Abort resync if a disk fails/detaches */
if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
(ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
- warn_sync_abort = 1;
+ if (warn_sync_abort)
+ *warn_sync_abort = 1;
ns.conn = C_CONNECTED;
}
@@ -857,7 +853,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
/* Connection breaks down before we finished "Negotiating" */
if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
- inc_local_if_state(mdev, D_NEGOTIATING)) {
+ get_ldev_if_state(mdev, D_NEGOTIATING)) {
if (mdev->ed_uuid == mdev->bc->md.uuid[UI_CURRENT]) {
ns.disk = mdev->new_state_tmp.disk;
ns.pdsk = mdev->new_state_tmp.pdsk;
@@ -866,7 +862,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
ns.disk = D_DISKLESS;
ns.pdsk = D_UNKNOWN;
}
- dec_local(mdev);
+ put_ldev(mdev);
}
if (fp == FP_STONITH &&
@@ -887,6 +883,22 @@ int __drbd_set_state(struct drbd_conf *mdev,
ns.conn = C_SYNC_TARGET;
}
+ return ns;
+}
+
+int __drbd_set_state(struct drbd_conf *mdev,
+ union drbd_state ns, enum chg_state_flags flags,
+ struct completion *done)
+{
+ union drbd_state os;
+ int rv = SS_SUCCESS;
+ int warn_sync_abort = 0;
+ struct after_state_chg_work *ascw;
+
+ os = mdev->state;
+
+ ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
+
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
@@ -997,7 +1009,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
mod_timer(&mdev->resync_timer, jiffies);
}
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
u32 mdf = mdev->bc->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
@@ -1021,7 +1033,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
}
if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
drbd_set_ed_uuid(mdev, mdev->bc->md.uuid[UI_CURRENT]);
- dec_local(mdev);
+ put_ldev(mdev);
}
/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
@@ -1102,9 +1114,9 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
}
fp = FP_DONT_CARE;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
fp = mdev->bc->dc.fencing;
- dec_local(mdev);
+ put_ldev(mdev);
}
/* Inform userspace about the change... */
@@ -1145,24 +1157,24 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
ns.pdsk == D_OUTDATED)) {
kfree(mdev->p_uuid);
mdev->p_uuid = NULL;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
mdev->bc->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
drbd_uuid_new_current(mdev);
drbd_send_uuids(mdev);
}
- dec_local(mdev);
+ put_ldev(mdev);
}
}
- if (ns.pdsk < D_INCONSISTENT && inc_local(mdev)) {
+ if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
if (ns.peer == R_PRIMARY && mdev->bc->md.uuid[UI_BITMAP] == 0)
drbd_uuid_new_current(mdev);
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
drbd_al_to_on_disk_bm(mdev);
- dec_local(mdev);
+ put_ldev(mdev);
}
/* Last part of the attaching process ... */
@@ -1202,11 +1214,16 @@ STATIC void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
- /* since inc_local() only works as long as disk>=D_INCONSISTENT,
+ /* since get_ldev() only works as long as disk>=D_INCONSISTENT,
and it is D_DISKLESS here, local_cnt can only go down, it can
not increase... It will reach zero */
wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+ drbd_rs_cancel_all(mdev);
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
+
lc_free(mdev->resync);
mdev->resync = NULL;
lc_free(mdev->act_log);
@@ -1602,7 +1619,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
struct p_uuids p;
int i;
- if (!inc_local_if_state(mdev, D_NEGOTIATING))
+ if (!get_ldev_if_state(mdev, D_NEGOTIATING))
return 1;
for (i = UI_CURRENT; i < UI_SIZE; i++)
@@ -1615,7 +1632,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
- dec_local(mdev);
+ put_ldev(mdev);
return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
(struct p_header *)&p, sizeof(p));
@@ -1649,13 +1666,13 @@ int drbd_send_sizes(struct drbd_conf *mdev)
int q_order_type;
int ok;
- if (inc_local_if_state(mdev, D_NEGOTIATING)) {
+ if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
D_ASSERT(mdev->bc->backing_bdev);
d_size = drbd_get_max_capacity(mdev->bc);
u_size = mdev->bc->dc.disk_size;
q_order_type = drbd_queue_order_type(mdev);
p.queue_order_type = cpu_to_be32(drbd_queue_order_type(mdev));
- dec_local(mdev);
+ put_ldev(mdev);
} else {
d_size = 0;
u_size = 0;
@@ -1888,7 +1905,7 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
return FALSE;
}
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
if (drbd_md_test_flag(mdev->bc, MDF_FULL_SYNC)) {
dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
drbd_bm_set_all(mdev);
@@ -1902,7 +1919,7 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
drbd_md_sync(mdev);
}
}
- dec_local(mdev);
+ put_ldev(mdev);
}
c = (struct bm_xfer_ctx) {
@@ -2165,7 +2182,7 @@ int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
return ok;
}
-static inline int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
struct bio_vec *bvec;
int i;
@@ -2177,7 +2194,7 @@ static inline int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
return 1;
}
-static inline int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
+static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
struct bio_vec *bvec;
int i;
@@ -2807,10 +2824,10 @@ static int drbd_congested(void *congested_data, int bdi_bits)
goto out;
}
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
q = bdev_get_queue(mdev->bc->backing_bdev);
r = bdi_congested(&q->backing_dev_info, bdi_bits);
- dec_local(mdev);
+ put_ldev(mdev);
if (r)
reason = 'b';
}
@@ -3077,7 +3094,7 @@ struct meta_data_on_disk {
u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
u32 reserved_u32[4];
-} __attribute((packed));
+} __packed;
/**
* drbd_md_sync:
@@ -3095,7 +3112,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
/* We use here D_FAILED and not D_ATTACHING because we try to write
* metadata even if we detach due to a disk failure! */
- if (!inc_local_if_state(mdev, D_FAILED))
+ if (!get_ldev_if_state(mdev, D_FAILED))
return;
trace_drbd_md_io(mdev, WRITE, mdev->bc);
@@ -3136,7 +3153,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
mdev->bc->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
mutex_unlock(&mdev->md_io_mutex);
- dec_local(mdev);
+ put_ldev(mdev);
}
/**
@@ -3151,7 +3168,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
struct meta_data_on_disk *buffer;
int i, rv = NO_ERROR;
- if (!inc_local_if_state(mdev, D_ATTACHING))
+ if (!get_ldev_if_state(mdev, D_ATTACHING))
return ERR_IO_MD_DISK;
trace_drbd_md_io(mdev, READ, bdev);
@@ -3210,7 +3227,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
err:
mutex_unlock(&mdev->md_io_mutex);
- dec_local(mdev);
+ put_ldev(mdev);
return rv;
}
@@ -3316,7 +3333,7 @@ int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
int rv = -EIO;
- if (inc_local_if_state(mdev, D_ATTACHING)) {
+ if (get_ldev_if_state(mdev, D_ATTACHING)) {
drbd_md_set_flag(mdev, MDF_FULL_SYNC);
drbd_md_sync(mdev);
drbd_bm_set_all(mdev);
@@ -3328,7 +3345,7 @@ int drbd_bmio_set_n_write(struct drbd_conf *mdev)
drbd_md_sync(mdev);
}
- dec_local(mdev);
+ put_ldev(mdev);
}
return rv;
@@ -3343,10 +3360,10 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
int rv = -EIO;
- if (inc_local_if_state(mdev, D_ATTACHING)) {
+ if (get_ldev_if_state(mdev, D_ATTACHING)) {
drbd_bm_clear_all(mdev);
rv = drbd_bm_write(mdev);
- dec_local(mdev);
+ put_ldev(mdev);
}
return rv;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index c388478a0188..55dbf83d559f 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -164,9 +164,9 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
- if (inc_local_if_state(mdev, D_CONSISTENT)) {
+ if (get_ldev_if_state(mdev, D_CONSISTENT)) {
fp = mdev->bc->dc.fencing;
- dec_local(mdev);
+ put_ldev(mdev);
} else {
dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
return mdev->state.pdsk;
@@ -312,30 +312,30 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
if (new_role == R_SECONDARY) {
set_disk_ro(mdev->vdisk, TRUE);
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
mdev->bc->md.uuid[UI_CURRENT] &= ~(u64)1;
- dec_local(mdev);
+ put_ldev(mdev);
}
} else {
- if (inc_net(mdev)) {
+ if (get_net_conf(mdev)) {
mdev->net_conf->want_lose = 0;
- dec_net(mdev);
+ put_net_conf(mdev);
}
set_disk_ro(mdev->vdisk, FALSE);
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
if (((mdev->state.conn < C_CONNECTED ||
mdev->state.pdsk <= D_FAILED)
&& mdev->bc->md.uuid[UI_BITMAP] == 0) || forced)
drbd_uuid_new_current(mdev);
mdev->bc->md.uuid[UI_CURRENT] |= (u64)1;
- dec_local(mdev);
+ put_ldev(mdev);
}
}
- if ((new_role == R_SECONDARY) && inc_local(mdev)) {
+ if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
drbd_al_to_on_disk_bm(mdev);
- dec_local(mdev);
+ put_ldev(mdev);
}
if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
@@ -737,7 +737,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
union drbd_state ns, os;
int rv;
int cp_discovered = 0;
- int hardsect;
+ int hardsect_size;
drbd_reconfig_start(mdev);
@@ -890,7 +890,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (retcode < SS_SUCCESS)
goto release_bdev2_fail;
- if (!inc_local_if_state(mdev, D_ATTACHING))
+ if (!get_ldev_if_state(mdev, D_ATTACHING))
goto force_diskless;
drbd_md_set_sector_offsets(mdev, nbc);
@@ -915,7 +915,7 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
goto force_diskless_dec;
}
- /* Since we are diskless, fix the AL first... */
+ /* Since we are diskless, fix the activity log first... */
if (drbd_check_al_size(mdev)) {
retcode = ERR_NOMEM;
goto force_diskless_dec;
@@ -934,19 +934,19 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
goto force_diskless_dec;
}
- /* allocate a second IO page if hardsect != 512 */
- hardsect = drbd_get_hardsect(nbc->md_bdev);
- if (hardsect == 0)
- hardsect = MD_HARDSECT;
+ /* allocate a second IO page if hardsect_size != 512 */
+ hardsect_size = drbd_get_hardsect_size(nbc->md_bdev);
+ if (hardsect_size == 0)
+ hardsect_size = MD_SECTOR_SIZE;
- if (hardsect != MD_HARDSECT) {
+ if (hardsect_size != MD_SECTOR_SIZE) {
if (!mdev->md_io_tmpp) {
struct page *page = alloc_page(GFP_NOIO);
if (!page)
goto force_diskless_dec;
- dev_warn(DEV, "Meta data's bdev hardsect = %d != %d\n",
- hardsect, MD_HARDSECT);
+ dev_warn(DEV, "Meta data's bdev hardsect_size = %d != %d\n",
+ hardsect_size, MD_SECTOR_SIZE);
dev_warn(DEV, "Workaround engaged (has performace impact).\n");
mdev->md_io_tmpp = page;
@@ -1089,13 +1089,13 @@ STATIC int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
drbd_md_sync(mdev);
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
- dec_local(mdev);
+ put_ldev(mdev);
reply->ret_code = retcode;
drbd_reconfig_done(mdev);
return 0;
force_diskless_dec:
- dec_local(mdev);
+ put_ldev(mdev);
force_diskless:
drbd_force_state(mdev, NS(disk, D_DISKLESS));
drbd_md_sync(mdev);
@@ -1201,7 +1201,7 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
odev = minor_to_mdev(i);
if (!odev || odev == mdev)
continue;
- if (inc_net(odev)) {
+ if (get_net_conf(odev)) {
taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
!memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
@@ -1212,7 +1212,7 @@ STATIC int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
!memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
retcode = ERR_PEER_ADDR;
- dec_net(odev);
+ put_net_conf(odev);
if (retcode != NO_ERROR)
goto fail;
}
@@ -1451,7 +1451,7 @@ STATIC int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (!inc_local(mdev)) {
+ if (!get_ldev(mdev)) {
retcode = ERR_NO_DISK;
goto fail;
}
@@ -1464,7 +1464,7 @@ STATIC int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
mdev->bc->dc.disk_size = (sector_t)rs.resize_size;
dd = drbd_determin_dev_size(mdev);
drbd_md_sync(mdev);
- dec_local(mdev);
+ put_ldev(mdev);
if (dd == dev_size_error) {
retcode = ERR_NOMEM_BITMAP;
goto fail;
@@ -1610,14 +1610,14 @@ STATIC int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
}
spin_unlock(&mdev->peer_seq_lock);
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
drbd_al_shrink(mdev);
err = drbd_check_al_size(mdev);
lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
- dec_local(mdev);
+ put_ldev(mdev);
drbd_md_sync(mdev);
if (err) {
@@ -1735,14 +1735,14 @@ STATIC int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
tl = reply->tag_list;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
tl = disk_conf_to_tags(mdev, &mdev->bc->dc, tl);
- dec_local(mdev);
+ put_ldev(mdev);
}
- if (inc_net(mdev)) {
+ if (get_net_conf(mdev)) {
tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
- dec_net(mdev);
+ put_net_conf(mdev);
}
tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
@@ -1763,13 +1763,13 @@ STATIC int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* no local ref, no bitmap, no syncer progress. */
if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
drbd_get_syncer_progress(mdev, &rs_left, &res);
*tl++ = T_sync_progress;
*tl++ = sizeof(int);
memcpy(tl, &res, sizeof(int));
tl = (unsigned short *)((char *)tl + sizeof(int));
- dec_local(mdev);
+ put_ldev(mdev);
}
}
*tl++ = TT_END; /* Close the tag list */
@@ -1784,7 +1784,7 @@ STATIC int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
tl = reply->tag_list;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
/* This is a hand crafted add tag ;) */
*tl++ = T_uuids;
*tl++ = UI_SIZE*sizeof(u64);
@@ -1794,7 +1794,7 @@ STATIC int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
*tl++ = sizeof(int);
memcpy(tl, &mdev->bc->md.flags, sizeof(int));
tl = (unsigned short *)((char *)tl + sizeof(int));
- dec_local(mdev);
+ put_ldev(mdev);
}
*tl++ = TT_END; /* Close the tag list */
@@ -1852,7 +1852,7 @@ STATIC int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
- if (!inc_local(mdev)) {
+ if (!get_ldev(mdev)) {
retcode = ERR_NO_DISK;
goto out;
}
@@ -1888,7 +1888,7 @@ STATIC int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
drbd_md_sync(mdev);
out_dec:
- dec_local(mdev);
+ put_ldev(mdev);
out:
mutex_unlock(&mdev->state_mutex);
@@ -2045,7 +2045,7 @@ STATIC void drbd_connector_callback(void *data)
static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
-static inline unsigned short *
+static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
int len, int nul_terminated)
{
@@ -2065,19 +2065,19 @@ __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
return tl;
}
-static inline unsigned short *
+static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
return __tl_add_blob(tl, tag, data, len, 0);
}
-static inline unsigned short *
+static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}
-static inline unsigned short *
+static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
switch(tag_type(tag)) {
@@ -2255,10 +2255,10 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
unsigned int res;
/* no local ref, no bitmap, no syncer progress, no broadcast. */
- if (!inc_local(mdev))
+ if (!get_ldev(mdev))
return;
drbd_get_syncer_progress(mdev, &rs_left, &res);
- dec_local(mdev);
+ put_ldev(mdev);
*tl++ = T_sync_progress;
*tl++ = sizeof(int);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 7de68d9d6aba..9f0a3c0e952c 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -243,10 +243,10 @@ STATIC int drbd_seq_show(struct seq_file *seq, void *v)
mdev->rs_total - mdev->ov_left,
mdev->rs_total);
- if (proc_details >= 1 && inc_local_if_state(mdev, D_FAILED)) {
+ if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
lc_printf_stats(seq, mdev->resync);
lc_printf_stats(seq, mdev->act_log);
- dec_local(mdev);
+ put_ldev(mdev);
}
if (proc_details >= 2) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 077480fe6923..25da228de2fd 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -70,7 +70,8 @@ STATIC int drbd_do_auth(struct drbd_conf *mdev);
STATIC enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
STATIC int e_end_block(struct drbd_conf *, struct drbd_work *, int);
-static inline struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
+
+static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
struct drbd_epoch *prev;
spin_lock(&mdev->epoch_lock);
@@ -565,7 +566,7 @@ STATIC struct socket *drbd_try_connect(struct drbd_conf *mdev)
int err;
int disconnect_on_error = 1;
- if (!inc_net(mdev))
+ if (!get_net_conf(mdev))
return NULL;
what = "sock_create_kern";
@@ -629,7 +630,7 @@ out:
if (disconnect_on_error)
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
}
- dec_net(mdev);
+ put_net_conf(mdev);
return sock;
}
@@ -639,7 +640,7 @@ STATIC struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
struct socket *s_estab = NULL, *s_listen;
const char *what;
- if (!inc_net(mdev))
+ if (!get_net_conf(mdev))
return NULL;
what = "sock_create_kern";
@@ -675,7 +676,7 @@ out:
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
}
}
- dec_net(mdev);
+ put_net_conf(mdev);
return s_estab;
}
@@ -934,7 +935,7 @@ STATIC enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
{
int rv;
- if (mdev->write_ordering >= WO_bdev_flush && inc_local(mdev)) {
+ if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
rv = blkdev_issue_flush(mdev->bc->backing_bdev, NULL);
if (rv) {
dev_err(DEV, "local disk flush failed with status %d\n", rv);
@@ -943,7 +944,7 @@ STATIC enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
* if (rv == -EOPNOTSUPP) */
drbd_bump_write_ordering(mdev, WO_drain_io);
}
- dec_local(mdev);
+ put_ldev(mdev);
}
return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
@@ -1138,7 +1139,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
/* prepare bio for re-submit,
* re-init volatile members */
/* we still have a local reference,
- * inc_local was done in receive_Data. */
+ * get_ldev was done in receive_Data. */
bio->bi_bdev = mdev->bc->backing_bdev;
bio->bi_sector = e->sector;
bio->bi_size = e->size;
@@ -1428,7 +1429,7 @@ STATIC int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
e = read_in_block(mdev, ID_SYNCER, sector, data_size);
if (!e) {
- dec_local(mdev);
+ put_ldev(mdev);
return FALSE;
}
@@ -1513,9 +1514,9 @@ STATIC int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
sector = be64_to_cpu(p->sector);
D_ASSERT(p->block_id == ID_SYNCER);
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
/* data is submitted to disk within recv_resync_read.
- * corresponding dec_local done below on error,
+ * corresponding put_ldev done below on error,
* or in drbd_endio_write_sec. */
ok = recv_resync_read(mdev, sector, data_size);
} else {
@@ -1671,9 +1672,9 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct p_header *h)
if (drbd_recv(mdev, h->payload, header_size) != header_size)
return FALSE;
- if (!inc_local(mdev)) {
+ if (!get_ldev(mdev)) {
/* data is submitted to disk at the end of this function.
- * corresponding dec_local done either below (on error),
+ * corresponding put_ldev done either below (on error),
* or in drbd_endio_write_sec. */
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not write mirrored data block "
@@ -1691,7 +1692,7 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct p_header *h)
sector = be64_to_cpu(p->sector);
e = read_in_block(mdev, p->block_id, sector, data_size);
if (!e) {
- dec_local(mdev);
+ put_ldev(mdev);
return FALSE;
}
@@ -1835,7 +1836,7 @@ STATIC int receive_Data(struct drbd_conf *mdev, struct p_header *h)
/* we could probably send that P_DISCARD_ACK ourselves,
* but I don't like the receiver using the msock */
- dec_local(mdev);
+ put_ldev(mdev);
wake_asender(mdev);
finish_wait(&mdev->misc_wait, &wait);
return TRUE;
@@ -1905,7 +1906,7 @@ out_interrupted:
/* yes, the epoch_size now is imbalanced.
* but we drop the connection anyways, so we don't have a chance to
* receive a barrier... atomic_inc(&mdev->epoch_size); */
- dec_local(mdev);
+ put_ldev(mdev);
drbd_free_ee(mdev, e);
return FALSE;
}
@@ -1939,7 +1940,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
return FALSE;
}
- if (!inc_local_if_state(mdev, D_UP_TO_DATE)) {
+ if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Can not satisfy peer's read request, "
"no local data.\n");
@@ -1950,7 +1951,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_KERNEL);
if (!e) {
- dec_local(mdev);
+ put_ldev(mdev);
return FALSE;
}
@@ -1974,7 +1975,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
/* we have been interrupted,
* probably connection lost! */
D_ASSERT(signal_pending(current));
- dec_local(mdev);
+ put_ldev(mdev);
drbd_free_ee(mdev, e);
return 0;
}
@@ -1986,7 +1987,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
digest_size = h->length - brps ;
di = kmalloc(sizeof(*di) + digest_size, GFP_KERNEL);
if (!di) {
- dec_local(mdev);
+ put_ldev(mdev);
drbd_free_ee(mdev, e);
return 0;
}
@@ -1995,7 +1996,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
di->digest = (((char *)di)+sizeof(struct digest_info));
if (drbd_recv(mdev, di->digest, digest_size) != digest_size) {
- dec_local(mdev);
+ put_ldev(mdev);
drbd_free_ee(mdev, e);
kfree(di);
return FALSE;
@@ -2016,7 +2017,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
D_ASSERT(signal_pending(current));
drbd_free_ee(mdev, e);
kfree(di);
- dec_local(mdev);
+ put_ldev(mdev);
return FALSE;
}
break;
@@ -2033,7 +2034,7 @@ STATIC int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
/* we have been interrupted,
* probably connection lost! */
D_ASSERT(signal_pending(current));
- dec_local(mdev);
+ put_ldev(mdev);
drbd_free_ee(mdev, e);
return 0;
}
@@ -2737,7 +2738,7 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
mdev->p_size = p_size;
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
warn_if_differ_considerably(mdev, "lower level device sizes",
p_size, drbd_get_max_capacity(mdev->bc));
warn_if_differ_considerably(mdev, "user requested size",
@@ -2766,16 +2767,16 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
dev_err(DEV, "The peer's disk size is too small!\n");
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
mdev->bc->dc.disk_size = my_usize;
- dec_local(mdev);
+ put_ldev(mdev);
return FALSE;
}
- dec_local(mdev);
+ put_ldev(mdev);
}
#undef min_not_zero
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
dd = drbd_determin_dev_size(mdev);
- dec_local(mdev);
+ put_ldev(mdev);
if (dd == dev_size_error)
return FALSE;
drbd_md_sync(mdev);
@@ -2784,10 +2785,10 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
drbd_set_my_capacity(mdev, p_size);
}
- if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && inc_local(mdev)) {
+ if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
nconn = drbd_sync_handshake(mdev,
mdev->state.peer, mdev->state.pdsk);
- dec_local(mdev);
+ put_ldev(mdev);
if (nconn == C_MASK) {
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
@@ -2800,7 +2801,7 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
}
}
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
if (mdev->bc->known_size != drbd_get_capacity(mdev->bc->backing_bdev)) {
mdev->bc->known_size = drbd_get_capacity(mdev->bc->backing_bdev);
ldsc = 1;
@@ -2811,7 +2812,7 @@ STATIC int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
drbd_setup_queue_param(mdev, max_seg_s);
drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
- dec_local(mdev);
+ put_ldev(mdev);
}
if (mdev->state.conn > C_WF_REPORT_PARAMS) {
@@ -2861,7 +2862,7 @@ STATIC int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
return FALSE;
}
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
int skip_initial_sync =
mdev->state.conn == C_CONNECTED &&
mdev->agreed_pro_version >= 90 &&
@@ -2877,7 +2878,7 @@ STATIC int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
CS_VERBOSE, NULL);
drbd_md_sync(mdev);
}
- dec_local(mdev);
+ put_ldev(mdev);
}
/* Before we test for the disk state, we should wait until an eventually
@@ -2982,7 +2983,7 @@ STATIC int receive_state(struct drbd_conf *mdev, struct p_header *h)
nconn = C_CONNECTED;
if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
- inc_local_if_state(mdev, D_NEGOTIATING)) {
+ get_ldev_if_state(mdev, D_NEGOTIATING)) {
int cr; /* consider resync */
cr = (oconn < C_CONNECTED);
@@ -2995,7 +2996,7 @@ STATIC int receive_state(struct drbd_conf *mdev, struct p_header *h)
if (cr)
nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
- dec_local(mdev);
+ put_ldev(mdev);
if (nconn == C_MASK) {
if (mdev->state.disk == D_NEGOTIATING) {
drbd_force_state(mdev, NS(disk, D_DISKLESS));
@@ -3066,13 +3067,13 @@ STATIC int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
/* Here the _drbd_uuid_ functions are right, current should
_not_ be rotated into the history */
- if (inc_local_if_state(mdev, D_NEGOTIATING)) {
+ if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
_drbd_uuid_set(mdev, UI_BITMAP, 0UL);
drbd_start_resync(mdev, C_SYNC_TARGET);
- dec_local(mdev);
+ put_ldev(mdev);
} else
dev_err(DEV, "Ignoring SyncUUID packet!\n");
@@ -3517,9 +3518,9 @@ STATIC void drbd_disconnect(struct drbd_conf *mdev)
drbd_md_sync(mdev);
fp = FP_DONT_CARE;
- if (inc_local(mdev)) {
+ if (get_ldev(mdev)) {
fp = mdev->bc->dc.fencing;
- dec_local(mdev);
+ put_ldev(mdev);
}
if (mdev->state.role == R_PRIMARY) {
@@ -3870,9 +3871,9 @@ STATIC int drbdd_init(struct drbd_thread *thi)
} while (h == 0);
if (h > 0) {
- if (inc_net(mdev)) {
+ if (get_net_conf(mdev)) {
drbdd(mdev);
- dec_net(mdev);
+ put_net_conf(mdev);
}
}
@@ -3929,12 +3930,38 @@ STATIC int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
drbd_rs_complete_io(mdev, sector);
drbd_set_in_sync(mdev, sector, blksize);
/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
- mdev->rs_same_csum += (blksize >> BM_BLOCK_SIZE_B);
+ mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
dec_rs_pending(mdev);
return TRUE;
}
+/* when we receive the ACK for a write request,
+ * verify that we actually know about it */
+static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
+ u64 id, sector_t sector)
+{
+ struct hlist_head *slot = tl_hash_slot(mdev, sector);
+ struct hlist_node *n;
+ struct drbd_request *req;
+
+ hlist_for_each_entry(req, n, slot, colision) {
+ if ((unsigned long)req == (unsigned long)id) {
+ if (req->sector != sector) {
+ dev_err(DEV, "_ack_id_to_req: found req %p but it has "
+ "wrong sector (%llus versus %llus)\n", req,
+ (unsigned long long)req->sector,
+ (unsigned long long)sector);
+ break;
+ }
+ return req;
+ }
+ }
+ dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
+ (void *)(unsigned long)id, (unsigned long long)sector);
+ return NULL;
+}
+
STATIC int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
{
struct drbd_request *req;
@@ -4060,10 +4087,10 @@ STATIC int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
dec_rs_pending(mdev);
- if (inc_local_if_state(mdev, D_FAILED)) {
+ if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, sector);
drbd_rs_failed_io(mdev, sector, size);
- dec_local(mdev);
+ put_ldev(mdev);
}
return TRUE;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index dcf642563c77..2e70345a06d4 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -34,7 +34,7 @@
/* Update disk stats at start of I/O request */
-static inline void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
+static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
const int rw = bio_data_dir(bio);
int cpu;
@@ -46,7 +46,7 @@ static inline void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_reque
}
/* Update disk stats when completing request upwards */
-static inline void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
+static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
{
int rw = bio_data_dir(req->master_bio);
unsigned long duration = jiffies - req->start_time;
@@ -93,9 +93,9 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const
* we would forget to resync the corresponding extent.
*/
if (s & RQ_LOCAL_MASK) {
- if (inc_local_if_state(mdev, D_FAILED)) {
+ if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_al_complete_io(mdev, req->sector);
- dec_local(mdev);
+ put_ldev(mdev);
} else if (__ratelimit(&drbd_ratelimit_state)) {
dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
"but my Disk seems to have failed :(\n",
@@ -338,7 +338,7 @@ STATIC int _req_conflicts(struct drbd_request *req)
D_ASSERT(hlist_unhashed(&req->colision));
- if (!inc_net(mdev))
+ if (!get_net_conf(mdev))
return 0;
/* BUG_ON */
@@ -383,11 +383,11 @@ STATIC int _req_conflicts(struct drbd_request *req)
out_no_conflict:
/* this is like it should be, and what we expected.
* our users do behave after all... */
- dec_net(mdev);
+ put_net_conf(mdev);
return 0;
out_conflict:
- dec_net(mdev);
+ put_net_conf(mdev);
return 1;
}
@@ -402,9 +402,6 @@ out_conflict:
* happen "atomically" within the req_lock,
* and it enforces that we have to think in a very structured manner
* about the "events" that may happen to a request during its life time ...
- *
- * Though I think it is likely that we break this again into many
- * static inline void _req_mod_ ## what (req) ...
*/
void _req_mod(struct drbd_request *req, enum drbd_req_event what, int error)
{
@@ -453,7 +450,7 @@ void _req_mod(struct drbd_request *req, enum drbd_req_event what, int error)
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done(req, error);
- dec_local(mdev);
+ put_ldev(mdev);
break;
case write_completed_with_error:
@@ -467,7 +464,7 @@ void _req_mod(struct drbd_request *req, enum drbd_req_event what, int error)
/* and now: check how to handle local io error. */
__drbd_chk_io_error(mdev, FALSE);
_req_may_be_done(req, error);
- dec_local(mdev);
+ put_ldev(mdev);
break;
case read_completed_with_error:
@@ -482,19 +479,19 @@ void _req_mod(struct drbd_request *req, enum drbd_req_event what, int error)
if (bio_rw(req->master_bio) == READA) {
/* it is legal to fail READA */
_req_may_be_done(req, error);
- dec_local(mdev);
+ put_ldev(mdev);
break;
}
/* else */
dev_alert(DEV, "Local READ failed sec=%llus size=%u\n",
(unsigned long long)req->sector, req->size);
- /* _req_mod(req,to_be_send); oops, recursion in static inline */
+ /* _req_mod(req,to_be_send); oops, recursion... */
D_ASSERT(!(req->rq_state & RQ_NET_MASK));
req->rq_state |= RQ_NET_PENDING;
inc_ap_pending(mdev);
__drbd_chk_io_error(mdev, FALSE);
- dec_local(mdev);
+ put_ldev(mdev);
/* NOTE: if we have no connection,
* or know the peer has no good data either,
* then we don't actually need to "queue_for_net_read",
@@ -739,7 +736,7 @@ STATIC int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
trace_drbd_bio(mdev, "Rq", bio, 0, req);
- local = inc_local(mdev);
+ local = get_ldev(mdev);
if (!local) {
bio_put(req->private_bio); /* or we get a bio leak */
req->private_bio = NULL;
@@ -758,7 +755,7 @@ STATIC int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
local = 0;
bio_put(req->private_bio);
req->private_bio = NULL;
- dec_local(mdev);
+ put_ldev(mdev);
}
}
remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
@@ -898,7 +895,7 @@ allocate_barrier:
bio_put(req->private_bio);
req->private_bio = NULL;
drbd_al_complete_io(mdev, req->sector);
- dec_local(mdev);
+ put_ldev(mdev);
local = 0;
}
if (remote)
@@ -953,7 +950,7 @@ fail_and_free_req:
if (local) {
bio_put(req->private_bio);
req->private_bio = NULL;
- dec_local(mdev);
+ put_ldev(mdev);
}
bio_endio(bio, err);
drbd_req_free(req);
@@ -1120,14 +1117,14 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
if (bio_size == 0) {
if (limit <= bvec->bv_len)
limit = bvec->bv_len;
- } else if (limit && inc_local(mdev)) {
+ } else if (limit && get_ldev(mdev)) {
struct request_queue * const b =
mdev->bc->backing_bdev->bd_disk->queue;
if (b->merge_bvec_fn && mdev->bc->dc.use_bmbv) {
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
limit = min(limit, backing_limit);
}
- dec_local(mdev);
+ put_ldev(mdev);
}
return limit;
}
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index a63a1e9ae5a8..81393ffa8c25 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -218,32 +218,6 @@ struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}
-/* when we receive the ACK for a write request,
- * verify that we actually know about it */
-static inline struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
- u64 id, sector_t sector)
-{
- struct hlist_head *slot = tl_hash_slot(mdev, sector);
- struct hlist_node *n;
- struct drbd_request *req;
-
- hlist_for_each_entry(req, n, slot, colision) {
- if ((unsigned long)req == (unsigned long)id) {
- if (req->sector != sector) {
- dev_err(DEV, "_ack_id_to_req: found req %p but it has "
- "wrong sector (%llus versus %llus)\n", req,
- (unsigned long long)req->sector,
- (unsigned long long)sector);
- break;
- }
- return req;
- }
- }
- dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
- (void *)(unsigned long)id, (unsigned long long)sector);
- return NULL;
-}
-
/* application reads (drbd_request objects) */
static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
diff --git a/drivers/block/drbd/drbd_tracing.c b/drivers/block/drbd/drbd_tracing.c
index 2eff178fbb0f..ab5aba9c4972 100644
--- a/drivers/block/drbd/drbd_tracing.c
+++ b/drivers/block/drbd/drbd_tracing.c
@@ -213,7 +213,7 @@ static void probe_drbd_netlink(void *data, int is_req)
static void probe_drbd_actlog(struct drbd_conf *mdev, sector_t sector, char* msg)
{
- unsigned int enr = (sector >> (AL_EXTENT_SIZE_B-9));
+ unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
if (!is_mdev_trace(mdev, TRACE_LVL_ALWAYS))
return;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 81f3a4e8ea49..dd984502d62e 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -82,11 +82,7 @@ void drbd_md_io_complete(struct bio *bio, int error)
{
struct drbd_md_io *md_io;
- /* error parameter ignored:
- * drbd_md_sync_page_io explicitly tests bio_uptodate(bio); */
-
md_io = (struct drbd_md_io *)bio->bi_private;
-
md_io->error = error;
trace_drbd_bio(md_io->mdev, "Md", bio, 1, NULL);
@@ -128,7 +124,7 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
drbd_chk_io_error(mdev, error, FALSE);
drbd_queue_work(&mdev->data.work, &e->w);
- dec_local(mdev);
+ put_ldev(mdev);
trace_drbd_ee(mdev, e, "read completed");
}
@@ -214,7 +210,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
drbd_al_complete_io(mdev, e_sector);
wake_asender(mdev);
- dec_local(mdev);
+ put_ldev(mdev);
}
@@ -367,7 +363,7 @@ STATIC int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
struct drbd_epoch_entry *e;
- if (!inc_local(mdev))
+ if (!get_ldev(mdev))
return 0;
if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
@@ -375,7 +371,7 @@ STATIC int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
if (!e) {
- dec_local(mdev);
+ put_ldev(mdev);
return 2;
}
@@ -441,16 +437,16 @@ int w_make_resync_request(struct drbd_conf *mdev,
dev_err(DEV, "%s in w_make_resync_request\n",
conns_to_name(mdev->state.conn));
- if (!inc_local(mdev)) {
+ if (!get_ldev(mdev)) {
/* Since we only need to access mdev->rsync a
- inc_local_if_state(mdev,D_FAILED) would be sufficient, but
+ get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
to continue resync with a broken disk makes no sense at
all */
dev_err(DEV, "Disk broke down during resync!\n");
mdev->resync_work.cb = w_resync_inactive;
return 1;
}
- /* All goto requeses have to happend after this block: inc_local() */
+ /* All goto requeses have to happend after this block: get_ldev() */
number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
@@ -466,7 +462,7 @@ next_sector:
if (bit == -1UL) {
mdev->bm_resync_fo = drbd_bm_bits(mdev);
mdev->resync_work.cb = w_resync_inactive;
- dec_local(mdev);
+ put_ldev(mdev);
return 1;
}
@@ -533,7 +529,7 @@ next_sector:
if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
switch (read_for_csum(mdev, sector, size)) {
case 0: /* Disk failure*/
- dec_local(mdev);
+ put_ldev(mdev);
return 0;
case 2: /* Allocation failed */
drbd_rs_complete_io(mdev, sector);
@@ -547,7 +543,7 @@ next_sector:
sector, size, ID_SYNCER)) {
dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
dec_rs_pending(mdev);
- dec_local(mdev);
+ put_ldev(mdev);
return 0;
}
}
@@ -561,13 +557,13 @@ next_sector:
* until then resync "work" is "inactive" ...
*/
mdev->resync_work.cb = w_resync_inactive;
- dec_local(mdev);
+ put_ldev(mdev);
return 1;
}
requeue:
mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
- dec_local(mdev);
+ put_ldev(mdev);
return 1;
}
@@ -677,7 +673,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
dbdt = Bit2KB(db/dt);
mdev->rs_paused /= HZ;
- if (!inc_local(mdev))
+ if (!get_ldev(mdev))
goto out;
spin_lock_irq(&mdev->req_lock);
@@ -765,7 +761,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
spin_unlock_irq(&mdev->req_lock);
- dec_local(mdev);
+ put_ldev(mdev);
out:
mdev->rs_total = 0;
mdev->rs_failed = 0;
@@ -840,9 +836,9 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1;
}
- if (inc_local_if_state(mdev, D_FAILED)) {
+ if (get_ldev_if_state(mdev, D_FAILED)) {
drbd_rs_complete_io(mdev, e->sector);
- dec_local(mdev);
+ put_ldev(mdev);
}
if (likely(drbd_bio_uptodate(e->private_bio))) {
@@ -1265,7 +1261,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_bm_recount_bits(mdev);
- /* In case a previous resync run was aborted by an IO error... */
+ /* In case a previous resync run was aborted by an IO error/detach on the peer. */
drbd_rs_cancel_all(mdev);
if (side == C_SYNC_TARGET) {
@@ -1284,7 +1280,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
drbd_state_lock(mdev);
- if (!inc_local_if_state(mdev, D_NEGOTIATING)) {
+ if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
drbd_state_unlock(mdev);
return;
}
@@ -1331,12 +1327,12 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
}
write_unlock_irq(&global_state_lock);
drbd_state_unlock(mdev);
- dec_local(mdev);
+ put_ldev(mdev);
if (r == SS_SUCCESS) {
dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
conns_to_name(ns.conn),
- (unsigned long) mdev->rs_total << (BM_BLOCK_SIZE_B-10),
+ (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
(unsigned long) mdev->rs_total);
if (mdev->rs_total == 0) {
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index b7ce5acca0bb..724fb44aad06 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -4,11 +4,10 @@
#include <linux/ctype.h>
#include <linux/mm.h>
-
/* see get_sb_bdev and bd_claim */
extern char *drbd_sec_holder;
-static inline sector_t drbd_get_hardsect(struct block_device *bdev)
+static inline sector_t drbd_get_hardsect_size(struct block_device *bdev)
{
return bdev->bd_disk->queue->hardsect_size;
}
diff --git a/drivers/block/drbd/lru_cache.c b/drivers/block/drbd/lru_cache.c
index 71858ff5b02c..80b0839a529d 100644
--- a/drivers/block/drbd/lru_cache.c
+++ b/drivers/block/drbd/lru_cache.c
@@ -34,13 +34,13 @@
#define PARANOIA_LEAVE() do { clear_bit(__LC_PARANOIA, &lc->flags); smp_mb__after_clear_bit(); } while (0)
#define RETURN(x...) do { PARANOIA_LEAVE(); return x ; } while (0)
-static inline size_t size_of_lc(unsigned int e_count, size_t e_size)
+static size_t size_of_lc(unsigned int e_count, size_t e_size)
{
return sizeof(struct lru_cache)
+ e_count * (e_size + sizeof(struct hlist_head));
}
-static inline void lc_init(struct lru_cache *lc,
+static void lc_init(struct lru_cache *lc,
const size_t bytes, const char *name,
const unsigned int e_count, const size_t e_size,
void *private_p)